Repository: caddyserver/caddy Branch: master Commit: df65455b1f0d Files: 588 Total size: 3.1 MB Directory structure: gitextract_8o2w0qyu/ ├── .editorconfig ├── .gitattributes ├── .github/ │ ├── CONTRIBUTING.md │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── ISSUE.yml │ │ └── config.yml │ ├── SECURITY.md │ ├── dependabot.yml │ ├── pull_request_template.md │ └── workflows/ │ ├── ai.yml │ ├── auto-release-pr.yml │ ├── ci.yml │ ├── cross-build.yml │ ├── lint.yml │ ├── release-proposal.yml │ ├── release.yml │ ├── release_published.yml │ └── scorecard.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── .pre-commit-config.yaml ├── AUTHORS ├── LICENSE ├── README.md ├── admin.go ├── admin_test.go ├── caddy.go ├── caddy_test.go ├── caddyconfig/ │ ├── caddyfile/ │ │ ├── adapter.go │ │ ├── dispenser.go │ │ ├── dispenser_test.go │ │ ├── formatter.go │ │ ├── formatter_fuzz.go │ │ ├── formatter_test.go │ │ ├── importargs.go │ │ ├── importgraph.go │ │ ├── lexer.go │ │ ├── lexer_fuzz.go │ │ ├── lexer_test.go │ │ ├── parse.go │ │ ├── parse_test.go │ │ └── testdata/ │ │ ├── empty.txt │ │ ├── glob/ │ │ │ ├── .dotfile.txt │ │ │ └── import_test1.txt │ │ ├── import_args0.txt │ │ ├── import_args1.txt │ │ ├── import_glob0.txt │ │ ├── import_glob1.txt │ │ ├── import_glob2.txt │ │ ├── import_recursive0.txt │ │ ├── import_recursive1.txt │ │ ├── import_recursive2.txt │ │ ├── import_recursive3.txt │ │ ├── import_test1.txt │ │ ├── import_test2.txt │ │ └── only_white_space.txt │ ├── configadapters.go │ ├── httpcaddyfile/ │ │ ├── addresses.go │ │ ├── addresses_fuzz.go │ │ ├── addresses_test.go │ │ ├── builtins.go │ │ ├── builtins_test.go │ │ ├── directives.go │ │ ├── directives_test.go │ │ ├── httptype.go │ │ ├── httptype_test.go │ │ ├── options.go │ │ ├── options_test.go │ │ ├── pkiapp.go │ │ ├── pkiapp_test.go │ │ ├── serveroptions.go │ │ ├── shorthands.go │ │ ├── testdata/ │ │ │ ├── import_variadic.txt │ │ │ ├── import_variadic_snippet.txt │ │ │ └── import_variadic_with_import.txt │ │ 
├── tlsapp.go │ │ └── tlsapp_test.go │ ├── httploader.go │ └── load.go ├── caddytest/ │ ├── a.caddy.localhost.crt │ ├── a.caddy.localhost.key │ ├── caddy.ca.cer │ ├── caddy.localhost.crt │ ├── caddy.localhost.key │ ├── caddytest.go │ ├── caddytest_test.go │ ├── integration/ │ │ ├── acme_test.go │ │ ├── acmeserver_test.go │ │ ├── autohttps_test.go │ │ ├── caddyfile_adapt/ │ │ │ ├── acme_dns_configured.caddyfiletest │ │ │ ├── acme_dns_naked_use_dns_defaults.caddyfiletest │ │ │ ├── acme_dns_naked_without_dns.caddyfiletest │ │ │ ├── acme_server_custom_challenges.caddyfiletest │ │ │ ├── acme_server_default_challenges.caddyfiletest │ │ │ ├── acme_server_lifetime.caddyfiletest │ │ │ ├── acme_server_multi_custom_challenges.caddyfiletest │ │ │ ├── acme_server_policy-allow.caddyfiletest │ │ │ ├── acme_server_policy-both.caddyfiletest │ │ │ ├── acme_server_policy-deny.caddyfiletest │ │ │ ├── acme_server_sign_with_root.caddyfiletest │ │ │ ├── ambiguous_site_definition.caddyfiletest │ │ │ ├── ambiguous_site_definition_duplicate_key.caddyfiletest │ │ │ ├── auto_https_disable_redirects.caddyfiletest │ │ │ ├── auto_https_ignore_loaded_certs.caddyfiletest │ │ │ ├── auto_https_off.caddyfiletest │ │ │ ├── bind_fd_fdgram_h123.caddyfiletest │ │ │ ├── bind_ipv6.caddyfiletest │ │ │ ├── directive_as_site_address.caddyfiletest │ │ │ ├── duplicate_listener_address_global.caddyfiletest │ │ │ ├── enable_tls_for_catch_all_site.caddyfiletest │ │ │ ├── encode_options.caddyfiletest │ │ │ ├── error_example.caddyfiletest │ │ │ ├── error_multi_site_blocks.caddyfiletest │ │ │ ├── error_range_codes.caddyfiletest │ │ │ ├── error_range_simple_codes.caddyfiletest │ │ │ ├── error_simple_codes.caddyfiletest │ │ │ ├── error_sort.caddyfiletest │ │ │ ├── error_subhandlers.caddyfiletest │ │ │ ├── expression_quotes.caddyfiletest │ │ │ ├── file_server_disable_canonical_uris.caddyfiletest │ │ │ ├── file_server_etag_file_extensions.caddyfiletest │ │ │ ├── file_server_file_limit.caddyfiletest │ │ │ ├── 
file_server_pass_thru.caddyfiletest │ │ │ ├── file_server_precompressed.caddyfiletest │ │ │ ├── file_server_sort.caddyfiletest │ │ │ ├── file_server_status.caddyfiletest │ │ │ ├── forward_auth_authelia.caddyfiletest │ │ │ ├── forward_auth_copy_headers_strip.caddyfiletest │ │ │ ├── forward_auth_rename_headers.caddyfiletest │ │ │ ├── global_options.caddyfiletest │ │ │ ├── global_options_acme.caddyfiletest │ │ │ ├── global_options_admin.caddyfiletest │ │ │ ├── global_options_admin_with_persist_config_off.caddyfiletest │ │ │ ├── global_options_debug_with_access_log.caddyfiletest │ │ │ ├── global_options_default_bind.caddyfiletest │ │ │ ├── global_options_log_and_site.caddyfiletest │ │ │ ├── global_options_log_basic.caddyfiletest │ │ │ ├── global_options_log_custom.caddyfiletest │ │ │ ├── global_options_log_multi.caddyfiletest │ │ │ ├── global_options_log_sampling.caddyfiletest │ │ │ ├── global_options_persist_config.caddyfiletest │ │ │ ├── global_options_preferred_chains.caddyfiletest │ │ │ ├── global_options_resolvers.caddyfiletest │ │ │ ├── global_options_resolvers_http_challenge.caddyfiletest │ │ │ ├── global_options_resolvers_local_dns_inherit.caddyfiletest │ │ │ ├── global_options_resolvers_local_override.caddyfiletest │ │ │ ├── global_options_resolvers_mixed.caddyfiletest │ │ │ ├── global_options_skip_install_trust.caddyfiletest │ │ │ ├── global_server_options_multi.caddyfiletest │ │ │ ├── global_server_options_single.caddyfiletest │ │ │ ├── handle_nested_in_route.caddyfiletest │ │ │ ├── handle_path.caddyfiletest │ │ │ ├── handle_path_sorting.caddyfiletest │ │ │ ├── header.caddyfiletest │ │ │ ├── header_placeholder_search.caddyfiletest │ │ │ ├── heredoc.caddyfiletest │ │ │ ├── heredoc_extra_indentation.caddyfiletest │ │ │ ├── heredoc_incomplete.caddyfiletest │ │ │ ├── heredoc_invalid_marker.caddyfiletest │ │ │ ├── heredoc_mismatched_whitespace.caddyfiletest │ │ │ ├── heredoc_missing_marker.caddyfiletest │ │ │ ├── heredoc_too_many_angle_brackets.caddyfiletest │ │ 
│ ├── http_only_hostnames.caddyfiletest │ │ │ ├── http_only_on_any_address.caddyfiletest │ │ │ ├── http_only_on_domain.caddyfiletest │ │ │ ├── http_only_on_hostless_block.caddyfiletest │ │ │ ├── http_only_on_localhost.caddyfiletest │ │ │ ├── http_only_on_non_standard_port.caddyfiletest │ │ │ ├── http_valid_directive_like_site_address.caddyfiletest │ │ │ ├── https_on_domain.caddyfiletest │ │ │ ├── import_args_file.caddyfiletest │ │ │ ├── import_args_snippet.caddyfiletest │ │ │ ├── import_args_snippet_env_placeholder.caddyfiletest │ │ │ ├── import_block_anonymous.caddyfiletest │ │ │ ├── import_block_snippet.caddyfiletest │ │ │ ├── import_block_snippet_args.caddyfiletest │ │ │ ├── import_block_snippet_non_replaced_block.caddyfiletest │ │ │ ├── import_block_snippet_non_replaced_block_from_separate_file.caddyfiletest │ │ │ ├── import_block_snippet_non_replaced_key_block.caddyfiletest │ │ │ ├── import_block_with_site_block.caddyfiletest │ │ │ ├── import_blocks_snippet.caddyfiletest │ │ │ ├── import_blocks_snippet_nested.caddyfiletest │ │ │ ├── import_cycle.caddyfiletest │ │ │ ├── intercept_response.caddyfiletest │ │ │ ├── invoke_named_routes.caddyfiletest │ │ │ ├── invoke_undefined_named_route.caddyfiletest │ │ │ ├── log_add.caddyfiletest │ │ │ ├── log_append_encoder.caddyfiletest │ │ │ ├── log_except_catchall_blocks.caddyfiletest │ │ │ ├── log_filter_no_wrap.caddyfiletest │ │ │ ├── log_filter_with_header.txt │ │ │ ├── log_filters.caddyfiletest │ │ │ ├── log_multi_logger_name.caddyfiletest │ │ │ ├── log_multiple_regexp_filters.caddyfiletest │ │ │ ├── log_override_hostname.caddyfiletest │ │ │ ├── log_override_name_multiaccess.caddyfiletest │ │ │ ├── log_override_name_multiaccess_debug.caddyfiletest │ │ │ ├── log_roll_days.caddyfiletest │ │ │ ├── log_sampling.caddyfiletest │ │ │ ├── log_skip_hosts.caddyfiletest │ │ │ ├── map_and_vars_with_raw_types.caddyfiletest │ │ │ ├── matcher_outside_site_block.caddyfiletest │ │ │ ├── matcher_syntax.caddyfiletest │ │ │ ├── 
matchers_in_route.caddyfiletest │ │ │ ├── method_directive.caddyfiletest │ │ │ ├── metrics_disable_om.caddyfiletest │ │ │ ├── metrics_merge_options.caddyfiletest │ │ │ ├── metrics_perhost.caddyfiletest │ │ │ ├── metrics_syntax.caddyfiletest │ │ │ ├── not_block_merging.caddyfiletest │ │ │ ├── php_fastcgi_expanded_form.caddyfiletest │ │ │ ├── php_fastcgi_handle_response.caddyfiletest │ │ │ ├── php_fastcgi_index_off.caddyfiletest │ │ │ ├── php_fastcgi_matcher.caddyfiletest │ │ │ ├── php_fastcgi_subdirectives.caddyfiletest │ │ │ ├── php_fastcgi_try_files_override.caddyfiletest │ │ │ ├── php_fastcgi_try_files_override_no_dir_index.caddyfiletest │ │ │ ├── portless_upstream.caddyfiletest │ │ │ ├── push.caddyfiletest │ │ │ ├── renewal_window_ratio_global.caddyfiletest │ │ │ ├── renewal_window_ratio_tls_directive.caddyfiletest │ │ │ ├── replaceable_upstream.caddyfiletest │ │ │ ├── replaceable_upstream_partial_port.caddyfiletest │ │ │ ├── replaceable_upstream_port.caddyfiletest │ │ │ ├── request_body.caddyfiletest │ │ │ ├── request_header.caddyfiletest │ │ │ ├── reverse_proxy_buffers.caddyfiletest │ │ │ ├── reverse_proxy_dynamic_upstreams.caddyfiletest │ │ │ ├── reverse_proxy_dynamic_upstreams_grace_period.caddyfiletest │ │ │ ├── reverse_proxy_empty_non_http_transport.caddyfiletest │ │ │ ├── reverse_proxy_h2c_shorthand.caddyfiletest │ │ │ ├── reverse_proxy_handle_response.caddyfiletest │ │ │ ├── reverse_proxy_health_headers.caddyfiletest │ │ │ ├── reverse_proxy_health_method.caddyfiletest │ │ │ ├── reverse_proxy_health_path_query.caddyfiletest │ │ │ ├── reverse_proxy_health_reqbody.caddyfiletest │ │ │ ├── reverse_proxy_http_transport_forward_proxy_url.txt │ │ │ ├── reverse_proxy_http_transport_none_proxy.txt │ │ │ ├── reverse_proxy_http_transport_tls_file_cert.txt │ │ │ ├── reverse_proxy_http_transport_tls_inline_cert.txt │ │ │ ├── reverse_proxy_http_transport_url_proxy.txt │ │ │ ├── reverse_proxy_load_balance.caddyfiletest │ │ │ ├── 
reverse_proxy_load_balance_wrr.caddyfiletest │ │ │ ├── reverse_proxy_localaddr.caddyfiletest │ │ │ ├── reverse_proxy_options.caddyfiletest │ │ │ ├── reverse_proxy_port_range.caddyfiletest │ │ │ ├── reverse_proxy_trusted_proxies.caddyfiletest │ │ │ ├── reverse_proxy_trusted_proxies_unix.caddyfiletest │ │ │ ├── reverse_proxy_upstream_placeholder.caddyfiletest │ │ │ ├── rewrite_directive_permutations.caddyfiletest │ │ │ ├── root_directive_permutations.caddyfiletest │ │ │ ├── server_names.caddyfiletest │ │ │ ├── shorthand_parameterized_placeholders.caddyfiletest │ │ │ ├── site_address_invalid_port.caddyfiletest │ │ │ ├── site_address_negative_port.caddyfiletest │ │ │ ├── site_address_unsupported_scheme.caddyfiletest │ │ │ ├── site_address_wss_invalid_port.caddyfiletest │ │ │ ├── site_address_wss_scheme.caddyfiletest │ │ │ ├── site_block_sorting.caddyfiletest │ │ │ ├── sort_directives_with_any_matcher_first.caddyfiletest │ │ │ ├── sort_directives_within_handle.caddyfiletest │ │ │ ├── sort_vars_in_reverse.caddyfiletest │ │ │ ├── tls_acme_dns_override_global_dns.caddyfiletest │ │ │ ├── tls_acme_preferred_chains.caddyfiletest │ │ │ ├── tls_automation_policies_1.caddyfiletest │ │ │ ├── tls_automation_policies_10.caddyfiletest │ │ │ ├── tls_automation_policies_11.caddyfiletest │ │ │ ├── tls_automation_policies_2.caddyfiletest │ │ │ ├── tls_automation_policies_3.caddyfiletest │ │ │ ├── tls_automation_policies_4.caddyfiletest │ │ │ ├── tls_automation_policies_5.caddyfiletest │ │ │ ├── tls_automation_policies_6.caddyfiletest │ │ │ ├── tls_automation_policies_7.caddyfiletest │ │ │ ├── tls_automation_policies_8.caddyfiletest │ │ │ ├── tls_automation_policies_9.caddyfiletest │ │ │ ├── tls_automation_policies_global_email_localhost.caddyfiletest │ │ │ ├── tls_automation_wildcard_force_automate.caddyfiletest │ │ │ ├── tls_automation_wildcard_shadowing.caddyfiletest │ │ │ ├── tls_client_auth_cert_file-legacy-with-verifier.caddyfiletest │ │ │ ├── 
tls_client_auth_cert_file-legacy.caddyfiletest │ │ │ ├── tls_client_auth_cert_file.caddyfiletest │ │ │ ├── tls_client_auth_inline_cert-legacy.caddyfiletest │ │ │ ├── tls_client_auth_inline_cert.caddyfiletest │ │ │ ├── tls_client_auth_inline_cert_with_leaf_trust.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_file_loader_block.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_file_loader_inline.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_file_loader_multi-in-block.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_folder_loader_block.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_folder_loader_inline.caddyfiletest │ │ │ ├── tls_client_auth_leaf_verifier_folder_loader_multi-in-block.caddyfiletest │ │ │ ├── tls_conn_policy_consolidate.caddyfiletest │ │ │ ├── tls_dns_multiple_options_without_provider.caddyfiletest │ │ │ ├── tls_dns_override_acme_dns.caddyfiletest │ │ │ ├── tls_dns_override_global_dns.caddyfiletest │ │ │ ├── tls_dns_propagation_timeout_without_provider.caddyfiletest │ │ │ ├── tls_dns_propagation_without_provider.caddyfiletest │ │ │ ├── tls_dns_resolvers_with_global_provider.caddyfiletest │ │ │ ├── tls_dns_ttl.caddyfiletest │ │ │ ├── tls_explicit_issuer_dns_ttl.caddyfiletest │ │ │ ├── tls_explicit_issuer_propagation_options.caddyfiletest │ │ │ ├── tls_internal_options.caddyfiletest │ │ │ ├── tls_propagation_options.caddyfiletest │ │ │ ├── tracing.caddyfiletest │ │ │ ├── uri_query_operations.caddyfiletest │ │ │ ├── uri_replace_brace_escape.caddyfiletest │ │ │ └── wildcard_pattern.caddyfiletest │ │ ├── caddyfile_adapt_test.go │ │ ├── caddyfile_test.go │ │ ├── forwardauth_test.go │ │ ├── h2listener_test.go │ │ ├── handler_test.go │ │ ├── intercept_test.go │ │ ├── leafcertloaders_test.go │ │ ├── listener_test.go │ │ ├── map_test.go │ │ ├── mockdns_test.go │ │ ├── pki_test.go │ │ ├── proxyprotocol_test.go │ │ ├── reverseproxy_test.go │ │ ├── sni_test.go │ │ ├── stream_test.go │ │ └── testdata/ │ │ ├── cookie.html │ │ ├── foo.txt 
│ │ ├── foo_with_multiple_trailing_newlines.txt │ │ ├── foo_with_trailing_newline.txt │ │ ├── import_respond.txt │ │ ├── index.localhost.html │ │ └── issue_7518_unused_block_panic_snippets.conf │ └── leafcert.pem ├── cmd/ │ ├── caddy/ │ │ ├── main.go │ │ └── setcap.sh │ ├── cobra.go │ ├── commandfactory.go │ ├── commandfuncs.go │ ├── commands.go │ ├── commands_test.go │ ├── main.go │ ├── main_test.go │ ├── packagesfuncs.go │ ├── removebinary.go │ ├── removebinary_windows.go │ ├── storagefuncs.go │ └── x509rootsfallback.go ├── context.go ├── context_test.go ├── duration_fuzz.go ├── filepath.go ├── filepath_windows.go ├── filesystem.go ├── go.mod ├── go.sum ├── internal/ │ ├── filesystems/ │ │ ├── map.go │ │ └── os.go │ ├── logbuffer.go │ ├── logs.go │ ├── metrics/ │ │ ├── metrics.go │ │ └── metrics_test.go │ ├── ranges.go │ ├── sockets.go │ └── testmocks/ │ └── dummyverifier.go ├── listen.go ├── listen_unix.go ├── listen_unix_setopt.go ├── listen_unix_setopt_freebsd.go ├── listeners.go ├── listeners_fuzz.go ├── listeners_test.go ├── logging.go ├── logging_test.go ├── metrics.go ├── modules/ │ ├── caddyevents/ │ │ ├── app.go │ │ └── eventsconfig/ │ │ └── caddyfile.go │ ├── caddyfs/ │ │ └── filesystem.go │ ├── caddyhttp/ │ │ ├── app.go │ │ ├── autohttps.go │ │ ├── caddyauth/ │ │ │ ├── argon2id.go │ │ │ ├── basicauth.go │ │ │ ├── bcrypt.go │ │ │ ├── caddyauth.go │ │ │ ├── caddyfile.go │ │ │ └── command.go │ │ ├── caddyhttp.go │ │ ├── caddyhttp_test.go │ │ ├── celmatcher.go │ │ ├── celmatcher_test.go │ │ ├── encode/ │ │ │ ├── brotli/ │ │ │ │ └── brotli_precompressed.go │ │ │ ├── caddyfile.go │ │ │ ├── encode.go │ │ │ ├── encode_test.go │ │ │ ├── gzip/ │ │ │ │ ├── gzip.go │ │ │ │ └── gzip_precompressed.go │ │ │ └── zstd/ │ │ │ ├── zstd.go │ │ │ └── zstd_precompressed.go │ │ ├── errors.go │ │ ├── fileserver/ │ │ │ ├── browse.go │ │ │ ├── browse.html │ │ │ ├── browsetplcontext.go │ │ │ ├── browsetplcontext_test.go │ │ │ ├── caddyfile.go │ │ │ ├── command.go │ │ │ ├── 
matcher.go │ │ │ ├── matcher_test.go │ │ │ ├── staticfiles.go │ │ │ ├── staticfiles_test.go │ │ │ └── testdata/ │ │ │ ├── %D9%85%D9%84%D9%81.txt │ │ │ ├── foo.php.php/ │ │ │ │ └── index.php │ │ │ ├── foo.txt │ │ │ ├── foodir/ │ │ │ │ ├── bar.txt │ │ │ │ └── foo.txt │ │ │ ├── index.php │ │ │ ├── large.txt │ │ │ ├── notphp.php.txt │ │ │ ├── remote.php │ │ │ └── ملف.txt │ │ ├── headers/ │ │ │ ├── caddyfile.go │ │ │ ├── headers.go │ │ │ └── headers_test.go │ │ ├── http2listener.go │ │ ├── httpredirectlistener.go │ │ ├── intercept/ │ │ │ └── intercept.go │ │ ├── invoke.go │ │ ├── ip_matchers.go │ │ ├── ip_range.go │ │ ├── logging/ │ │ │ ├── caddyfile.go │ │ │ └── logappend.go │ │ ├── logging.go │ │ ├── map/ │ │ │ ├── caddyfile.go │ │ │ ├── map.go │ │ │ └── map_test.go │ │ ├── marshalers.go │ │ ├── matchers.go │ │ ├── matchers_test.go │ │ ├── metrics.go │ │ ├── metrics_test.go │ │ ├── proxyprotocol/ │ │ │ ├── listenerwrapper.go │ │ │ ├── module.go │ │ │ └── policy.go │ │ ├── push/ │ │ │ ├── caddyfile.go │ │ │ ├── handler.go │ │ │ ├── link.go │ │ │ └── link_test.go │ │ ├── replacer.go │ │ ├── replacer_test.go │ │ ├── requestbody/ │ │ │ ├── caddyfile.go │ │ │ └── requestbody.go │ │ ├── responsematchers.go │ │ ├── responsematchers_test.go │ │ ├── responsewriter.go │ │ ├── responsewriter_test.go │ │ ├── reverseproxy/ │ │ │ ├── addresses.go │ │ │ ├── addresses_test.go │ │ │ ├── admin.go │ │ │ ├── admin_test.go │ │ │ ├── ascii.go │ │ │ ├── ascii_test.go │ │ │ ├── buffering_test.go │ │ │ ├── caddyfile.go │ │ │ ├── command.go │ │ │ ├── copyresponse.go │ │ │ ├── dynamic_upstreams_test.go │ │ │ ├── fastcgi/ │ │ │ │ ├── caddyfile.go │ │ │ │ ├── client.go │ │ │ │ ├── client_test.go │ │ │ │ ├── fastcgi.go │ │ │ │ ├── fastcgi_test.go │ │ │ │ ├── header.go │ │ │ │ ├── pool.go │ │ │ │ ├── reader.go │ │ │ │ ├── record.go │ │ │ │ └── writer.go │ │ │ ├── forwardauth/ │ │ │ │ └── caddyfile.go │ │ │ ├── headers_test.go │ │ │ ├── healthchecks.go │ │ │ ├── hosts.go │ │ │ ├── httptransport.go │ 
│ │ ├── httptransport_test.go │ │ │ ├── metrics.go │ │ │ ├── passive_health_test.go │ │ │ ├── retries_test.go │ │ │ ├── reverseproxy.go │ │ │ ├── selectionpolicies.go │ │ │ ├── selectionpolicies_test.go │ │ │ ├── streaming.go │ │ │ ├── streaming_test.go │ │ │ ├── upstreams.go │ │ │ └── upstreams_test.go │ │ ├── rewrite/ │ │ │ ├── caddyfile.go │ │ │ ├── rewrite.go │ │ │ └── rewrite_test.go │ │ ├── routes.go │ │ ├── server.go │ │ ├── server_test.go │ │ ├── standard/ │ │ │ └── imports.go │ │ ├── staticerror.go │ │ ├── staticresp.go │ │ ├── staticresp_test.go │ │ ├── subroute.go │ │ ├── templates/ │ │ │ ├── caddyfile.go │ │ │ ├── frontmatter.go │ │ │ ├── frontmatter_fuzz.go │ │ │ ├── templates.go │ │ │ ├── tplcontext.go │ │ │ └── tplcontext_test.go │ │ ├── tracing/ │ │ │ ├── module.go │ │ │ ├── module_test.go │ │ │ ├── tracer.go │ │ │ ├── tracer_test.go │ │ │ ├── tracerprovider.go │ │ │ └── tracerprovider_test.go │ │ └── vars.go │ ├── caddypki/ │ │ ├── acmeserver/ │ │ │ ├── acmeserver.go │ │ │ ├── acmeserver_test.go │ │ │ ├── caddyfile.go │ │ │ ├── challenges.go │ │ │ ├── policy.go │ │ │ └── policy_test.go │ │ ├── adminapi.go │ │ ├── ca.go │ │ ├── certificates.go │ │ ├── command.go │ │ ├── crypto.go │ │ ├── crypto_test.go │ │ ├── maintain.go │ │ ├── maintain_test.go │ │ └── pki.go │ ├── caddytls/ │ │ ├── acmeissuer.go │ │ ├── automation.go │ │ ├── capools.go │ │ ├── capools_test.go │ │ ├── certmanagers.go │ │ ├── certselection.go │ │ ├── connpolicy.go │ │ ├── connpolicy_test.go │ │ ├── distributedstek/ │ │ │ └── distributedstek.go │ │ ├── ech.go │ │ ├── fileloader.go │ │ ├── folderloader.go │ │ ├── internalissuer.go │ │ ├── internalissuer_test.go │ │ ├── leaffileloader.go │ │ ├── leaffileloader_test.go │ │ ├── leaffolderloader.go │ │ ├── leaffolderloader_test.go │ │ ├── leafpemloader.go │ │ ├── leafpemloader_test.go │ │ ├── leafstorageloader.go │ │ ├── matchers.go │ │ ├── matchers_test.go │ │ ├── ondemand.go │ │ ├── pemloader.go │ │ ├── sessiontickets.go │ │ ├── 
standardstek/ │ │ │ └── stek.go │ │ ├── storageloader.go │ │ ├── tls.go │ │ ├── values.go │ │ └── zerosslissuer.go │ ├── filestorage/ │ │ └── filestorage.go │ ├── internal/ │ │ └── network/ │ │ └── networkproxy.go │ ├── logging/ │ │ ├── appendencoder.go │ │ ├── cores.go │ │ ├── encoders.go │ │ ├── filewriter.go │ │ ├── filewriter_test.go │ │ ├── filewriter_test_windows.go │ │ ├── filterencoder.go │ │ ├── filters.go │ │ ├── filters_test.go │ │ ├── netwriter.go │ │ └── nopencoder.go │ ├── metrics/ │ │ ├── adminmetrics.go │ │ ├── metrics.go │ │ └── metrics_test.go │ └── standard/ │ └── imports.go ├── modules.go ├── modules_test.go ├── notify/ │ ├── notify_linux.go │ ├── notify_other.go │ └── notify_windows.go ├── replacer.go ├── replacer_fuzz.go ├── replacer_test.go ├── service_windows.go ├── sigtrap.go ├── sigtrap_nonposix.go ├── sigtrap_posix.go ├── storage.go └── usagepool.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .editorconfig ================================================ [*] end_of_line = lf [caddytest/integration/caddyfile_adapt/*.caddyfiletest] indent_style = tab ================================================ FILE: .gitattributes ================================================ *.go text eol=lf ================================================ FILE: .github/CONTRIBUTING.md ================================================ Contributing to Caddy ===================== Welcome! Thank you for choosing to be a part of our community. Caddy wouldn't be nearly as excellent without your involvement! For starters, we invite you to join [the Caddy forum](https://caddy.community) where you can hang out with other Caddy users and developers. 
## Common Tasks - [Contributing code](#contributing-code) - [Writing a Caddy module](#writing-a-caddy-module) - [Asking or answering questions for help using Caddy](#getting-help-using-caddy) - [Reporting a bug](#reporting-bugs) - [Suggesting an enhancement or a new feature](#suggesting-features) - [Improving documentation](#improving-documentation) Other menu items: - [Values](#values) - [Coordinated Disclosure](#coordinated-disclosure) - [Thank You](#thank-you) ### Contributing code You can have a huge impact on the project by helping with its code. To contribute code to Caddy, first submit or comment in an issue to discuss your contribution, then open a [pull request](https://github.com/caddyserver/caddy/pulls) (PR). If you're new to our community, that's okay: **we gladly welcome pull requests from anyone, regardless of your native language or coding experience.** You can get familiar with Caddy's code base by using [code search at Sourcegraph](https://sourcegraph.com/github.com/caddyserver/caddy). We hold contributions to a high standard for quality :bowtie:, so don't be surprised if we ask for revisions—even if it seems small or insignificant. Please don't take it personally. :blue_heart: If your change is on the right track, we can guide you to make it mergeable. Here are some of the expectations we have of contributors: - **Open an issue to propose your change first.** This way we can avoid confusion, coordinate what everyone is working on, and ensure that any changes are in-line with the project's goals and the best interests of its users. We can also discuss the best possible implementation. If there's already an issue about it, comment on the existing issue to claim it. A lot of valuable time can be saved by discussing a proposal first. - **Keep pull requests small.** Smaller PRs are more likely to be merged because they are easier to review! We might ask you to break up large PRs into smaller ones. 
[An example of what we want to avoid.](https://twitter.com/iamdevloper/status/397664295875805184) - **Keep related commits together in a PR.** We do want pull requests to be small, but you should also keep multiple related commits in the same PR if they rely on each other. - **Write tests.** Good, automated tests are very valuable! Written properly, they ensure your change works, and that other changes in the future won't break your change. CI checks should pass. - **Benchmarks should be included for optimizations.** Optimizations sometimes make code harder to read or have changes that are less than obvious. They should be proven with benchmarks and profiling. - **[Squash](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) insignificant commits.** Every commit should be significant. Commits which merely rewrite a comment or fix a typo can be combined into another commit that has more substance. Interactive rebase can do this, or a simpler way is `git reset --soft <diverging-commit>` then `git commit -s`. - **Be responsible for and maintain your contributions.** Caddy is a growing project, and it's much better when individual contributors help maintain their change after it is merged. - **Use comments properly.** We expect good godoc comments for package-level functions, types, and values. Comments are also useful whenever the purpose for a line of code is not obvious. - **Pull requests may still get closed.** The longer a PR stays open and idle, the more likely it is to be closed. If we haven't reviewed it in a while, it probably means the change is not a priority. Please don't take this personally, we're trying to balance a lot of tasks! If nobody else has commented or reacted to the PR, it likely means your change is useful only to you. The reality is this happens quite a lot. We don't tend to accept PRs that aren't generally helpful. For these reasons or others, the PR may get closed even after a review. 
We are not obligated to accept all proposed changes, even if the best justification we can give is something vague like, "It doesn't sit right." Sometimes PRs are just the wrong thing or the wrong time. Because it is open source, you can always build your own modified version of Caddy with a change you need, even if we reject it in the official repo. Plus, because Caddy is extensible, it's possible your feature could make a great plugin instead! - **You certify that you wrote and comprehend the code you submit.** The Caddy project welcomes original contributions that comply with [our CLA](https://cla-assistant.io/caddyserver/caddy), meaning that authors must be able to certify that they created or have rights to the code they are contributing. In addition, we require that code is not simply copy-pasted from Q/A sites or AI language models without full comprehension and rigorous testing. In other words: contributors are allowed to refer to communities for assistance and use AI tools such as language models for inspiration, but code which originates from or is assisted by these resources MUST be: - Licensed for you to freely share - Fully comprehended by you (be able to explain every line of code) - Verified by automated tests when feasible, or thorough manual tests otherwise We have found that current language models (LLMs, like ChatGPT) may understand code syntax and even problem spaces to an extent, but often fail in subtle ways to convey true knowledge and produce correct algorithms. Integrated tools such as GitHub Copilot and Sourcegraph Cody may be used for inspiration, but code generated by these tools still needs to meet our criteria for licensing, human comprehension, and testing. These tools may be used to help write code comments and tests as long as you can certify they are accurate and correct. 
Note that it is often more trouble than it's worth to certify that Copilot (for example) is not giving you code that is possibly plagiarised, unlicensed, or licensed with incompatible terms -- as the Caddy project cannot accept such contributions. If that's too difficult for you (or impossible), then we recommend using these resources only for inspiration and write your own code. Ultimately, you (the contributor) are responsible for the code you're submitting. As a courtesy to reviewers, we kindly ask that you disclose when contributing code that was generated by an AI tool or copied from another website so we can be aware of what to look for in code review. We often grant [collaborator status](#collaborator-instructions) to contributors who author one or more significant, high-quality PRs that are merged into the code base. #### HOW TO MAKE A PULL REQUEST TO CADDY Contributing to Go projects on GitHub is fun and easy. After you have proposed your change in an issue, we recommend the following workflow: 1. [Fork this repo](https://github.com/caddyserver/caddy). This makes a copy of the code you can write to. 2. If you don't already have this repo (caddyserver/caddy.git) on your computer, clone it down: `git clone https://github.com/caddyserver/caddy.git` 3. Tell git that it can push the caddyserver/caddy.git repo to your fork by adding a remote: `git remote add myfork https://github.com/<username>/caddy.git` 4. Make your changes in the caddyserver/caddy.git repo on your computer. 5. Push your changes to your fork: `git push myfork` 6. [Create a pull request](https://github.com/caddyserver/caddy/pull/new/master) to merge your changes into caddyserver/caddy @ master. (Click "compare across forks" and change the head fork.) This workflow is nice because you don't have to change import paths. You can get fancier by using different branches if you want. ### Writing a Caddy module Caddy can do more with modules! Anyone can write one. 
Caddy modules are Go libraries that get compiled into Caddy, extending its feature set. They can add directives to the Caddyfile, add new configuration adapters, and even implement new server types (e.g. HTTP, DNS). [Learn how to write a module here](https://caddyserver.com/docs/extending-caddy). You should also share and discuss your module idea [on the forums](https://caddy.community) to have people test it out. We don't use the Caddy issue tracker for third-party modules. ### Getting help using Caddy If you have a question about using Caddy, [ask on our forum](https://caddy.community)! There will be more people there who can help you than just the Caddy developers who follow our issue tracker. Issues are not the place for usage questions. Many people on the forums could benefit from your experience and expertise, too. Once you've been helped, consider giving back by answering other people's questions and participating in other discussions. ### Reporting bugs Like every software, Caddy has its flaws. If you find one, [search the issues](https://github.com/caddyserver/caddy/issues) to see if it has already been reported. If not, [open a new issue](https://github.com/caddyserver/caddy/issues/new) and describe the bug, and somebody will look into it! (This repository is only for Caddy and its standard modules.) **You can help us fix bugs!** Speed up the patch by identifying the bug in the code. This can sometimes be done by adding `fmt.Println()` statements (or similar) in relevant code paths to narrow down where the problem may be. It's a good way to [introduce yourself to the Go language](https://tour.golang.org), too. We may reply with an issue template. Please follow the template so we have all the needed information. Unredacted—yes, actual values matter. We need to be able to repeat the bug using your instructions. Please simplify the issue as much as possible. If you don't, we might close your report. 
The burden is on you to make it easily reproducible and to convince us that it is actually a bug in Caddy. This is easiest to do when you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). The more detailed and specific you are, the faster we will be able to help you! We suggest reading [How to Report Bugs Effectively](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html). Please be kind. :smile: Remember that Caddy comes at no cost to you, and you're getting free support when we fix your issues. If we helped you, please consider helping someone else! #### Bug reporting expectations Maintainers---or more generally, developers---need three things to act on bugs: 1. To agree or be convinced that it's a bug (reporter's responsibility). - A bug is unintentional, undesired, or surprising behavior which violates documentation or relevant spec. It might be either a mistake in the documentation or a bug in the code. - This project usually does not work around bugs in other software, systems, and dependencies; instead, we recommend that those bugs are fixed at their source. This sometimes means we close issues or reject PRs that attempt to fix, workaround, or hide bugs in other projects. 2. To be able to understand what is happening (mostly reporter's responsibility). - If the reporter can provide satisfactory instructions such that a developer can reproduce the bug, the developer will likely be able to understand the bug, write a test case, and implement a fix. This is the least amount of work for everyone and path to the fastest resolution. - Otherwise, the burden is on the reporter to test possible solutions. This is less preferable because it loosens the feedback loop, slows down debugging efforts, obscures the true nature of the problem from the developers, and is unlikely to result in new test cases. 3. A solution, or ideas toward a solution (mostly maintainer's responsibility). 
- Sometimes the best solution is a documentation change. - Usually the developers have the best domain knowledge for inventing a solution, but reporters may have ideas or preferences for how they would like the software to work. - Security, correctness, and project goals/vision all take priority over a user's preferences. - It's simply good business to yield a solution that satisfies the users, and it's even better business to leave them impressed. Thus, at the very least, the reporter is expected to: 1. Convince the reader that it's a bug in Caddy (if it's not obvious). 2. Reduce the problem down to the minimum specific steps required to reproduce it. The maintainer is usually able to do the rest; but of course the reporter may invest additional effort to speed up the process. ### Suggesting features First, [search to see if your feature has already been requested](https://github.com/caddyserver/caddy/issues). If it has, you can add a :+1: reaction to vote for it. If your feature idea is new, open an issue to request the feature. Please describe your idea thoroughly so that we know how to implement it! Really vague requests may not be helpful or actionable and, without clarification, will have to be closed. While we really do value your requests and implement many of them, not all features are a good fit for Caddy. Most of those [make good modules](#writing-a-caddy-module), which can be made by anyone! But if a feature is not in the best interest of the Caddy project or its users in general, we may politely decline to implement it into Caddy core. Additionally, some features are bad ideas altogether (for either obvious or non-obvious reasons) which may be rejected. We'll try to explain why we reject a feature, but sometimes the best we can do is, "It's not a good fit for the project." 
### Improving documentation Caddy's documentation is available at [https://caddyserver.com/docs](https://caddyserver.com/docs) and its source is in the [website repo](https://github.com/caddyserver/website). If you would like to make a fix to the docs, please submit an issue there describing the change to make. Note that third-party module documentation is not hosted by the Caddy website, other than basic usage examples. They are managed by the individual module authors, and you will have to contact them to change their documentation. Our documentation is scoped to the Caddy project only: it is not for describing how other software or systems work, even if they relate to Caddy or web servers. That kind of content [can be found in our community wiki](https://caddy.community/c/wiki/13), however. ## Collaborator Instructions Collaborators have push rights to the repository. We grant this permission after one or more successful, high-quality PRs are merged! We thank them for their help. The expectations we have of collaborators are: - **Help review pull requests.** Be meticulous, but also kind. We love our contributors, but we critique the contribution to make it better. Multiple, thorough reviews make for the best contributions! Here are some questions to consider: - Can the change be made more elegant? - Is this a maintenance burden? - What assumptions does the code make? - Is it well-tested? - Is the change a good fit for the project? - Does it actually fix the problem or is it creating a special case instead? - Does the change incur any new dependencies? (Avoid these!) - **Answer issues.** If every collaborator helped out with issues, we could count the number of open issues on two hands. This means getting involved in the discussion, investigating the code, and yes, debugging it. It's fun. Really! :smile: Please, please help with open issues. Granted, some issues need to be done before others. 
And of course some are larger than others: you don't have to do it all yourself. Work with other collaborators as a team! - **Do not merge pull requests until they have been approved by one or two other collaborators.** If a project owner approves the PR, it can be merged (as long as the conversation has finished too). - **Prefer squashed commits over a messy merge.** If there are many little commits, please [squash the commits](https://stackoverflow.com/a/11732910/1048862) so we don't clutter the commit history. - **Don't accept new dependencies lightly.** Dependencies can make the world crash and burn, but they are sometimes necessary. Choose carefully. Extremely small dependencies (a few lines of code) can be inlined. The rest may not be needed. For those that are, Caddy uses [go modules](https://github.com/golang/go/wiki/Modules). All external dependencies must be installed as modules, and _Caddy must not export any types defined by those dependencies_. Check this diligently! - **Be extra careful in some areas of the code.** There are some critical areas in the Caddy code base that we review extra meticulously: the `caddyhttp` and `caddytls` packages especially. - **Make sure tests test the actual thing.** Double-check that the tests fail without the change, and pass with it. It's important that they assert what they're purported to assert. - **Recommended reading** - [CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments) for an idea of what we look for in good, clean Go code - [Linus Torvalds describes a good commit message](https://gist.github.com/matthewhudson/1475276) - [Best Practices for Maintainers](https://opensource.guide/best-practices/) - [Shrinking Code Review](https://alexgaynor.net/2015/dec/29/shrinking-code-review/) ## Values (WIP) - A person is always more important than code. People don't like being handled "efficiently". 
But we can still process issues and pull requests efficiently while being kind, patient, and considerate. - The ends justify the means, if the means are good. A good tree won't produce bad fruit. But if we cut corners or are hasty in our process, the end result will not be good. ## Security Policy If you think you've found a security vulnerability, please refer to our [Security Policy](https://github.com/caddyserver/caddy/security/policy) document. ## Thank you Thanks for your help! Caddy would not be what it is today without your contributions. ================================================ FILE: .github/FUNDING.yml ================================================ # These are supported funding model platforms github: [mholt] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] patreon: # Replace with a single Patreon username open_collective: # Replace with a single Open Collective username ko_fi: # Replace with a single Ko-fi username tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry liberapay: # Replace with a single Liberapay username issuehunt: # Replace with a single IssueHunt username otechie: # Replace with a single Otechie username custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] ================================================ FILE: .github/ISSUE_TEMPLATE/ISSUE.yml ================================================ name: Issue description: An actionable development item, like a bug report or feature request body: - type: markdown attributes: value: | Thank you for opening an issue! This is for actionable development items like bug reports and feature requests. If you have a question about using Caddy, please [post on our forums](https://caddy.community) instead. - type: textarea id: content attributes: label: Issue Details placeholder: Describe the issue here. 
Be specific by providing complete logs and minimal instructions to reproduce, or a thoughtful proposal, etc. validations: required: true - type: dropdown id: assistance-disclosure attributes: label: Assistance Disclosure description: "Our project allows assistance by AI/LLM tools as long as it is disclosed and described so we can better respond. Please certify whether you have used any such tooling related to this issue:" options: - - AI used - AI not used validations: required: true - type: input id: assistance-description attributes: label: If AI was used, describe the extent to which it was used. description: 'Examples: "ChatGPT translated from my native language" or "Claude proposed this change/feature"' ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: Caddy forum url: https://caddy.community about: If you have questions (or answers!) about using Caddy, please use our forum ================================================ FILE: .github/SECURITY.md ================================================ # Security Policy The Caddy project would like to make sure that it stays on top of all relevant and practically-exploitable vulnerabilities. ## Supported Versions | Version | Supported | | ----------- | ----------| | 2.latest | ✔️ | | < 2.latest | :x: | ## Acceptable Scope A security report must demonstrate a security bug in the source code from this repository. Some security problems are the result of interplay between different components of the Web, rather than a vulnerability in the web server itself. Please only report vulnerabilities in the web server itself, as we cannot coerce the rest of the Web to be fixed (for example, we do not consider IP spoofing, BGP hijacks, or missing/misconfigured HTTP headers a vulnerability in the Caddy web server). Vulnerabilities caused by misconfigurations are out of scope. 
Yes, it is entirely possible to craft and use a configuration that is unsafe, just like with every other web server; we recommend against doing that. Similarly, external misconfigurations are out of scope. For example, an open or forwarded port from a public network to a Caddy instance intended to serve only internal clients is not a vulnerability in Caddy. We do not accept reports if the steps imply or require a compromised system or third-party software, as we cannot control those. We expect that users secure their own systems and keep all their software patched. For example, if untrusted users are able to upload/write/host arbitrary files in the web root directory, it is NOT a security bug in Caddy if those files get served to clients; however, it _would_ be a valid report if a bug in Caddy's source code unintentionally gave unauthorized users the ability to upload unsafe files or delete files without relying on an unpatched system or piece of software. Client-side exploits are out of scope. In other words, it is not a bug in Caddy if the web browser does something unsafe, even if the downloaded content was served by Caddy. (Those kinds of exploits can generally be mitigated by proper configuration of HTTP headers.) As a general rule, the content served by Caddy is not considered in scope because content is configurable by the site owner or the associated web application. Security bugs in code dependencies (including Go's standard library) are out of scope. Instead, if a dependency has patched a relevant security bug, please feel free to open a public issue or pull request to update that dependency in our code. We accept security reports and patches, but do not assign CVEs, for code that has not been released with a non-prerelease tag. ## Reporting a Vulnerability We get a lot of difficult reports that turn out to be invalid. Clear, obvious reports tend to be the most credible (but are also rare). 
First please ensure your report falls within the accepted scope of security bugs (above). :warning: **YOU MUST DISCLOSE WHETHER YOU USED LLMs ("AI") IN ANY WAY.** Whether you are using AI for discovery, as part of writing the report or its replies, and/or testing or validating proofs and changes, we require you to mention the extent of it. **FAILURE TO INCLUDE A DISCLOSURE EVEN IF YOU DO NOT USE AI MAY LEAD TO IMMEDIATE DISMISSAL OF YOUR REPORT AND POTENTIAL BLOCKLISTING.** We will not waste our time chatting with bots. But if you're a human, pull up a chair and we'll drink some chocolate milk. We'll need enough information to verify the bug and make a patch. To speed things up, please include: - Most minimal possible config (without redactions!) - Command(s) - Precise HTTP requests (`curl -v` and its output please) - Full log output (please enable debug mode) - Specific minimal steps to reproduce the issue from scratch - A working patch Please DO NOT use containers, VMs, cloud instances or services, or any other complex infrastructure in your steps. Always prefer `curl -v` instead of web browsers. We consider publicly-registered domain names to be public information. This is necessary in order to maintain the integrity of certificate transparency, public DNS, and other public trust systems. Do not redact domain names from your reports. The actual content of your domain name affects Caddy's behavior, so we need the exact domain name(s) to reproduce with, or your report will be ignored. It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, depending on available resources. 
Priority will be given to credible, responsible reports that are constructive, specific, and actionable. (We get a lot of invalid reports.) Thank you for understanding. When you are ready, please submit a [new private vulnerability report](https://github.com/caddyserver/caddy/security/advisories/new). Please don't encrypt the message. It only makes the process more complicated. Please also understand that due to our nature as an open source project, we do not have a budget to award security bounties. We can only thank you. If your report is valid and a patch is released, we will not reveal your identity by default. If you wish to be credited, please give us the name to use and/or your GitHub username. If you don't provide this we can't credit you. Thanks for responsibly helping Caddy—and thousands of websites—be more secure! ================================================ FILE: .github/dependabot.yml ================================================ --- version: 2 updates: - package-ecosystem: "github-actions" directory: "/" open-pull-requests-limit: 1 groups: actions-deps: patterns: - "*" schedule: interval: "monthly" - package-ecosystem: "gomod" directory: "/" open-pull-requests-limit: 1 groups: all-updates: patterns: - "*" schedule: interval: "monthly" ================================================ FILE: .github/pull_request_template.md ================================================ ## Assistance Disclosure _This PR is missing an assistance disclosure._ ================================================ FILE: .github/workflows/ai.yml ================================================ name: AI Moderator permissions: read-all on: issues: types: [opened] issue_comment: types: [created] pull_request_review_comment: types: [created] jobs: spam-detection: runs-on: ubuntu-latest permissions: issues: write pull-requests: write models: read contents: read steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd - uses: 
github/ai-moderator@81159c370785e295c97461ade67d7c33576e9319 with: token: ${{ secrets.GITHUB_TOKEN }} spam-label: 'spam' ai-label: 'ai-generated' minimize-detected-comments: true # Built-in prompt configuration (all enabled by default) enable-spam-detection: true enable-link-spam-detection: true enable-ai-detection: true # custom-prompt-path: '.github/prompts/my-custom.prompt.yml' # Optional ================================================ FILE: .github/workflows/auto-release-pr.yml ================================================ name: Release Proposal Approval Tracker on: pull_request_review: types: [submitted, dismissed] pull_request: types: [labeled, unlabeled, synchronize, closed] permissions: contents: read pull-requests: write issues: write jobs: check-approvals: name: Track Maintainer Approvals runs-on: ubuntu-latest # Only run on PRs with release-proposal label if: contains(github.event.pull_request.labels.*.name, 'release-proposal') && github.event.pull_request.state == 'open' steps: - name: Check approvals and update PR uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 env: MAINTAINER_LOGINS: ${{ secrets.MAINTAINER_LOGINS }} with: script: | const pr = context.payload.pull_request; // Extract version from PR title (e.g., "Release Proposal: v1.2.3") const versionMatch = pr.title.match(/Release Proposal:\s*(v[\d.]+(?:-[\w.]+)?)/); const commitMatch = pr.body.match(/\*\*Target Commit:\*\*\s*`([a-f0-9]+)`/); if (!versionMatch || !commitMatch) { console.log('Could not extract version from title or commit from body'); return; } const version = versionMatch[1]; const targetCommit = commitMatch[1]; console.log(`Version: ${version}, Target Commit: ${targetCommit}`); // Get all reviews const reviews = await github.rest.pulls.listReviews({ owner: context.repo.owner, repo: context.repo.repo, pull_number: pr.number }); // Get list of maintainers const maintainerLoginsRaw = process.env.MAINTAINER_LOGINS || ''; const maintainerLogins = 
maintainerLoginsRaw .split(/[,;]/) .map(login => login.trim()) .filter(login => login.length > 0); console.log(`Maintainer logins: ${maintainerLogins.join(', ')}`); // Get the latest review from each user const latestReviewsByUser = {}; reviews.data.forEach(review => { const username = review.user.login; if (!latestReviewsByUser[username] || new Date(review.submitted_at) > new Date(latestReviewsByUser[username].submitted_at)) { latestReviewsByUser[username] = review; } }); // Count approvals from maintainers const maintainerApprovals = Object.entries(latestReviewsByUser) .filter(([username, review]) => maintainerLogins.includes(username) && review.state === 'APPROVED' ) .map(([username, review]) => username); const approvalCount = maintainerApprovals.length; console.log(`Found ${approvalCount} maintainer approvals from: ${maintainerApprovals.join(', ')}`); // Get current labels const currentLabels = pr.labels.map(label => label.name); const hasApprovedLabel = currentLabels.includes('approved'); const hasAwaitingApprovalLabel = currentLabels.includes('awaiting-approval'); if (approvalCount >= 2 && !hasApprovedLabel) { console.log('✅ Quorum reached! 
Updating PR...'); // Remove awaiting-approval label if present if (hasAwaitingApprovalLabel) { await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, name: 'awaiting-approval' }).catch(e => console.log('Label not found:', e.message)); } // Add approved label await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, labels: ['approved'] }); // Add comment with tagging instructions const approversList = maintainerApprovals.map(u => `@${u}`).join(', '); const commentBody = [ '## ✅ Approval Quorum Reached', '', `This release proposal has been approved by ${approvalCount} maintainers: ${approversList}`, '', '### Tagging Instructions', '', 'A maintainer should now create and push the signed tag:', '', '```bash', `git checkout ${targetCommit}`, `git tag -s ${version} -m "Release ${version}"`, `git push origin ${version}`, `git checkout -`, '```', '', 'The release workflow will automatically start when the tag is pushed.' 
].join('\n'); await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, body: commentBody }); console.log('Posted tagging instructions'); } else if (approvalCount < 2 && hasApprovedLabel) { console.log('⚠️ Approval count dropped below quorum, removing approved label'); // Remove approved label await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, name: 'approved' }).catch(e => console.log('Label not found:', e.message)); // Add awaiting-approval label if (!hasAwaitingApprovalLabel) { await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, labels: ['awaiting-approval'] }); } } else { console.log(`⏳ Waiting for more approvals (${approvalCount}/2 required)`); } handle-pr-closed: name: Handle PR Closed Without Tag runs-on: ubuntu-latest if: | contains(github.event.pull_request.labels.*.name, 'release-proposal') && github.event.action == 'closed' && !contains(github.event.pull_request.labels.*.name, 'released') steps: - name: Add cancelled label and comment uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const pr = context.payload.pull_request; // Check if the release-in-progress label is present const hasReleaseInProgress = pr.labels.some(label => label.name === 'release-in-progress'); if (hasReleaseInProgress) { // PR was closed while release was in progress - this is unusual await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, body: '⚠️ **Warning:** This PR was closed while a release was in progress. This may indicate an error. Please verify the release status.' }); } else { // PR was closed before tag was created - this is normal cancellation const versionMatch = pr.title.match(/Release Proposal:\s*(v[\d.]+(?:-[\w.]+)?)/); const version = versionMatch ? 
versionMatch[1] : 'unknown'; await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, body: `## 🚫 Release Proposal Cancelled\n\nThis release proposal for ${version} was closed without creating the tag.\n\nIf you want to proceed with this release later, you can create a new release proposal.` }); } // Add cancelled label await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, labels: ['cancelled'] }); // Remove other workflow labels if present const labelsToRemove = ['awaiting-approval', 'approved', 'release-in-progress']; for (const label of labelsToRemove) { try { await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number, name: label }); } catch (e) { console.log(`Label ${label} not found or already removed`); } } console.log('Added cancelled label and cleaned up workflow labels'); ================================================ FILE: .github/workflows/ci.yml ================================================ # Used as inspiration: https://github.com/mvdan/github-actions-golang name: Tests on: push: branches: - master - 2.* pull_request: branches: - master - 2.* env: GOFLAGS: '-tags=nobadger,nomysql,nopgx' # https://github.com/actions/setup-go/issues/491 GOTOOLCHAIN: local permissions: contents: read jobs: test: strategy: # Default is true, cancels jobs for other platforms in the matrix if one fails fail-fast: false matrix: os: - linux - mac - windows go: - '1.26' include: # Set the minimum Go patch version for the given Go minor # Usable via ${{ matrix.GO_SEMVER }} - go: '1.26' GO_SEMVER: '~1.26.0' # Set some variables per OS, usable via ${{ matrix.VAR }} # OS_LABEL: the VM label from GitHub Actions (see https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories) # 
CADDY_BIN_PATH: the path to the compiled Caddy binary, for artifact publishing # SUCCESS: the typical value for $? per OS (Windows/pwsh returns 'True') - os: linux OS_LABEL: ubuntu-latest CADDY_BIN_PATH: ./cmd/caddy/caddy SUCCESS: 0 - os: mac OS_LABEL: macos-14 CADDY_BIN_PATH: ./cmd/caddy/caddy SUCCESS: 0 - os: windows OS_LABEL: windows-latest CADDY_BIN_PATH: ./cmd/caddy/caddy.exe SUCCESS: 'True' runs-on: ${{ matrix.OS_LABEL }} permissions: contents: read pull-requests: read actions: write # to allow uploading artifacts and cache steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install Go uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: ${{ matrix.GO_SEMVER }} check-latest: true # These tools would be useful if we later decide to reinvestigate # publishing test/coverage reports to some tool for easier consumption # - name: Install test and coverage analysis tools # run: | # go get github.com/axw/gocov/gocov # go get github.com/AlekSi/gocov-xml # go get -u github.com/jstemmer/go-junit-report # echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - name: Print Go version and environment id: vars shell: bash run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" printf "\n\nGo environment:\n\n" go env printf "\n\nSystem environment:\n\n" env printf "Git version: $(git version)\n\n" # Calculate the short SHA1 hash of the git commit echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - name: Get dependencies run: | go get -v -t -d ./... 
# mkdir test-results - name: Build Caddy working-directory: ./cmd/caddy env: CGO_ENABLED: 0 run: | go build -trimpath -ldflags="-w -s" -v - name: Smoke test Caddy working-directory: ./cmd/caddy run: | ./caddy start ./caddy stop - name: Publish Build Artifact uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }} path: ${{ matrix.CADDY_BIN_PATH }} compression-level: 0 # Commented bits below were useful to allow the job to continue # even if the tests fail, so we can publish the report separately # For info about set-output, see https://stackoverflow.com/questions/57850553/github-actions-check-steps-status - name: Run tests # id: step_test # continue-on-error: true run: | # (go test -v -coverprofile=cover-profile.out -race ./... 2>&1) > test-results/test-result.out go test -v -coverprofile="cover-profile.out" -short -race ./... # echo "status=$?" >> $GITHUB_OUTPUT # Relevant step if we reinvestigate publishing test/coverage reports # - name: Prepare coverage reports # run: | # mkdir coverage # gocov convert cover-profile.out > coverage/coverage.json # # Because Windows doesn't work with input redirection like *nix, but output redirection works. 
# (cat ./coverage/coverage.json | gocov-xml) > coverage/coverage.xml # To return the correct result even though we set 'continue-on-error: true' # - name: Coerce correct build result # if: matrix.os != 'windows' && steps.step_test.outputs.status != ${{ matrix.SUCCESS }} # run: | # echo "step_test ${{ steps.step_test.outputs.status }}\n" # exit 1 s390x-test: name: test (s390x on IBM Z) permissions: contents: read pull-requests: read runs-on: ubuntu-latest if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]' continue-on-error: true # August 2020: s390x VM is down due to weather and power issues steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit allowed-endpoints: ci-s390x.caddyserver.com:22 - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run Tests run: | set +e mkdir -p ~/.ssh && echo -e "${SSH_KEY//_/\\n}" > ~/.ssh/id_ecdsa && chmod og-rwx ~/.ssh/id_ecdsa # short sha is enough? short_sha=$(git rev-parse --short HEAD) # To shorten the following lines ssh_opts="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" ssh_host="$CI_USER@ci-s390x.caddyserver.com" # The environment is fresh, so there's no point in keeping accepting and adding the key. rsync -arz -e "ssh $ssh_opts" --progress --delete --exclude '.git' . "$ssh_host":/var/tmp/"$short_sha" ssh $ssh_opts -t "$ssh_host" bash <<EOF cd /var/tmp/$short_sha retries=3 exit_code=0 while ((retries > 0)); do CGO_ENABLED=0 go test -p 1 -v ./... exit_code=$? if ((exit_code == 0)); then break fi echo "\n\nTest failed: \$exit_code, retrying..." ((retries--)) done echo "Remote exit code: \$exit_code" exit \$exit_code EOF test_result=$? 
# There's no need leaving the files around ssh $ssh_opts "$ssh_host" "rm -rf /var/tmp/'$short_sha'" echo "Test exit code: $test_result" exit $test_result env: SSH_KEY: ${{ secrets.S390X_SSH_KEY }} CI_USER: ${{ secrets.CI_USER }} goreleaser-check: runs-on: ubuntu-latest permissions: contents: read pull-requests: read if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]' steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0 with: version: latest args: check - name: Install Go uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: "~1.26" check-latest: true - name: Install xcaddy run: | go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest xcaddy version - uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0 with: version: latest args: build --single-target --snapshot env: TAG: ${{ github.head_ref || github.ref_name }} ================================================ FILE: .github/workflows/cross-build.yml ================================================ name: Cross-Build on: push: branches: - master - 2.* pull_request: branches: - master - 2.* env: GOFLAGS: '-tags=nobadger,nomysql,nopgx' CGO_ENABLED: '0' # https://github.com/actions/setup-go/issues/491 GOTOOLCHAIN: local permissions: contents: read jobs: build: strategy: fail-fast: false matrix: goos: - 'aix' - 'linux' - 'solaris' - 'illumos' - 'dragonfly' - 'freebsd' - 'openbsd' - 'windows' - 'darwin' - 'netbsd' go: - '1.26' include: # Set the minimum Go patch version for the given Go minor # Usable via ${{ matrix.GO_SEMVER }} - go: '1.26' GO_SEMVER: '~1.26.0' runs-on: ubuntu-latest permissions: 
contents: read pull-requests: read continue-on-error: true steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install Go uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: ${{ matrix.GO_SEMVER }} check-latest: true - name: Print Go version and environment id: vars run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" printf "\n\nGo environment:\n\n" go env printf "\n\nSystem environment:\n\n" env - name: Run Build env: GOOS: ${{ matrix.goos }} GOARCH: ${{ matrix.goos == 'aix' && 'ppc64' || 'amd64' }} shell: bash continue-on-error: true working-directory: ./cmd/caddy run: go build -trimpath -o caddy-"$GOOS"-$GOARCH 2> /dev/null ================================================ FILE: .github/workflows/lint.yml ================================================ name: Lint on: push: branches: - master - 2.* pull_request: branches: - master - 2.* permissions: contents: read env: # https://github.com/actions/setup-go/issues/491 GOTOOLCHAIN: local jobs: # From https://github.com/golangci/golangci-lint-action golangci: permissions: contents: read # for actions/checkout to fetch code pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: lint strategy: matrix: os: - linux - mac - windows include: - os: linux OS_LABEL: ubuntu-latest - os: mac OS_LABEL: macos-14 - os: windows OS_LABEL: windows-latest runs-on: ${{ matrix.OS_LABEL }} steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: 
'~1.26' check-latest: true - name: golangci-lint uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 with: version: latest # Windows times out frequently after about 5m50s if we don't set a longer timeout. args: --timeout 10m # Optional: show only new issues if it's a pull request. The default value is `false`. # only-new-issues: true govulncheck: permissions: contents: read pull-requests: read runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: govulncheck uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1.0.4 with: go-version-input: '~1.26.0' check-latest: true dependency-review: runs-on: ubuntu-latest permissions: contents: read pull-requests: write steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: 'Checkout Repository' uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: 'Dependency Review' uses: actions/dependency-review-action@05fe4576374b728f0c523d6a13d64c25081e0803 # v4.8.3 with: comment-summary-in-pr: on-failure # https://github.com/actions/dependency-review-action/issues/430#issuecomment-1468975566 base-ref: ${{ github.event.pull_request.base.sha || 'master' }} head-ref: ${{ github.event.pull_request.head.sha || github.ref }} ================================================ FILE: .github/workflows/release-proposal.yml ================================================ name: Release Proposal # This workflow creates a release proposal as a PR that requires approval from maintainers # Triggered manually by maintainers when ready to prepare a release on: workflow_dispatch: inputs: version: description: 'Version to release (e.g., v2.8.0)' required: true type: string commit_hash: description: 'Commit 
hash to release from' required: true type: string permissions: contents: read jobs: create-proposal: name: Create Release Proposal runs-on: ubuntu-latest permissions: contents: write pull-requests: write issues: write steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 - name: Trim and validate inputs id: inputs run: | # Trim whitespace from inputs VERSION=$(echo "${{ inputs.version }}" | xargs) COMMIT_HASH=$(echo "${{ inputs.commit_hash }}" | xargs) echo "version=$VERSION" >> $GITHUB_OUTPUT echo "commit_hash=$COMMIT_HASH" >> $GITHUB_OUTPUT # Validate version format if [[ ! "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then echo "Error: Version must follow semver format (e.g., v2.8.0 or v2.8.0-beta.1)" exit 1 fi # Validate commit hash format if [[ ! "$COMMIT_HASH" =~ ^[a-f0-9]{7,40}$ ]]; then echo "Error: Commit hash must be a valid SHA (7-40 characters)" exit 1 fi # Check if commit exists if ! 
git cat-file -e "$COMMIT_HASH"; then echo "Error: Commit $COMMIT_HASH does not exist" exit 1 fi - name: Check if tag already exists run: | if git rev-parse "${{ steps.inputs.outputs.version }}" >/dev/null 2>&1; then echo "Error: Tag ${{ steps.inputs.outputs.version }} already exists" exit 1 fi - name: Check for existing proposal PR id: check_existing uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const version = '${{ steps.inputs.outputs.version }}'; // Search for existing open PRs with release-proposal label that match this version const openPRs = await github.rest.pulls.list({ owner: context.repo.owner, repo: context.repo.repo, state: 'open', sort: 'updated', direction: 'desc' }); const existingOpenPR = openPRs.data.find(pr => pr.title.includes(version) && pr.labels.some(label => label.name === 'release-proposal') ); if (existingOpenPR) { const hasReleased = existingOpenPR.labels.some(label => label.name === 'released'); const hasReleaseInProgress = existingOpenPR.labels.some(label => label.name === 'release-in-progress'); if (hasReleased || hasReleaseInProgress) { core.setFailed(`A release for ${version} is already in progress or completed: ${existingOpenPR.html_url}`); } else { core.setFailed(`An open release proposal already exists for ${version}: ${existingOpenPR.html_url}\n\nPlease use the existing PR or close it first.`); } return; } // Check for closed PRs with this version that were cancelled const closedPRs = await github.rest.pulls.list({ owner: context.repo.owner, repo: context.repo.repo, state: 'closed', sort: 'updated', direction: 'desc' }); const cancelledPR = closedPRs.data.find(pr => pr.title.includes(version) && pr.labels.some(label => label.name === 'release-proposal') && pr.labels.some(label => label.name === 'cancelled') ); if (cancelledPR) { console.log(`Found previously cancelled proposal for ${version}: ${cancelledPR.html_url}`); console.log('Creating new proposal to replace cancelled one...'); 
} else { console.log(`No existing proposal found for ${version}, proceeding...`); } - name: Generate changelog and create branch id: setup run: | VERSION="${{ steps.inputs.outputs.version }}" COMMIT_HASH="${{ steps.inputs.outputs.commit_hash }}" # Create a new branch for the release proposal BRANCH_NAME="release_proposal-$VERSION" git checkout -b "$BRANCH_NAME" # Calculate how many commits behind HEAD COMMITS_BEHIND=$(git rev-list --count ${COMMIT_HASH}..HEAD) if [ "$COMMITS_BEHIND" -eq 0 ]; then BEHIND_INFO="This is the latest commit (HEAD)" else BEHIND_INFO="This commit is **${COMMITS_BEHIND} commits behind HEAD**" fi echo "commits_behind=$COMMITS_BEHIND" >> $GITHUB_OUTPUT echo "behind_info=$BEHIND_INFO" >> $GITHUB_OUTPUT # Get the last tag LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "") if [ -z "$LAST_TAG" ]; then echo "No previous tag found, generating full changelog" COMMITS=$(git log --pretty=format:"- %s (%h)" --reverse "$COMMIT_HASH") else echo "Generating changelog since $LAST_TAG" COMMITS=$(git log --pretty=format:"- %s (%h)" --reverse "${LAST_TAG}..$COMMIT_HASH") fi # Store changelog for PR body (multiline output uses the name<<EOF delimiter syntax) CLEANSED_COMMITS=$(echo "$COMMITS" | sed 's/`/\\`/g') echo "changelog<<EOF" >> $GITHUB_OUTPUT echo "$CLEANSED_COMMITS" >> $GITHUB_OUTPUT echo "EOF" >> $GITHUB_OUTPUT # Create empty commit for the PR git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" git commit --allow-empty -m "Release proposal for $VERSION" # Push the branch git push origin "$BRANCH_NAME" echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT - name: Create release proposal PR id: create_pr uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const changelog = `${{ steps.setup.outputs.changelog }}`; const pr = await github.rest.pulls.create({ owner: context.repo.owner, repo: context.repo.repo, title: `Release Proposal: ${{ steps.inputs.outputs.version }}`, head: '${{ 
steps.setup.outputs.branch_name }}', base: 'master', body: `## Release Proposal: ${{ steps.inputs.outputs.version }} **Target Commit:** \`${{ steps.inputs.outputs.commit_hash }}\` **Requested by:** @${{ github.actor }} **Commit Status:** ${{ steps.setup.outputs.behind_info }} This PR proposes creating release tag \`${{ steps.inputs.outputs.version }}\` at commit \`${{ steps.inputs.outputs.commit_hash }}\`. ### Approval Process This PR requires **approval from 2+ maintainers** before the tag can be created. ### What happens next? 1. Maintainers review this proposal 2. When 2+ maintainer approvals are received, an automated workflow will post tagging instructions 3. A maintainer manually creates and pushes the signed tag 4. The release workflow is triggered automatically by the tag push 5. Upon release completion, this PR is closed and the branch is deleted ### Changes Since Last Release ${changelog} ### Release Checklist - [ ] All tests pass - [ ] Security review completed - [ ] Documentation updated - [ ] Breaking changes documented --- **Note:** Tag creation is manual and requires a signed tag from a maintainer.`, draft: true }); // Add labels await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.data.number, labels: ['release-proposal', 'awaiting-approval'] }); console.log(`Created PR: ${pr.data.html_url}`); return { number: pr.data.number, url: pr.data.html_url }; result-encoding: json - name: Post summary run: | echo "## Release Proposal PR Created! 
🚀" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "Version: **${{ steps.inputs.outputs.version }}**" >> $GITHUB_STEP_SUMMARY echo "Commit: **${{ steps.inputs.outputs.commit_hash }}**" >> $GITHUB_STEP_SUMMARY echo "Status: ${{ steps.setup.outputs.behind_info }}" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "PR: ${{ fromJson(steps.create_pr.outputs.result).url }}" >> $GITHUB_STEP_SUMMARY ================================================ FILE: .github/workflows/release.yml ================================================ name: Release on: push: tags: - 'v*.*.*' env: # https://github.com/actions/setup-go/issues/491 GOTOOLCHAIN: local permissions: contents: read jobs: verify-tag: name: Verify Tag Signature and Approvals runs-on: ubuntu-latest permissions: contents: write pull-requests: write issues: write outputs: verification_passed: ${{ steps.verify.outputs.passed }} tag_version: ${{ steps.info.outputs.version }} proposal_issue_number: ${{ steps.find_proposal.outputs.result && fromJson(steps.find_proposal.outputs.result).number || '' }} steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 # Force fetch upstream tags -- because 65 minutes # tl;dr: actions/checkout@v3 runs this line: # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/ # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran: # git fetch --prune --unshallow # which doesn't overwrite that tag because that would be destructive. # Credit to @francislavoie for the investigation. 
# https://github.com/actions/checkout/issues/290#issuecomment-680260080 - name: Force fetch upstream tags run: git fetch --tags --force - name: Get tag info id: info run: | echo "version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT # https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027 - name: Print Go version and environment id: vars run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" printf "\n\nGo environment:\n\n" go env printf "\n\nSystem environment:\n\n" env echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT # Add "pip install" CLI tools to PATH echo ~/.local/bin >> $GITHUB_PATH # Parse semver TAG=${GITHUB_REF/refs\/tags\//} SEMVER_RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z\.-]*\)' TAG_MAJOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\1#"` TAG_MINOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\2#"` TAG_PATCH=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\3#"` TAG_SPECIAL=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\4#"` echo "tag_major=${TAG_MAJOR}" >> $GITHUB_OUTPUT echo "tag_minor=${TAG_MINOR}" >> $GITHUB_OUTPUT echo "tag_patch=${TAG_PATCH}" >> $GITHUB_OUTPUT echo "tag_special=${TAG_SPECIAL}" >> $GITHUB_OUTPUT - name: Validate commits and tag signatures id: verify env: signing_keys: ${{ secrets.SIGNING_KEYS }} run: | # Read the string into an array, splitting by IFS IFS=";" read -ra keys_collection <<< "$signing_keys" # ref: https://docs.github.com/en/actions/reference/workflows-and-actions/contexts#example-usage-of-the-runner-context touch "${{ runner.temp }}/allowed_signers" # Iterate and print the split elements for item in "${keys_collection[@]}"; do # trim leading whitespaces item="${item##*( )}" # trim trailing whitespaces item="${item%%*( )}" IFS=" " read -ra key_components <<< "$item" # git wants it in format: email address, type, public key # ssh has 
it in format: type, public key, email address echo "${key_components[2]} namespaces=\"git\" ${key_components[0]} ${key_components[1]}" >> "${{ runner.temp }}/allowed_signers" done git config set --global gpg.ssh.allowedSignersFile "${{ runner.temp }}/allowed_signers" echo "Verifying the tag: ${{ steps.vars.outputs.version_tag }}" # Verify the tag is signed if ! git verify-tag -v "${{ steps.vars.outputs.version_tag }}" 2>&1; then echo "❌ Tag verification failed!" echo "passed=false" >> $GITHUB_OUTPUT git push --delete origin "${{ steps.vars.outputs.version_tag }}" exit 1 fi # Run it again to capture the output git verify-tag -v "${{ steps.vars.outputs.version_tag }}" 2>&1 | tee /tmp/verify-output.txt; # SSH verification output typically includes the key fingerprint # Use GNU grep with Perl regex for cleaner extraction (Linux environment) KEY_SHA256=$(grep -oP "SHA256:[\"']?\K[A-Za-z0-9+/=]+(?=[\"']?)" /tmp/verify-output.txt | head -1 || echo "") if [ -z "$KEY_SHA256" ]; then # Try alternative pattern with "key" prefix KEY_SHA256=$(grep -oP "key SHA256:[\"']?\K[A-Za-z0-9+/=]+(?=[\"']?)" /tmp/verify-output.txt | head -1 || echo "") fi if [ -z "$KEY_SHA256" ]; then # Fallback: extract any base64-like string (40+ chars) KEY_SHA256=$(grep -oP '[A-Za-z0-9+/]{40,}=?' /tmp/verify-output.txt | head -1 || echo "") fi if [ -z "$KEY_SHA256" ]; then echo "Somehow could not extract SSH key fingerprint from git verify-tag output" echo "Cancelling flow and deleting tag" echo "passed=false" >> $GITHUB_OUTPUT git push --delete origin "${{ steps.vars.outputs.version_tag }}" exit 1 fi echo "✅ Tag verification succeeded!" 
echo "passed=true" >> $GITHUB_OUTPUT echo "key_id=$KEY_SHA256" >> $GITHUB_OUTPUT - name: Find related release proposal id: find_proposal uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const version = '${{ steps.vars.outputs.version_tag }}'; // Search for PRs with release-proposal label that match this version const prs = await github.rest.pulls.list({ owner: context.repo.owner, repo: context.repo.repo, state: 'open', // only open proposal PRs are considered for an in-flight release sort: 'updated', direction: 'desc' }); // Find the most recent PR for this version const proposal = prs.data.find(pr => pr.title.includes(version) && pr.labels.some(label => label.name === 'release-proposal') ); if (!proposal) { console.log(`⚠️ No release proposal PR found for ${version}`); console.log('This might be a hotfix or emergency release'); return { number: null, approved: true, approvals: 0, proposedCommit: null }; } console.log(`Found proposal PR #${proposal.number} for version ${version}`); // Extract commit hash from PR body const commitMatch = proposal.body.match(/\*\*Target Commit:\*\*\s*`([a-f0-9]+)`/); const proposedCommit = commitMatch ? 
commitMatch[1] : null; if (proposedCommit) { console.log(`Proposal was for commit: ${proposedCommit}`); } else { console.log('⚠️ No target commit hash found in PR body'); } // Get PR reviews to extract approvers let approvers = 'Validated by automation'; let approvalCount = 2; // Minimum required try { const reviews = await github.rest.pulls.listReviews({ owner: context.repo.owner, repo: context.repo.repo, pull_number: proposal.number }); // Get latest review per user and filter for approvals const latestReviewsByUser = {}; reviews.data.forEach(review => { const username = review.user.login; if (!latestReviewsByUser[username] || new Date(review.submitted_at) > new Date(latestReviewsByUser[username].submitted_at)) { latestReviewsByUser[username] = review; } }); const approvalReviews = Object.values(latestReviewsByUser).filter(review => review.state === 'APPROVED' ); if (approvalReviews.length > 0) { approvers = approvalReviews.map(r => '@' + r.user.login).join(', '); approvalCount = approvalReviews.length; console.log(`Found ${approvalCount} approvals from: ${approvers}`); } } catch (error) { console.log(`Could not fetch reviews: ${error.message}`); } return { number: proposal.number, approved: true, approvals: approvalCount, approvers: approvers, proposedCommit: proposedCommit }; result-encoding: json - name: Verify proposal commit run: | APPROVALS='${{ steps.find_proposal.outputs.result }}' # Parse JSON PROPOSED_COMMIT=$(echo "$APPROVALS" | jq -r '.proposedCommit') CURRENT_COMMIT="${{ steps.info.outputs.sha }}" echo "Proposed commit: $PROPOSED_COMMIT" echo "Current commit: $CURRENT_COMMIT" # Check if commits match (if proposal had a target commit) if [ "$PROPOSED_COMMIT" != "null" ] && [ -n "$PROPOSED_COMMIT" ]; then # Normalize both commits to full SHA for comparison PROPOSED_FULL=$(git rev-parse "$PROPOSED_COMMIT" 2>/dev/null || echo "") CURRENT_FULL=$(git rev-parse "$CURRENT_COMMIT" 2>/dev/null || echo "") if [ -z "$PROPOSED_FULL" ]; then echo "⚠️ Could not 
resolve proposed commit: $PROPOSED_COMMIT" elif [ "$PROPOSED_FULL" != "$CURRENT_FULL" ]; then echo "❌ Commit mismatch!" echo "The tag points to commit $CURRENT_FULL but the proposal was for $PROPOSED_FULL" echo "This indicates an error in tag creation." # Delete the tag remotely git push --delete origin "${{ steps.vars.outputs.version_tag }}" echo "Tag ${{steps.vars.outputs.version_tag}} has been deleted" exit 1 else echo "✅ Commit hash matches proposal" fi else echo "⚠️ No target commit found in proposal (might be legacy release)" fi echo "✅ Tag verification completed" - name: Update release proposal PR if: fromJson(steps.find_proposal.outputs.result).number != null uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const result = ${{ steps.find_proposal.outputs.result }}; if (result.number) { // Add in-progress label await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: result.number, labels: ['release-in-progress'] }); // Remove approved label if present try { await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: result.number, name: 'approved' }); } catch (e) { console.log('Approved label not found:', e.message); } const commentBody = [ '## 🚀 Release Workflow Started', '', '- **Tag:** ${{ steps.info.outputs.version }}', '- **Signed by key:** ${{ steps.verify.outputs.key_id }}', '- **Commit:** ${{ steps.info.outputs.sha }}', '- **Approved by:** ' + result.approvers, '', 'Release workflow is now running. This PR will be updated when the release is published.' 
].join('\n'); await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: result.number, body: commentBody }); } - name: Summary run: | APPROVALS='${{ steps.find_proposal.outputs.result }}' PROPOSED_COMMIT=$(echo "$APPROVALS" | jq -r '.proposedCommit // "N/A"') APPROVERS=$(echo "$APPROVALS" | jq -r '.approvers // "N/A"') echo "## Tag Verification Summary 🔐" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "- **Tag:** ${{ steps.info.outputs.version }}" >> $GITHUB_STEP_SUMMARY echo "- **Commit:** ${{ steps.info.outputs.sha }}" >> $GITHUB_STEP_SUMMARY echo "- **Proposed Commit:** $PROPOSED_COMMIT" >> $GITHUB_STEP_SUMMARY echo "- **Signature:** ✅ Verified" >> $GITHUB_STEP_SUMMARY echo "- **Signed by:** ${{ steps.verify.outputs.key_id }}" >> $GITHUB_STEP_SUMMARY echo "- **Approvals:** ✅ Sufficient" >> $GITHUB_STEP_SUMMARY echo "- **Approved by:** $APPROVERS" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "Proceeding with release build..." 
>> $GITHUB_STEP_SUMMARY release: name: Release needs: verify-tag if: ${{ needs.verify-tag.outputs.verification_passed == 'true' }} strategy: matrix: os: - ubuntu-latest go: - '1.26' include: # Set the minimum Go patch version for the given Go minor # Usable via ${{ matrix.GO_SEMVER }} - go: '1.26' GO_SEMVER: '~1.26.0' runs-on: ${{ matrix.os }} # https://github.com/sigstore/cosign/issues/1258#issuecomment-1002251233 # https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings permissions: id-token: write # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps#permission-on-contents # "Releases" is part of `contents`, so it needs the `write` contents: write issues: write pull-requests: write steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 - name: Install Go uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version: ${{ matrix.GO_SEMVER }} check-latest: true # Force fetch upstream tags -- because 65 minutes # tl;dr: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v4.2.2 runs this line: # git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/ # which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran: # git fetch --prune --unshallow # which doesn't overwrite that tag because that would be destructive. # Credit to @francislavoie for the investigation. 
# https://github.com/actions/checkout/issues/290#issuecomment-680260080 - name: Force fetch upstream tags run: git fetch --tags --force # https://github.community/t5/GitHub-Actions/How-to-get-just-the-tag-name/m-p/32167/highlight/true#M1027 - name: Print Go version and environment id: vars run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" printf "\n\nGo environment:\n\n" go env printf "\n\nSystem environment:\n\n" env echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT # Add "pip install" CLI tools to PATH echo ~/.local/bin >> $GITHUB_PATH # Parse semver TAG=${GITHUB_REF/refs\/tags\//} SEMVER_RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z\.-]*\)' TAG_MAJOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\1#"` TAG_MINOR=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\2#"` TAG_PATCH=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\3#"` TAG_SPECIAL=`echo ${TAG#v} | sed -e "s#$SEMVER_RE#\4#"` echo "tag_major=${TAG_MAJOR}" >> $GITHUB_OUTPUT echo "tag_minor=${TAG_MINOR}" >> $GITHUB_OUTPUT echo "tag_patch=${TAG_PATCH}" >> $GITHUB_OUTPUT echo "tag_special=${TAG_SPECIAL}" >> $GITHUB_OUTPUT # Cloudsmith CLI tooling for pushing releases # See https://help.cloudsmith.io/docs/cli - name: Install Cloudsmith CLI run: pip install --upgrade cloudsmith-cli - name: Install Cosign uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # main - name: Cosign version run: cosign version - name: Install Syft uses: anchore/sbom-action/download-syft@17ae1740179002c89186b61233e0f892c3118b11 # main - name: Syft version run: syft version - name: Install xcaddy run: | go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest xcaddy version # GoReleaser will take care of publishing those artifacts into the release - name: Run GoReleaser uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0 with: version: latest args: release --clean --timeout 60m env: 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAG: ${{ steps.vars.outputs.version_tag }} COSIGN_EXPERIMENTAL: 1 # Only publish on non-special tags (e.g. non-beta) # We will continue to push to Gemfury for the foreseeable future, although # Cloudsmith is probably better, to not break things for existing users of Gemfury. # See https://gemfury.com/caddy/deb:caddy - name: Publish .deb to Gemfury if: ${{ steps.vars.outputs.tag_special == '' }} env: GEMFURY_PUSH_TOKEN: ${{ secrets.GEMFURY_PUSH_TOKEN }} run: | for filename in dist/*.deb; do # armv6 and armv7 are both "armhf" so we can skip the duplicate if [[ "$filename" == *"armv6"* ]]; then echo "Skipping $filename" continue fi curl -F package=@"$filename" https://${GEMFURY_PUSH_TOKEN}:@push.fury.io/caddy/ done # Publish only special tags (unstable/beta/rc) to the "testing" repo # See https://cloudsmith.io/~caddy/repos/testing/ - name: Publish .deb to Cloudsmith (special tags) if: ${{ steps.vars.outputs.tag_special != '' }} env: CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }} run: | for filename in dist/*.deb; do # armv6 and armv7 are both "armhf" so we can skip the duplicate if [[ "$filename" == *"armv6"* ]]; then echo "Skipping $filename" continue fi echo "Pushing $filename to 'testing'" cloudsmith push deb caddy/testing/any-distro/any-version $filename done # Publish stable tags to Cloudsmith to both repos, "stable" and "testing" # See https://cloudsmith.io/~caddy/repos/stable/ - name: Publish .deb to Cloudsmith (stable tags) if: ${{ steps.vars.outputs.tag_special == '' }} env: CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }} run: | for filename in dist/*.deb; do # armv6 and armv7 are both "armhf" so we can skip the duplicate if [[ "$filename" == *"armv6"* ]]; then echo "Skipping $filename" continue fi echo "Pushing $filename to 'stable'" cloudsmith push deb caddy/stable/any-distro/any-version $filename echo "Pushing $filename to 'testing'" cloudsmith push deb caddy/testing/any-distro/any-version $filename 
done - name: Update release proposal PR if: needs.verify-tag.outputs.proposal_issue_number != '' uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 with: script: | const prNumber = parseInt('${{ needs.verify-tag.outputs.proposal_issue_number }}'); if (prNumber) { // Get PR details to find the branch const pr = await github.rest.pulls.get({ owner: context.repo.owner, repo: context.repo.repo, pull_number: prNumber }); const branchName = pr.data.head.ref; // Remove in-progress label try { await github.rest.issues.removeLabel({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, name: 'release-in-progress' }); } catch (e) { console.log('Label not found:', e.message); } // Add released label await github.rest.issues.addLabels({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, labels: ['released'] }); // Add final comment await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, body: '## ✅ Release Published\n\nThe release has been successfully published and is now available.' 
}); // Close the PR if it's still open if (pr.data.state === 'open') { await github.rest.pulls.update({ owner: context.repo.owner, repo: context.repo.repo, pull_number: prNumber, state: 'closed' }); console.log(`Closed PR #${prNumber}`); } // Delete the branch try { await github.rest.git.deleteRef({ owner: context.repo.owner, repo: context.repo.repo, ref: `heads/${branchName}` }); console.log(`Deleted branch: ${branchName}`); } catch (e) { console.log(`Could not delete branch ${branchName}: ${e.message}`); } } ================================================ FILE: .github/workflows/release_published.yml ================================================ name: Release Published # Event payload: https://developer.github.com/webhooks/event-payloads/#release on: release: types: [published] permissions: contents: read jobs: release: name: Release Published strategy: matrix: os: - ubuntu-latest runs-on: ${{ matrix.os }} permissions: contents: read pull-requests: read actions: write steps: # See https://github.com/peter-evans/repository-dispatch - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: Trigger event on caddyserver/dist uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1 with: token: ${{ secrets.REPO_DISPATCH_TOKEN }} repository: caddyserver/dist event-type: release-tagged client-payload: '{"tag": "${{ github.event.release.tag_name }}"}' - name: Trigger event on caddyserver/caddy-docker uses: peter-evans/repository-dispatch@28959ce8df70de7be546dd1250a005dd32156697 # v4.0.1 with: token: ${{ secrets.REPO_DISPATCH_TOKEN }} repository: caddyserver/caddy-docker event-type: release-tagged client-payload: '{"tag": "${{ github.event.release.tag_name }}"}' ================================================ FILE: .github/workflows/scorecard.yml ================================================ # This workflow uses actions 
that are not certified by GitHub. They are provided # by a third-party and are governed by separate terms of service, privacy # policy, and support documentation. name: OpenSSF Scorecard supply-chain security on: # For Branch-Protection check. Only the default branch is supported. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection branch_protection_rule: # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: '20 2 * * 5' push: branches: [ "master", "2.*" ] pull_request: branches: [ "master", "2.*" ] # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled. if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request' permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). id-token: write # Uncomment the permissions below if installing in a private repository. # contents: read # actions: read steps: - name: Harden the runner (Audit all outbound calls) uses: step-security/harden-runner@a90bcbc6539c36a85cdfeb73f7e2f433735f215b # v2.15.0 with: egress-policy: audit - name: "Checkout code" uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: # - you want to enable the Branch-Protection check on a *public* repository, or # - you are installing Scorecard on a *private* repository # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. # repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. # - See https://github.com/ossf/scorecard-action#publishing-results. # For private repositories: # - `publish_results` will always be set to `false`, regardless # of the value entered here. publish_results: true # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore # file_mode: git # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: SARIF file path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@89a39a4e59826350b863aa6b6252a07ad50cf83e # v3.29.5 with: sarif_file: results.sarif ================================================ FILE: .gitignore ================================================ _gitignore/ *.log Caddyfile Caddyfile.* !caddyfile/ !caddyfile.go # artifacts from pprof tooling *.prof *.test # build artifacts and helpers cmd/caddy/caddy cmd/caddy/caddy.exe cmd/caddy/tmp/*.exe cmd/caddy/.env # mac specific .DS_Store # go modules vendor # goreleaser artifacts dist caddy-build caddy-dist # IDE files .idea/ .vscode/ ================================================ FILE: .golangci.yml ================================================ version: "2" run: issues-exit-code: 1 tests: false build-tags: - nobadger - nomysql - nopgx output: formats: text: path: stdout print-linter-name: true print-issued-lines: true linters: default: none enable: - asasalint - asciicheck - bidichk - bodyclose - decorder - dogsled - dupl - dupword - durationcheck - errcheck - errname - exhaustive - gosec - govet - importas - ineffassign - misspell - modernize - prealloc - promlinter - sloglint - sqlclosecheck - staticcheck - testableexamples - testifylint - tparallel - unconvert - unused - wastedassign - whitespace - zerologlint settings: staticcheck: checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1006", "-QF1008"] # default, and exclude 1 more undesired check errcheck: exclude-functions: - fmt.* - (go.uber.org/zap/zapcore.ObjectEncoder).AddObject - (go.uber.org/zap/zapcore.ObjectEncoder).AddArray exhaustive: ignore-enum-types: reflect.Kind|svc.Cmd exclusions: generated: lax presets: - comments - common-false-positives - legacy - std-error-handling rules: - linters: - gosec text: G115 # TODO: Either we should fix the issues or nuke the linter if it's bad - linters: - gosec text: G107 # 
we aren't calling unknown URL - linters: - gosec text: G203 # as a web server that's expected to handle any template, this is totally in the hands of the user. - linters: - gosec text: G204 # we're shelling out to known commands, not relying on user-defined input. - linters: - gosec # the choice of weakrand is deliberate, hence the named import "weakrand" path: modules/caddyhttp/reverseproxy/selectionpolicies.go text: G404 - linters: - gosec path: modules/caddyhttp/reverseproxy/streaming.go text: G404 - linters: - dupl path: modules/logging/filters.go - linters: - dupl path: modules/caddyhttp/matchers.go - linters: - dupl path: modules/caddyhttp/vars.go - linters: - errcheck path: _test\.go paths: - third_party$ - builtin$ - examples$ formatters: enable: - gci - gofmt - gofumpt - goimports settings: gci: sections: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break. - prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix. custom-order: true exclusions: generated: lax paths: - third_party$ - builtin$ - examples$ ================================================ FILE: .goreleaser.yml ================================================ version: 2 before: hooks: # The build is done in this particular way to build Caddy in a designated directory named in .gitignore. # This is so we can run goreleaser on tag without Git complaining of being dirty. The main.go in cmd/caddy directory # cannot be built within that directory due to changes necessary for the build causing Git to be dirty, which # subsequently causes gorleaser to refuse running. 
- rm -rf caddy-build caddy-dist vendor # vendor Caddy deps - go mod vendor - mkdir -p caddy-build - cp cmd/caddy/main.go caddy-build/main.go - /bin/sh -c 'cd ./caddy-build && go mod init caddy' # prepare syso files for windows embedding - /bin/sh -c 'for a in amd64 arm64; do XCADDY_SKIP_BUILD=1 GOOS=windows GOARCH=$a xcaddy build {{.Env.TAG}}; done' - /bin/sh -c 'mv /tmp/buildenv_*/*.syso caddy-build' # GoReleaser doesn't seem to offer {{.Tag}} at this stage, so we have to embed it into the env # so we run: TAG=$(git describe --abbrev=0) goreleaser release --rm-dist --skip-publish --skip-validate - go mod edit -require=github.com/caddyserver/caddy/v2@{{.Env.TAG}} ./caddy-build/go.mod # as of Go 1.16, `go` commands no longer automatically change go.{mod,sum}. We now have to explicitly # run `go mod tidy`. The `/bin/sh -c '...'` is because goreleaser can't find cd in PATH without shell invocation. - /bin/sh -c 'cd ./caddy-build && go mod tidy' # vendor the deps of the prepared to-build module - /bin/sh -c 'cd ./caddy-build && go mod vendor' - git clone --depth 1 https://github.com/caddyserver/dist caddy-dist - mkdir -p caddy-dist/man - go mod download - go run cmd/caddy/main.go manpage --directory ./caddy-dist/man - gzip -r ./caddy-dist/man/ - /bin/sh -c 'go run cmd/caddy/main.go completion bash > ./caddy-dist/scripts/bash-completion' builds: - env: - CGO_ENABLED=0 - GO111MODULE=on dir: ./caddy-build binary: caddy goos: - darwin - linux - windows - freebsd goarch: - amd64 - arm - arm64 - s390x - ppc64le - riscv64 goarm: - "5" - "6" - "7" ignore: - goos: darwin goarch: arm - goos: darwin goarch: ppc64le - goos: darwin goarch: s390x - goos: darwin goarch: riscv64 - goos: windows goarch: ppc64le - goos: windows goarch: s390x - goos: windows goarch: riscv64 - goos: windows goarch: arm - goos: freebsd goarch: ppc64le - goos: freebsd goarch: s390x - goos: freebsd goarch: riscv64 - goos: freebsd goarch: arm goarm: "5" flags: - -trimpath - -mod=readonly ldflags: - -s -w 
tags: - nobadger - nomysql - nopgx signs: - cmd: cosign signature: "${artifact}.sig" certificate: '{{ trimsuffix (trimsuffix .Env.artifact ".zip") ".tar.gz" }}.pem' args: ["sign-blob", "--yes", "--output-signature=${signature}", "--output-certificate", "${certificate}", "${artifact}"] artifacts: all sboms: - artifacts: binary documents: - >- {{ .ProjectName }}_ {{- .Version }}_ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_ {{- .Arch }} {{- with .Arm }}v{{ . }}{{ end }} {{- with .Mips }}_{{ . }}{{ end }} {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}.sbom cmd: syft args: ["$artifact", "--file", "${document}", "--output", "cyclonedx-json"] archives: - id: default format_overrides: - goos: windows formats: zip name_template: >- {{ .ProjectName }}_ {{- .Version }}_ {{- if eq .Os "darwin" }}mac{{ else }}{{ .Os }}{{ end }}_ {{- .Arch }} {{- with .Arm }}v{{ . }}{{ end }} {{- with .Mips }}_{{ . }}{{ end }} {{- if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }} # package the 'caddy-build' directory into a tarball, # allowing users to build the exact same set of files as ours. - id: source meta: true name_template: "{{ .ProjectName }}_{{ .Version }}_buildable-artifact" files: - src: LICENSE dst: ./LICENSE - src: README.md dst: ./README.md - src: AUTHORS dst: ./AUTHORS - src: ./caddy-build dst: ./ source: enabled: true name_template: '{{ .ProjectName }}_{{ .Version }}_src' format: 'tar.gz' # Additional files/template/globs you want to add to the source archive. # # Default: empty. 
files: - vendor checksum: algorithm: sha512 nfpms: - id: default package_name: caddy vendor: Dyanim homepage: https://caddyserver.com maintainer: Matthew Holt description: | Caddy - Powerful, enterprise-ready, open source web server with automatic HTTPS written in Go license: Apache 2.0 formats: - deb # - rpm bindir: /usr/bin contents: - src: ./caddy-dist/init/caddy.service dst: /lib/systemd/system/caddy.service - src: ./caddy-dist/init/caddy-api.service dst: /lib/systemd/system/caddy-api.service - src: ./caddy-dist/welcome/index.html dst: /usr/share/caddy/index.html - src: ./caddy-dist/scripts/bash-completion dst: /etc/bash_completion.d/caddy - src: ./caddy-dist/config/Caddyfile dst: /etc/caddy/Caddyfile type: config - src: ./caddy-dist/man/* dst: /usr/share/man/man8/ scripts: postinstall: ./caddy-dist/scripts/postinstall.sh preremove: ./caddy-dist/scripts/preremove.sh postremove: ./caddy-dist/scripts/postremove.sh provides: - httpd release: github: owner: caddyserver name: caddy draft: true prerelease: auto changelog: sort: asc filters: exclude: - '^chore:' - '^ci:' - '^docs?:' - '^readme:' - '^tests?:' - '^\w+\s+' # a hack to remove commit messages without colons thus don't correspond to a package ================================================ FILE: .pre-commit-config.yaml ================================================ repos: - repo: https://github.com/gitleaks/gitleaks rev: v8.16.3 hooks: - id: gitleaks - repo: https://github.com/golangci/golangci-lint rev: v1.52.2 hooks: - id: golangci-lint-config-verify - id: golangci-lint - id: golangci-lint-fmt - repo: https://github.com/jumanjihouse/pre-commit-hooks rev: 3.0.0 hooks: - id: shellcheck - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace ================================================ FILE: AUTHORS ================================================ # This is the official list of Caddy Authors for copyright purposes. 
# Authors may be either individual people or legal entities. # # Not all individual contributors are authors. For the full list of # contributors, refer to the project's page on GitHub or the repo's # commit history. Matthew Holt Light Code Labs Ardan Labs ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================

Caddy

a project


Every site on HTTPS

Caddy is an extensible server platform that uses TLS by default.

Releases · Documentation · Get Help

      @caddyserver on Twitter   Caddy Forum
Caddy on Sourcegraph   Cloudsmith

Powered by
CertMagic


Special thanks to:
Warp sponsorship ### [Warp, built for coding with multiple AI agents](https://go.warp.dev/caddy) [Available for macOS, Linux, & Windows](https://go.warp.dev/caddy)

### Menu - [Features](#features) - [Install](#install) - [Build from source](#build-from-source) - [For development](#for-development) - [With version information and/or plugins](#with-version-information-andor-plugins) - [Quick start](#quick-start) - [Overview](#overview) - [Full documentation](#full-documentation) - [Getting help](#getting-help) - [About](#about) ## [Features](https://caddyserver.com/features) - **Easy configuration** with the [Caddyfile](https://caddyserver.com/docs/caddyfile) - **Powerful configuration** with its [native JSON config](https://caddyserver.com/docs/json/) - **Dynamic configuration** with the [JSON API](https://caddyserver.com/docs/api) - [**Config adapters**](https://caddyserver.com/docs/config-adapters) if you don't like JSON - **Automatic HTTPS** by default - [ZeroSSL](https://zerossl.com) and [Let's Encrypt](https://letsencrypt.org) for public names - Fully-managed local CA for internal names & IPs - Can coordinate with other Caddy instances in a cluster - Multi-issuer fallback - Encrypted ClientHello (ECH) support - **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues - **Production-ready** after serving trillions of requests and managing millions of TLS certificates - **Scales to hundreds of thousands of sites** as proven in production - **HTTP/1.1, HTTP/2, and HTTP/3** all supported by default - **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat - **Runs anywhere** with **no external dependencies** (not even libc) - Written in Go, a language with higher **memory safety guarantees** than other servers - Actually **fun to use** - So much more to [discover](https://caddyserver.com/features) ## Install The simplest, cross-platform way to get started is to download Caddy from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH. 
See [our online documentation](https://caddyserver.com/docs/install) for other install instructions. ## Build from source Requirements: - [Go 1.25.0 or newer](https://golang.org/dl/) ### For development _**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions in the next section._ ```bash $ git clone "https://github.com/caddyserver/caddy.git" $ cd caddy/cmd/caddy/ $ go build ``` When you run Caddy, it may try to bind to low ports unless otherwise specified in your config. If your OS requires elevated privileges for this, you will need to give your new binary permission to do so. On Linux, this can be done easily with: `sudo setcap cap_net_bind_service=+ep ./caddy` If you prefer to use `go run` which only creates temporary binaries, you can still do this with the included `setcap.sh` like so: ```bash $ go run -exec ./setcap.sh main.go ``` If you don't want to type your password for `setcap`, use `sudo visudo` to edit your sudoers file and allow your user account to run that command without a password, for example: ``` username ALL=(ALL:ALL) NOPASSWD: /usr/sbin/setcap ``` replacing `username` with your actual username. Please be careful and only do this if you know what you are doing! We are only qualified to document how to use Caddy, not Go tooling or your computer, and we are providing these instructions for convenience only; please learn how to use your own computer at your own risk and make any needful adjustments. Then you can run the tests in all modules or a specific one: ```bash $ go test ./... $ go test ./modules/caddyhttp/tracing/ ``` ### With version information and/or plugins Using [our builder tool, `xcaddy`](https://github.com/caddyserver/xcaddy)... ```bash $ xcaddy build ``` ...the following steps are automated: 1. Create a new folder: `mkdir caddy` 2. Change into it: `cd caddy` 3. 
Copy [Caddy's main.go](https://github.com/caddyserver/caddy/blob/master/cmd/caddy/main.go) into the empty folder. Add imports for any custom plugins you want to add. 4. Initialize a Go module: `go mod init caddy` 5. (Optional) Pin Caddy version: `go get github.com/caddyserver/caddy/v2@version` replacing `version` with a git tag, commit, or branch name. 6. (Optional) Add plugins by adding their import: `_ "import/path/here"` 7. Compile: `go build -tags=nobadger,nomysql,nopgx` ## Quick start The [Caddy website](https://caddyserver.com/docs/) has documentation that includes tutorials, quick-start guides, reference, and more. **We recommend that all users -- regardless of experience level -- do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.** If you've only got a minute, [the website has several quick-start tutorials](https://caddyserver.com/docs/quick-starts) to choose from! However, after finishing a quick-start tutorial, please read more documentation to understand how the software works. 🙂 ## Overview Caddy is most often used as an HTTPS server, but it is suitable for any long-running Go program. First and foremost, it is a platform to run Go applications. Caddy "apps" are just Go programs that are implemented as Caddy modules. Two apps -- `tls` and `http` -- ship standard with Caddy. Caddy apps instantly benefit from [automated documentation](https://caddyserver.com/docs/json/), graceful on-line [config changes via API](https://caddyserver.com/docs/api), and unification with other Caddy apps. Although [JSON](https://caddyserver.com/docs/json/) is Caddy's native config language, Caddy can accept input from [config adapters](https://caddyserver.com/docs/config-adapters) which can essentially convert any config format of your choice into JSON: Caddyfile, JSON 5, YAML, TOML, NGINX config, and more. 
The primary way to configure Caddy is through [its API](https://caddyserver.com/docs/api), but if you prefer config files, the [command-line interface](https://caddyserver.com/docs/command-line) supports those too. Caddy exposes an unprecedented level of control compared to any web server in existence. In Caddy, you are usually setting the actual values of the initialized types in memory that power everything from your HTTP handlers and TLS handshakes to your storage medium. Caddy is also ridiculously extensible, with a powerful plugin system that makes vast improvements over other web servers. To wield the power of this design, you need to know how the config document is structured. Please see [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/). Nearly all of Caddy's configuration is contained in a single config document, rather than being scattered across CLI flags and env variables and a configuration file as with other web servers. This makes managing your server config more straightforward and reduces hidden variables/factors. ## Full documentation Our website has complete documentation: **https://caddyserver.com/docs/** The docs are also open source. You can contribute to them here: https://github.com/caddyserver/website ## Getting help - We advise companies using Caddy to secure a support contract through [Ardan Labs](https://www.ardanlabs.com) before help is needed. - A [sponsorship](https://github.com/sponsors/mholt) goes a long way! We can offer private help to sponsors. If Caddy is benefitting your company, please consider a sponsorship. This not only helps fund full-time work to ensure the longevity of the project, it provides your company the resources, support, and discounts you need; along with being a great look for your company to your customers and potential customers! - Individuals can exchange help for free on our community forum at https://caddy.community. 
Remember that people give help out of their spare time and good will. The best way to get help is to give it first! Please use our [issue tracker](https://github.com/caddyserver/caddy/issues) only for bug reports and feature requests, i.e. actionable development items (support questions will usually be referred to the forums). ## About Matthew Holt began developing Caddy in 2014 while studying computer science at Brigham Young University. (The name "Caddy" was chosen because this software helps with the tedious, mundane tasks of serving the Web, and is also a single place for multiple things to be organized together.) It soon became the first web server to use HTTPS automatically and by default, and now has hundreds of contributors and has served trillions of HTTPS requests. **The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Stack Holdings GmbH. - _Project on X: [@caddyserver](https://x.com/caddyserver)_ - _Author on X: [@mholt6](https://x.com/mholt6)_ Caddy is a project of [ZeroSSL](https://zerossl.com), an HID Global company. Debian package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully hosted, cloud-native, universal package management solution, that enables your organization to create, store and share packages in any format, to any place, with total confidence. ================================================ FILE: admin.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "bytes" "context" "crypto" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "errors" "expvar" "fmt" "hash" "io" "net" "net/http" "net/http/pprof" "net/url" "os" "path" "regexp" "slices" "strconv" "strings" "sync" "time" "github.com/caddyserver/certmagic" "github.com/cespare/xxhash/v2" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // testCertMagicStorageOverride is a package-level test hook. Tests may set // this variable to provide a temporary certmagic.Storage so that cert // management in tests does not hit the real default storage on disk. // This must NOT be set in production code. var testCertMagicStorageOverride certmagic.Storage func init() { // The hard-coded default `DefaultAdminListen` can be overridden // by setting the `CADDY_ADMIN` environment variable. // The environment variable may be used by packagers to change // the default admin address to something more appropriate for // that platform. See #5317 for discussion. if env, exists := os.LookupEnv("CADDY_ADMIN"); exists { DefaultAdminListen = env } } // AdminConfig configures Caddy's API endpoint, which is used // to manage Caddy while it is running. type AdminConfig struct { // If true, the admin endpoint will be completely disabled. // Note that this makes any runtime changes to the config // impossible, since the interface to do so is through the // admin endpoint. Disabled bool `json:"disabled,omitempty"` // The address to which the admin endpoint's listener should // bind itself. 
// Can be any single network address that can be
// parsed by Caddy. Accepts placeholders.
// Default: the value of the `CADDY_ADMIN` environment variable,
// or `localhost:2019` otherwise.
//
// Remember: When changing this value through a config reload,
// be sure to use the `--address` CLI flag to specify the current
// admin address if the currently-running admin endpoint is not
// the default address.
Listen string `json:"listen,omitempty"`

// If true, CORS headers will be emitted, and requests to the
// API will be rejected if their `Host` and `Origin` headers
// do not match the expected value(s). Use `origins` to
// customize which origins/hosts are allowed. If `origins` is
// not set, the listen address is the only value allowed by
// default. Enforced only on local (plaintext) endpoint.
EnforceOrigin bool `json:"enforce_origin,omitempty"`

// The list of allowed origins/hosts for API requests. Only needed
// if accessing the admin endpoint from a host different from the
// socket's network interface or if `enforce_origin` is true. If not
// set, the listener address will be the default value. If set but
// empty, no origins will be allowed. Enforced only on local
// (plaintext) endpoint.
Origins []string `json:"origins,omitempty"`

// Options pertaining to configuration management.
Config *ConfigSettings `json:"config,omitempty"`

// Options that establish this server's identity. Identity refers to
// credentials which can be used to uniquely identify and authenticate
// this server instance. This is required if remote administration is
// enabled (but does not require remote administration to be enabled).
// Default: no identity management.
Identity *IdentityConfig `json:"identity,omitempty"`

// Options pertaining to remote administration. By default, remote
// administration is disabled. If enabled, identity management must
// also be configured, as that is how the endpoint is secured.
// See the neighboring "identity" object.
//
// EXPERIMENTAL: This feature is subject to change.
Remote *RemoteAdmin `json:"remote,omitempty"`

// Holds onto the routers so that we can later provision them
// if they require provisioning.
routers []AdminRouter
}

// ConfigSettings configures the management of configuration.
type ConfigSettings struct {
	// Whether to keep a copy of the active config on disk. Default is true.
	// Note that "pulled" dynamic configs (using the neighboring "load" module)
	// are not persisted; only configs that are pushed to Caddy get persisted.
	Persist *bool `json:"persist,omitempty"`

	// Loads a new configuration. This is helpful if your configs are
	// managed elsewhere and you want Caddy to pull its config dynamically
	// when it starts. The pulled config completely replaces the current
	// one, just like any other config load. It is an error if a pulled
	// config is configured to pull another config without a load_delay,
	// as this creates a tight loop.
	//
	// EXPERIMENTAL: Subject to change.
	LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"`

	// The duration after which to load config. If set, config will be pulled
	// from the config loader after this duration. A delay is required if a
	// dynamically-loaded config is configured to load yet another config. To
	// load configs on a regular interval, ensure this value is set the same
	// on all loaded configs; it can also be variable if needed, and to stop
	// the loop, simply remove dynamic config loading from the next-loaded
	// config.
	//
	// EXPERIMENTAL: Subject to change.
	LoadDelay Duration `json:"load_delay,omitempty"`
}

// IdentityConfig configures management of this server's identity. An identity
// consists of credentials that uniquely verify this instance; for example,
// TLS certificates (public + private key pairs).
type IdentityConfig struct {
	// List of names or IP addresses which refer to this server.
	// Certificates will be obtained for these identifiers so
	// secure TLS connections can be made using them.
	Identifiers []string `json:"identifiers,omitempty"`

	// Issuers that can provide this admin endpoint its identity
	// certificate(s). Default: ACME issuers configured for
	// ZeroSSL and Let's Encrypt. Be sure to change this if you
	// require credentials for private identifiers.
	IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`

	// decoded/provisioned form of IssuersRaw (populated by manageIdentity)
	issuers []certmagic.Issuer
}

// RemoteAdmin enables and configures remote administration. If enabled,
// a secure listener enforcing mutual TLS authentication will be started
// on a different port from the standard plaintext admin server.
//
// This endpoint is secured using identity management, which must be
// configured separately (because identity management does not depend
// on remote administration). See the admin/identity config struct.
//
// EXPERIMENTAL: Subject to change.
type RemoteAdmin struct {
	// The address on which to start the secure listener. Accepts placeholders.
	// Default: :2021
	Listen string `json:"listen,omitempty"`

	// List of access controls for this secure admin endpoint.
	// This configures TLS mutual authentication (i.e. authorized
	// client certificates), but also application-layer permissions
	// like which paths and methods each identity is authorized for.
	AccessControl []*AdminAccess `json:"access_control,omitempty"`
}

// AdminAccess specifies what permissions an identity or group
// of identities are granted.
type AdminAccess struct {
	// Base64-encoded DER certificates containing public keys to accept.
	// (The contents of PEM certificate blocks are base64-encoded DER.)
	// Any of these public keys can appear in any part of a verified chain.
	PublicKeys []string `json:"public_keys,omitempty"`

	// Limits what the associated identities are allowed to do.
	// If unspecified, all permissions are granted.
	Permissions []AdminPermissions `json:"permissions,omitempty"`

	// decoded form of PublicKeys, extracted during remote admin setup
	publicKeys []crypto.PublicKey
}

// AdminPermissions specifies what kinds of requests are allowed
// to be made to the admin endpoint.
type AdminPermissions struct {
	// The API paths allowed. Paths are simple prefix matches.
	// Any subpath of the specified paths will be allowed.
	Paths []string `json:"paths,omitempty"`

	// The HTTP methods allowed for the given paths.
	Methods []string `json:"methods,omitempty"`
}

// newAdminHandler reads admin's config and returns an http.Handler suitable
// for use in an admin endpoint server, which will be listening on listenAddr.
// If remote is true, the handler is secured by mTLS access controls instead
// of Host/Origin checks.
func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool, _ Context) adminHandler {
	muxWrap := adminHandler{mux: http.NewServeMux()}

	// secure the local or remote endpoint respectively
	if remote {
		muxWrap.remoteControl = admin.Remote
	} else {
		// see comment in allowedOrigins() as to why we disable the host check for unix/fd networks
		muxWrap.enforceHost = !addr.isWildcardInterface() && !addr.IsUnixNetwork() && !addr.IsFdNetwork()
		muxWrap.allowedOrigins = admin.allowedOrigins(addr)
		muxWrap.enforceOrigin = admin.EnforceOrigin
	}

	// registers a plain http.Handler, wrapped with a request counter metric
	addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) {
		labels := prometheus.Labels{"path": pattern, "handler": handlerLabel}
		h = instrumentHandlerCounter(
			adminMetrics.requestCount.MustCurryWith(labels),
			h,
		)
		muxWrap.mux.Handle(pattern, h)
	}

	// addRoute just calls muxWrap.mux.Handle after
	// wrapping the handler with error handling
	addRoute := func(pattern string, handlerLabel string, h AdminHandler) {
		wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			err := h.ServeHTTP(w, r)
			if err != nil {
				labels := prometheus.Labels{
					"path":    pattern,
					"handler": handlerLabel,
					"method":  strings.ToUpper(r.Method),
				}
				adminMetrics.requestErrors.With(labels).Inc()
			}
			// handleError is a no-op when err is nil
			muxWrap.handleError(w, r, err)
		})
		addRouteWithMetrics(pattern, handlerLabel, wrapper)
	}

	const handlerLabel = "admin"

	// register standard config control endpoints
	addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig))
	addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID))
	addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop))

	// register debugging endpoints
	addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index))
	addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline))
	addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile))
	addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol))
	addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace))
	addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler())

	// register third-party module endpoints
	for _, m := range GetModules("admin.api") {
		router := m.New().(AdminRouter)
		for _, route := range router.Routes() {
			addRoute(route.Pattern, handlerLabel, route.Handler)
		}
		admin.routers = append(admin.routers, router)
	}

	return muxWrap
}

// provisionAdminRouters provisions all the router modules
// in the admin.api namespace that need provisioning.
func (admin *AdminConfig) provisionAdminRouters(ctx Context) error {
	for _, router := range admin.routers {
		provisioner, ok := router.(Provisioner)
		if !ok {
			continue
		}
		err := provisioner.Provision(ctx)
		if err != nil {
			return err
		}
	}

	// We no longer need the routers once provisioned, allow for GC
	admin.routers = nil
	return nil
}

// allowedOrigins returns a list of origins that are allowed.
// If admin.Origins is nil (null), the provided listen address
// will be used as the default origin. If admin.Origins is
// empty, no origins will be allowed, effectively bricking the
// endpoint for non-unix-socket endpoints, but whatever.
func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
	// de-duplicate explicitly-configured origins
	uniqueOrigins := make(map[string]struct{})
	for _, o := range admin.Origins {
		uniqueOrigins[o] = struct{}{}
	}

	// RFC 2616, Section 14.26:
	// "A client MUST include a Host header field in all HTTP/1.1 request
	// messages. If the requested URI does not include an Internet host
	// name for the service being requested, then the Host header field MUST
	// be given with an empty value."
	//
	// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
	// Understandable, but frustrating. See:
	// https://github.com/golang/go/issues/60374
	// See also the discussion here:
	// https://github.com/golang/go/issues/61431
	//
	// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
	// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
	// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
	// security checks, the infosec community assures me that it is secure to do
	// so, because:
	//
	// 1) Browsers do not allow access to unix sockets
	// 2) DNS is irrelevant to unix sockets
	//
	// If either of those two statements ever fail to hold true, it is not the
	// fault of Caddy.
	//
	// Thus, we do not fill out allowed origins and do not enforce Host
	// requirements for unix sockets. Enforcing it leads to confusion and
	// frustration, when UDS have their own permissions from the OS.
	// Enforcing host requirements here is effectively security theater,
	// and a false sense of security.
	//
	// See also the discussion in #6832.
	if admin.Origins == nil && !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
		if addr.isLoopback() {
			// loopback listeners accept the common loopback spellings
			uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
			uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
			uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
		} else {
			uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
		}
	}

	// convert each origin string to a *url.URL; entries without a scheme
	// are treated as bare host[:port] values
	allowed := make([]*url.URL, 0, len(uniqueOrigins))
	for originStr := range uniqueOrigins {
		var origin *url.URL
		if strings.Contains(originStr, "://") {
			var err error
			origin, err = url.Parse(originStr)
			if err != nil {
				// skip unparsable configured origins rather than failing
				continue
			}
			// normalize: only scheme and host matter for origin comparison
			origin.Path = ""
			origin.RawPath = ""
			origin.Fragment = ""
			origin.RawFragment = ""
			origin.RawQuery = ""
		} else {
			origin = &url.URL{Host: originStr}
		}
		allowed = append(allowed, origin)
	}

	return allowed
}

// replaceLocalAdminServer replaces the running local admin server
// according to the relevant configuration in cfg. If no configuration
// for the admin endpoint exists in cfg, a default one is used, so
// that there is always an admin server (unless it is explicitly
// configured to be disabled).
// Critically note that some elements and functionality of the context
// may not be ready, e.g. storage. Tread carefully.
func replaceLocalAdminServer(cfg *Config, ctx Context) error {
	// always* be sure to close down the old admin endpoint
	// as gracefully as possible, even if the new one is
	// disabled -- careful to use reference to the current
	// (old) admin endpoint since it will be different
	// when the function returns
	// (* except if the new one fails to start)
	oldAdminServer := localAdminServer
	var err error
	defer func() {
		// do the shutdown asynchronously so that any
		// current API request gets a response; this
		// goroutine may last a few seconds
		if oldAdminServer != nil && err == nil {
			go func(oldAdminServer *http.Server) {
				err := stopAdminServer(oldAdminServer)
				if err != nil {
					Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err))
				}
			}(oldAdminServer)
		}
	}()

	// set a default if admin wasn't otherwise configured
	if cfg.Admin == nil {
		cfg.Admin = &AdminConfig{
			Listen: DefaultAdminListen,
		}
	}

	// if new admin endpoint is to be disabled, we're done
	if cfg.Admin.Disabled {
		Log().Named("admin").Warn("admin endpoint disabled")
		return nil
	}

	// extract a singular listener address
	addr, err := parseAdminListenAddr(cfg.Admin.Listen, DefaultAdminListen)
	if err != nil {
		return err
	}

	handler := cfg.Admin.newAdminHandler(addr, false, ctx)

	// run the provisioners for loaded modules to make sure local
	// state is properly re-initialized in the new admin server
	err = cfg.Admin.provisionAdminRouters(ctx)
	if err != nil {
		return err
	}

	ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
	if err != nil {
		return err
	}

	serverMu.Lock()
	localAdminServer = &http.Server{
		Addr:              addr.String(), // for logging purposes only
		Handler:           handler,
		ReadTimeout:       10 * time.Second,
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
		MaxHeaderBytes:    1024 * 64,
	}
	serverMu.Unlock()

	adminLogger := Log().Named("admin")
	go func() {
		// re-read localAdminServer under the lock in case it changed
		serverMu.Lock()
		server := localAdminServer
		serverMu.Unlock()
		if err := server.Serve(ln.(net.Listener)); !errors.Is(err, http.ErrServerClosed) {
			adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err))
		}
	}()

	adminLogger.Info("admin endpoint started",
		zap.String("address", addr.String()),
		zap.Bool("enforce_origin", cfg.Admin.EnforceOrigin),
		zap.Array("origins", loggableURLArray(handler.allowedOrigins)))

	if !handler.enforceHost {
		adminLogger.Warn("admin endpoint on open interface; host checking disabled",
			zap.String("address", addr.String()))
	}

	return nil
}

// manageIdentity sets up automated identity management for this server.
func manageIdentity(ctx Context, cfg *Config) error {
	if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil {
		return nil
	}

	// set default issuers; this is pretty hacky because we can't
	// import the caddytls package -- but it works
	if cfg.Admin.Identity.IssuersRaw == nil {
		cfg.Admin.Identity.IssuersRaw = []json.RawMessage{
			json.RawMessage(`{"module": "acme"}`),
		}
	}

	// load and provision issuer modules
	if cfg.Admin.Identity.IssuersRaw != nil {
		val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw")
		if err != nil {
			return fmt.Errorf("loading identity issuer modules: %s", err)
		}
		for _, issVal := range val.([]any) {
			cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer))
		}
	}

	// we'll make a new cache when we make the CertMagic config, so stop any previous cache
	if identityCertCache != nil {
		identityCertCache.Stop()
	}

	logger := Log().Named("admin.identity")
	cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true)

	// issuers have circular dependencies with the configs because,
	// as explained in the caddytls package, they need access to the
	// correct storage and cache to solve ACME challenges
	for _, issuer := range cfg.Admin.Identity.issuers {
		// avoid import cycle with caddytls package, so manually duplicate the interface here, yuck
		if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok {
			annoying.SetConfig(cmCfg)
		}
	}

	// obtain and renew server identity certificate(s)
	return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers)
}

// replaceRemoteAdminServer replaces the running remote admin server
// according to the relevant configuration in cfg. It stops any previous
// remote admin server and only starts a new one if configured.
func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
	if cfg == nil {
		return nil
	}

	remoteLogger := Log().Named("admin.remote")

	// stop the old remote server asynchronously (see replaceLocalAdminServer)
	oldAdminServer := remoteAdminServer
	defer func() {
		if oldAdminServer != nil {
			go func(oldAdminServer *http.Server) {
				err := stopAdminServer(oldAdminServer)
				if err != nil {
					Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err))
				}
			}(oldAdminServer)
		}
	}()

	if cfg.Admin == nil || cfg.Admin.Remote == nil {
		return nil
	}

	addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen)
	if err != nil {
		return err
	}

	// make the HTTP handler but disable Host/Origin enforcement
	// because we are using TLS authentication instead
	handler := cfg.Admin.newAdminHandler(addr, true, ctx)

	// run the provisioners for loaded modules to make sure local
	// state is properly re-initialized in the new admin server
	err = cfg.Admin.provisionAdminRouters(ctx)
	if err != nil {
		return err
	}

	// create client certificate pool for TLS mutual auth, and extract public keys
	// so that we can enforce access controls at the application layer
	clientCertPool := x509.NewCertPool()
	for i, accessControl := range cfg.Admin.Remote.AccessControl {
		for j, certBase64 := range accessControl.PublicKeys {
			cert, err := decodeBase64DERCert(certBase64)
			if err != nil {
				return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err)
			}
			accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey)
			clientCertPool.AddCert(cert)
		}
	}

	// create TLS config that will enforce mutual authentication
	if identityCertCache == nil {
		return fmt.Errorf("cannot enable remote admin without a certificate cache; configure identity management to initialize a certificate cache")
	}
	cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false)
	tlsConfig := cmCfg.TLSConfig()
	tlsConfig.NextProtos = nil // this server does not solve ACME challenges
	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	tlsConfig.ClientCAs = clientCertPool

	// convert logger to stdlib so it can be used by HTTP server
	serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel)
	if err != nil {
		return err
	}

	serverMu.Lock()
	// create secure HTTP server
	remoteAdminServer = &http.Server{
		Addr:              addr.String(), // for logging purposes only
		Handler:           handler,
		TLSConfig:         tlsConfig,
		ReadTimeout:       10 * time.Second,
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
		MaxHeaderBytes:    1024 * 64,
		ErrorLog:          serverLogger,
	}
	serverMu.Unlock()

	// start listener
	lnAny, err := addr.Listen(ctx, 0, net.ListenConfig{})
	if err != nil {
		return err
	}
	ln := lnAny.(net.Listener)
	ln = tls.NewListener(ln, tlsConfig)

	go func() {
		serverMu.Lock()
		server := remoteAdminServer
		serverMu.Unlock()
		if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
			remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err))
		}
	}()

	remoteLogger.Info("secure admin remote control endpoint started",
		zap.String("address", addr.String()))

	return nil
}

// certmagicConfig builds a certmagic.Config for managing this server's
// identity certificates. If makeCache is true, a fresh certificate cache
// is created and stored in the package-level identityCertCache.
func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config {
	var cmCfg *certmagic.Config
	if ident == nil {
		// user might not have configured identity; that's OK, we can still make a
		// certmagic config, although it'll be mostly useless for remote management
		ident = new(IdentityConfig)
	}
	// Choose storage: prefer the package-level test override when present,
	// otherwise use the configured DefaultStorage. Tests may set an override
	// to divert storage into a temporary location. Otherwise, in production
	// we use the DefaultStorage since we don't want to act as part of a
	// cluster; this storage is for the server's local identity only.
	var storage certmagic.Storage
	if testCertMagicStorageOverride != nil {
		storage = testCertMagicStorageOverride
	} else {
		storage = DefaultStorage
	}
	template := certmagic.Config{
		Storage: storage,
		Logger:  logger,
		Issuers: ident.issuers,
	}
	if makeCache {
		identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
			// cmCfg is captured by reference; it is assigned just below
			GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
				return cmCfg, nil
			},
			Logger: logger.Named("cache"),
		})
	}
	cmCfg = certmagic.New(identityCertCache, template)
	return cmCfg
}

// IdentityCredentials returns this instance's configured, managed identity credentials
// that can be used in TLS client authentication.
func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) {
	if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil {
		return nil, fmt.Errorf("no server identity configured")
	}
	ident := ctx.cfg.Admin.Identity
	if len(ident.Identifiers) == 0 {
		return nil, fmt.Errorf("no identifiers configured")
	}
	if logger == nil {
		logger = Log()
	}
	magic := ident.certmagicConfig(logger, false)
	return magic.ClientCredentials(ctx, ident.Identifiers)
}

// enforceAccessControls enforces application-layer access controls for r based on remote.
// It expects that the TLS server has already established at least one verified chain of
// trust, and then looks for a matching, authorized public key that is allowed to access
// the defined path(s) using the defined method(s).
func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error {
	for _, chain := range r.TLS.VerifiedChains {
		for _, peerCert := range chain {
			for _, adminAccess := range remote.AccessControl {
				for _, allowedKey := range adminAccess.publicKeys {
					// see if we found a matching public key; the TLS server already verified the chain
					// so we know the client possesses the associated private key; this handy interface
					// doesn't appear to be defined anywhere in the std lib, but was implemented here:
					// https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c
					comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
					if !ok || !comparer.Equal(allowedKey) {
						continue
					}

					// key recognized; make sure its HTTP request is permitted
					for _, accessPerm := range adminAccess.Permissions {
						// verify method (nil Methods list means all methods allowed)
						methodFound := accessPerm.Methods == nil || slices.Contains(accessPerm.Methods, r.Method)
						if !methodFound {
							return APIError{
								HTTPStatus: http.StatusForbidden,
								Message:    "not authorized to use this method",
							}
						}

						// verify path (prefix match; nil Paths list means all paths allowed)
						pathFound := accessPerm.Paths == nil
						for _, allowedPath := range accessPerm.Paths {
							if strings.HasPrefix(r.URL.Path, allowedPath) {
								pathFound = true
								break
							}
						}
						if !pathFound {
							return APIError{
								HTTPStatus: http.StatusForbidden,
								Message:    "not authorized to access this path",
							}
						}
					}

					// public key authorized, method and path allowed
					return nil
				}
			}
		}
	}

	// in theory, this should never happen; with an unverified chain, the TLS server
	// should not accept the connection in the first place, and the acceptable cert
	// pool is configured using the same list of public keys we verify against
	return APIError{
		HTTPStatus: http.StatusUnauthorized,
		Message:    "client identity not authorized",
	}
}

// stopAdminServer gracefully shuts down srv with a 10-second timeout.
func stopAdminServer(srv *http.Server) error {
	if srv == nil {
		return fmt.Errorf("no admin server")
	}
	timeout := 10 * time.Second
	ctx, cancel := context.WithTimeoutCause(context.Background(), timeout,
		fmt.Errorf("stopping admin server: %ds timeout", int(timeout.Seconds())))
	defer cancel()
	err := srv.Shutdown(ctx)
	if err != nil {
		// surface the cause (with the timeout value) instead of the generic deadline error
		if cause := context.Cause(ctx); cause != nil && errors.Is(err, context.DeadlineExceeded) {
			err = cause
		}
		return fmt.Errorf("shutting down admin server: %v", err)
	}
	Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr))
	return nil
}

// AdminRouter is a type which can return routes for the admin API.
type AdminRouter interface {
	Routes() []AdminRoute
}

// AdminRoute represents a route for the admin endpoint.
type AdminRoute struct {
	Pattern string
	Handler AdminHandler
}

// adminHandler is the http.Handler for the admin endpoint; it wraps a
// ServeMux with endpoint security (Host/Origin checks locally, or mTLS
// access controls remotely).
type adminHandler struct {
	mux *http.ServeMux

	// security for local/plaintext endpoint
	enforceOrigin  bool
	enforceHost    bool
	allowedOrigins []*url.URL

	// security for remote/encrypted endpoint
	remoteControl *RemoteAdmin
}

// ServeHTTP is the external entry point for API requests.
// It will only be called once per request.
func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ip, port, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		// RemoteAddr may not have a port (e.g. unix sockets)
		ip = r.RemoteAddr
		port = ""
	}
	log := Log().Named("admin.api").With(
		zap.String("method", r.Method),
		zap.String("host", r.Host),
		zap.String("uri", r.RequestURI),
		zap.String("remote_ip", ip),
		zap.String("remote_port", port),
		zap.Reflect("headers", r.Header),
	)
	if r.TLS != nil {
		log = log.With(
			zap.Bool("secure", true),
			zap.Int("verified_chains", len(r.TLS.VerifiedChains)),
		)
	}
	// metrics scrapes are frequent and noisy; log them at debug level only
	if r.RequestURI == "/metrics" {
		log.Debug("received request")
	} else {
		log.Info("received request")
	}

	h.serveHTTP(w, r)
}

// serveHTTP is the internal entry point for API requests. It may
// be called more than once per request, for example if a request
// is rewritten (i.e. internal redirect).
func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) { if h.remoteControl != nil { // enforce access controls on secure endpoint if err := h.remoteControl.enforceAccessControls(r); err != nil { h.handleError(w, r, err) return } } // common mitigations in browser contexts if strings.Contains(r.Header.Get("Upgrade"), "websocket") { // I've never been able demonstrate a vulnerability myself, but apparently // WebSocket connections originating from browsers aren't subject to CORS // restrictions, so we'll just be on the safe side h.handleError(w, r, APIError{ HTTPStatus: http.StatusBadRequest, Err: errors.New("websocket connections aren't allowed"), Message: "WebSocket connections aren't allowed.", }) return } if strings.Contains(r.Header.Get("Sec-Fetch-Mode"), "no-cors") { // turns out web pages can just disable the same-origin policy (!???!?) // but at least browsers let us know that's the case, holy heck h.handleError(w, r, APIError{ HTTPStatus: http.StatusBadRequest, Err: errors.New("client attempted to make request by disabling same-origin policy using no-cors mode"), Message: "Disabling same-origin restrictions is not allowed.", }) return } if r.Header.Get("Origin") == "null" { // bug in Firefox in certain cross-origin situations (yikes?) 
// (not strictly a security vuln on its own, but it's red flaggy, // since it seems to manifest in cross-origin contexts) h.handleError(w, r, APIError{ HTTPStatus: http.StatusBadRequest, Err: errors.New("invalid origin 'null'"), Message: "Buggy browser is sending null Origin header.", }) } if h.enforceHost { // DNS rebinding mitigation err := h.checkHost(r) if err != nil { h.handleError(w, r, err) return } } _, hasOriginHeader := r.Header["Origin"] _, hasSecHeader := r.Header["Sec-Fetch-Mode"] if h.enforceOrigin || hasOriginHeader || hasSecHeader { // cross-site mitigation origin, err := h.checkOrigin(r) if err != nil { h.handleError(w, r, err) return } if r.Method == http.MethodOptions { w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE") w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control") w.Header().Set("Access-Control-Allow-Credentials", "true") } w.Header().Set("Access-Control-Allow-Origin", origin) } h.mux.ServeHTTP(w, r) } func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } if err == errInternalRedir { h.serveHTTP(w, r) return } apiErr, ok := err.(APIError) if !ok { apiErr = APIError{ HTTPStatus: http.StatusInternalServerError, Err: err, } } if apiErr.HTTPStatus == 0 { apiErr.HTTPStatus = http.StatusInternalServerError } if apiErr.Message == "" && apiErr.Err != nil { apiErr.Message = apiErr.Err.Error() } Log().Named("admin.api").Error("request error", zap.Error(err), zap.Int("status_code", apiErr.HTTPStatus), ) w.Header().Set("Content-Type", "application/json") w.WriteHeader(apiErr.HTTPStatus) encErr := json.NewEncoder(w).Encode(apiErr) if encErr != nil { Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr)) } } // checkHost returns a handler that wraps next such that // it will only be called if the request's Host header matches // a trustworthy/expected value. 
This helps to mitigate DNS // rebinding attacks. func (h adminHandler) checkHost(r *http.Request) error { allowed := slices.ContainsFunc(h.allowedOrigins, func(u *url.URL) bool { return r.Host == u.Host }) if !allowed { return APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("host not allowed: %s", r.Host), } } return nil } // checkOrigin ensures that the Origin header, if // set, matches the intended target; prevents arbitrary // sites from issuing requests to our listener. It // returns the origin that was obtained from r. func (h adminHandler) checkOrigin(r *http.Request) (string, error) { originStr, origin := h.getOrigin(r) if origin == nil { return "", APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("required Origin header is missing or invalid"), } } if !h.originAllowed(origin) { return "", APIError{ HTTPStatus: http.StatusForbidden, Err: fmt.Errorf("client is not allowed to access from origin '%s'", originStr), } } return origin.String(), nil } func (h adminHandler) getOrigin(r *http.Request) (string, *url.URL) { origin := r.Header.Get("Origin") if origin == "" { origin = r.Header.Get("Referer") } originURL, err := url.Parse(origin) if err != nil { return origin, nil } originURL.Path = "" originURL.RawPath = "" originURL.Fragment = "" originURL.RawFragment = "" originURL.RawQuery = "" return origin, originURL } func (h adminHandler) originAllowed(origin *url.URL) bool { for _, allowedOrigin := range h.allowedOrigins { if allowedOrigin.Scheme != "" && origin.Scheme != allowedOrigin.Scheme { continue } if origin.Host == allowedOrigin.Host { return true } } return false } // etagHasher returns the hasher we used on the config to both // produce and verify ETags. func etagHasher() hash.Hash { return xxhash.New() } // makeEtag returns an Etag header value (including quotes) for // the given config path and hash of contents at that path. 
func makeEtag(path string, hash hash.Hash) string {
	return fmt.Sprintf(`"%s %x"`, path, hash.Sum(nil))
}

// This buffer pool is used to keep buffers for
// reading the config file during eTag header generation
var bufferPool = sync.Pool{
	New: func() any {
		return new(bytes.Buffer)
	},
}

// handleConfig serves GET (read config with ETag) and POST/PUT/PATCH/DELETE
// (mutate config, honoring If-Match and Cache-Control: must-revalidate)
// requests on the config endpoint.
func handleConfig(w http.ResponseWriter, r *http.Request) error {
	switch r.Method {
	case http.MethodGet:
		w.Header().Set("Content-Type", "application/json")

		hash := etagHasher()

		// Read the config into a buffer instead of writing directly to
		// the response writer, as we want to set the ETag as the header,
		// not the trailer.
		buf := bufferPool.Get().(*bytes.Buffer)
		buf.Reset()
		defer bufferPool.Put(buf)

		configWriter := io.MultiWriter(buf, hash)
		err := readConfig(r.URL.Path, configWriter)
		if err != nil {
			return APIError{HTTPStatus: http.StatusBadRequest, Err: err}
		}

		// we could consider setting up a sync.Pool for the summed
		// hashes to reduce GC pressure.
		w.Header().Set("Etag", makeEtag(r.URL.Path, hash))
		_, err = w.Write(buf.Bytes())
		if err != nil {
			return APIError{HTTPStatus: http.StatusInternalServerError, Err: err}
		}

		return nil

	case http.MethodPost,
		http.MethodPut,
		http.MethodPatch,
		http.MethodDelete:
		// DELETE does not use a body, but the others do
		var body []byte
		if r.Method != http.MethodDelete {
			if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") {
				return APIError{
					HTTPStatus: http.StatusBadRequest,
					Err:        fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct),
				}
			}

			buf := bufPool.Get().(*bytes.Buffer)
			buf.Reset()
			defer bufPool.Put(buf)

			_, err := io.Copy(buf, r.Body)
			if err != nil {
				return APIError{
					HTTPStatus: http.StatusBadRequest,
					Err:        fmt.Errorf("reading request body: %v", err),
				}
			}
			body = buf.Bytes()
		}

		forceReload := r.Header.Get("Cache-Control") == "must-revalidate"

		err := changeConfig(r.Method, r.URL.Path, body, r.Header.Get("If-Match"), forceReload)
		if err != nil && !errors.Is(err, errSameConfig) {
			return err
		}

		// If this request changed the config, clear the last
		// config info we have stored, if it is different from
		// the original source.
		ClearLastConfigIfDifferent(
			r.Header.Get("Caddy-Config-Source-File"),
			r.Header.Get("Caddy-Config-Source-Adapter"))

	default:
		return APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method %s not allowed", r.Method),
		}
	}

	return nil
}

// handleConfigID resolves an "@id" reference in the request path to its
// expanded config path, then signals an internal redirect so the request
// is re-processed against the expanded path.
func handleConfigID(w http.ResponseWriter, r *http.Request) error {
	idPath := r.URL.Path

	parts := strings.Split(idPath, "/")
	if len(parts) < 3 || parts[2] == "" {
		return APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("request path is missing object ID"),
		}
	}
	if parts[0] != "" || parts[1] != "id" {
		return APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("malformed object path"),
		}
	}
	id := parts[2]

	// map the ID to the expanded path
	rawCfgMu.RLock()
	expanded, ok := rawCfgIndex[id]
	rawCfgMu.RUnlock()
	if !ok {
		return APIError{
			HTTPStatus: http.StatusNotFound,
			Err:        fmt.Errorf("unknown object ID '%s'", id),
		}
	}

	// piece the full URL path back together
	parts = append([]string{expanded}, parts[3:]...)
	r.URL.Path = path.Join(parts...)

	return errInternalRedir
}

// handleStop POSTs trigger a graceful process exit; all other methods
// are rejected.
func handleStop(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodPost {
		return APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed"),
		}
	}

	exitProcess(context.Background(), Log().Named("admin.api"))
	return nil
}

// unsyncedConfigAccess traverses into the current config and performs
// the operation at path according to method, using body and out as
// needed. This is a low-level, unsynchronized function; most callers
// will want to use changeConfig or readConfig instead. This requires a
// read or write lock on currentCtxMu, depending on method (GET needs
// only a read lock; all others need a write lock).
func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error {
	var err error
	var val any

	// if there is a request body, decode it into the
	// variable that will be set in the config according
	// to method and path
	if len(body) > 0 {
		err = json.Unmarshal(body, &val)
		if err != nil {
			// include the byte offset for syntax errors to ease debugging
			if jsonErr, ok := err.(*json.SyntaxError); ok {
				return fmt.Errorf("decoding request body: %w, at offset %d", jsonErr, jsonErr.Offset)
			}
			return fmt.Errorf("decoding request body: %w", err)
		}
	}

	enc := json.NewEncoder(out)

	cleanPath := strings.Trim(path, "/")
	if cleanPath == "" {
		return fmt.Errorf("no traversable path")
	}

	parts := strings.Split(cleanPath, "/")
	if len(parts) == 0 {
		return fmt.Errorf("path missing")
	}

	// A path that ends with "..." implies:
	// 1) the part before it is an array
	// 2) the payload is an array
	// and means that the user wants to expand the elements
	// in the payload array and append each one into the
	// destination array, like so:
	//     array = append(array, elems...)
	// This special case is handled below.
	ellipses := parts[len(parts)-1] == "..."
	if ellipses {
		parts = parts[:len(parts)-1]
	}

	var ptr any = rawCfg

traverseLoop:
	for i, part := range parts {
		switch v := ptr.(type) {
		case map[string]any:
			// if the next part enters a slice, and the slice is our destination,
			// handle it specially (because appending to the slice copies the slice
			// header, which does not replace the original one like we want)
			if arr, ok := v[part].([]any); ok && i == len(parts)-2 {
				var idx int
				if method != http.MethodPost {
					idxStr := parts[len(parts)-1]
					idx, err = strconv.Atoi(idxStr)
					if err != nil {
						return fmt.Errorf("[%s] invalid array index '%s': %v", path, idxStr, err)
					}
					// PUT may insert at index len(arr) (append position); all
					// other methods require an existing element
					if idx < 0 || (method != http.MethodPut && idx >= len(arr)) || idx > len(arr) {
						return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr)
					}
				}

				switch method {
				case http.MethodGet:
					err = enc.Encode(arr[idx])
					if err != nil {
						return fmt.Errorf("encoding config: %v", err)
					}
				case http.MethodPost:
					if ellipses {
						valArray, ok := val.([]any)
						if !ok {
							return fmt.Errorf("final element is not an array")
						}
						v[part] = append(arr, valArray...)
					} else {
						v[part] = append(arr, val)
					}
				case http.MethodPut:
					// avoid creation of new slice and a second copy (see
					// https://github.com/golang/go/wiki/SliceTricks#insert)
					arr = append(arr, nil)
					copy(arr[idx+1:], arr[idx:])
					arr[idx] = val
					v[part] = arr
				case http.MethodPatch:
					arr[idx] = val
				case http.MethodDelete:
					v[part] = append(arr[:idx], arr[idx+1:]...)
				default:
					return fmt.Errorf("unrecognized method %s", method)
				}
				break traverseLoop
			}

			if i == len(parts)-1 {
				// we have arrived at the target key
				switch method {
				case http.MethodGet:
					err = enc.Encode(v[part])
					if err != nil {
						return fmt.Errorf("encoding config: %v", err)
					}
				case http.MethodPost:
					// if the part is an existing list, POST appends to
					// it, otherwise it just sets or creates the value
					if arr, ok := v[part].([]any); ok {
						if ellipses {
							valArray, ok := val.([]any)
							if !ok {
								return fmt.Errorf("final element is not an array")
							}
							v[part] = append(arr, valArray...)
						} else {
							v[part] = append(arr, val)
						}
					} else {
						v[part] = val
					}
				case http.MethodPut:
					if _, ok := v[part]; ok {
						return APIError{
							HTTPStatus: http.StatusConflict,
							Err:        fmt.Errorf("[%s] key already exists: %s", path, part),
						}
					}
					v[part] = val
				case http.MethodPatch:
					if _, ok := v[part]; !ok {
						return APIError{
							HTTPStatus: http.StatusNotFound,
							Err:        fmt.Errorf("[%s] key does not exist: %s", path, part),
						}
					}
					v[part] = val
				case http.MethodDelete:
					if _, ok := v[part]; !ok {
						return APIError{
							HTTPStatus: http.StatusNotFound,
							Err:        fmt.Errorf("[%s] key does not exist: %s", path, part),
						}
					}
					delete(v, part)
				default:
					return fmt.Errorf("unrecognized method %s", method)
				}
			} else {
				// if we are "PUTting" a new resource, the key(s) in its path
				// might not exist yet; that's OK but we need to make them as
				// we go, while we still have a pointer from the level above
				if v[part] == nil && method == http.MethodPut {
					v[part] = make(map[string]any)
				}
				ptr = v[part]
			}

		case []any:
			partInt, err := strconv.Atoi(part)
			if err != nil {
				return fmt.Errorf("[/%s] invalid array index '%s': %v",
					strings.Join(parts[:i+1], "/"), part, err)
			}
			if partInt < 0 || partInt >= len(v) {
				return fmt.Errorf("[/%s] array index out of bounds: %s",
					strings.Join(parts[:i+1], "/"), part)
			}
			ptr = v[partInt]

		default:
			return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/"))
		}
	}

	return nil
}

// RemoveMetaFields removes meta fields like "@id" from a JSON message
// by using a simple regular expression. (An alternate way to do this
// would be to delete them from the raw, map[string]any
// representation as they are indexed, then iterate the index we made
// and add them back after encoding as JSON, but this is simpler.)
func RemoveMetaFields(rawJSON []byte) []byte { return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte { // matches with a comma on both sides (when "@id" property is // not the first or last in the object) need to keep exactly // one comma for correct JSON syntax comma := []byte{','} if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) { return comma } return []byte{} }) } // AdminHandler is like http.Handler except ServeHTTP may return an error. // // If any handler encounters an error, it should be returned for proper // handling. type AdminHandler interface { ServeHTTP(http.ResponseWriter, *http.Request) error } // AdminHandlerFunc is a convenience type like http.HandlerFunc. type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error // ServeHTTP implements the Handler interface. func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { return f(w, r) } // APIError is a structured error that every API // handler should return for consistency in logging // and client responses. If Message is unset, then // Err.Error() will be serialized in its place. type APIError struct { HTTPStatus int `json:"-"` Err error `json:"-"` Message string `json:"error"` } func (e APIError) Error() string { if e.Err != nil { return e.Err.Error() } return e.Message } // parseAdminListenAddr extracts a singular listen address from either addr // or defaultAddr, returning the network and the address of the listener. 
func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) { input, err := NewReplacer().ReplaceOrErr(addr, true, true) if err != nil { return NetworkAddress{}, fmt.Errorf("replacing listen address: %v", err) } if input == "" { input = defaultAddr } listenAddr, err := ParseNetworkAddress(input) if err != nil { return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err) } if listenAddr.PortRangeSize() != 1 { return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr) } return listenAddr, nil } // decodeBase64DERCert base64-decodes, then DER-decodes, certStr. func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { derBytes, err := base64.StdEncoding.DecodeString(certStr) if err != nil { return nil, err } return x509.ParseCertificate(derBytes) } type loggableURLArray []*url.URL func (ua loggableURLArray) MarshalLogArray(enc zapcore.ArrayEncoder) error { if ua == nil { return nil } for _, u := range ua { enc.AppendString(u.String()) } return nil } var ( // DefaultAdminListen is the address for the local admin // listener, if none is specified at startup. DefaultAdminListen = "localhost:2019" // DefaultRemoteAdminListen is the address for the remote // (TLS-authenticated) admin listener, if enabled and not // specified otherwise. DefaultRemoteAdminListen = ":2021" ) // PIDFile writes a pidfile to the file at filename. It // will get deleted before the process gracefully exits. func PIDFile(filename string) error { pid := []byte(strconv.Itoa(os.Getpid()) + "\n") err := os.WriteFile(filename, pid, 0o600) if err != nil { return err } pidfile = filename return nil } // idRegexp is used to match ID fields and their associated values // in the config. It also matches adjacent commas so that syntax // can be preserved no matter where in the object the field appears. // It supports string and most numeric values. 
var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`)

// pidfile is the name of the pidfile, if any.
var pidfile string

// errInternalRedir indicates an internal redirect
// and is useful when admin API handlers rewrite
// the request; in that case, authentication and
// authorization needs to happen again for the
// rewritten request.
var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required")

const (
	// rawConfigKey is the map key under which the current raw config
	// is kept (see rawCfg usage in changeConfig).
	rawConfigKey = "config"

	// idKey is the name of the meta field used to tag config objects
	// with an ID so they can be addressed directly.
	idKey = "@id"
)

// bufPool is a pool of reusable byte buffers.
var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// keep a reference to admin endpoint singletons while they're active
var (
	serverMu                            sync.Mutex
	localAdminServer, remoteAdminServer *http.Server
	identityCertCache                   *certmagic.Cache
)

================================================
FILE: admin_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy

import (
	"context"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"maps"
	"net/http"
	"net/http/httptest"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// testCfg is a small HTTP app config used as a fixture by several tests.
var testCfg = []byte(`{
	"apps": {
		"http": {
			"servers": {
				"myserver": {
					"listen": ["tcp/localhost:8080-8084"],
					"read_timeout": "30s"
				},
				"yourserver": {
					"listen": ["127.0.0.1:5000"],
					"read_header_timeout": "15s"
				}
			}
		}
	}
}
`)

// TestUnsyncedConfigAccess exercises unsyncedConfigAccess with a sequence
// of config mutations (POST/PUT/PATCH/DELETE on maps and arrays) and
// verifies the in-memory rawCfg after each step.
func TestUnsyncedConfigAccess(t *testing.T) {
	// each test is performed in sequence, so
	// each change builds on the previous ones;
	// the config is not reset between tests
	for i, tc := range []struct {
		method    string
		path      string // rawConfigKey will be prepended
		payload   string
		expect    string // JSON representation of what the whole config is expected to be after the request
		shouldErr bool
	}{
		{
			method:  "POST",
			path:    "",
			payload: `{"foo": "bar", "list": ["a", "b", "c"]}`, // starting value
			expect:  `{"foo": "bar", "list": ["a", "b", "c"]}`,
		},
		{
			method:  "POST",
			path:    "/foo",
			payload: `"jet"`,
			expect:  `{"foo": "jet", "list": ["a", "b", "c"]}`,
		},
		{
			method:  "POST",
			path:    "/bar",
			payload: `{"aa": "bb", "qq": "zz"}`,
			expect:  `{"foo": "jet", "bar": {"aa": "bb", "qq": "zz"}, "list": ["a", "b", "c"]}`,
		},
		{
			method: "DELETE",
			path:   "/bar/qq",
			expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
		},
		{
			// deleting a key that no longer exists should error
			method:    "DELETE",
			path:      "/bar/qq",
			expect:    `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c"]}`,
			shouldErr: true,
		},
		{
			method:  "POST",
			path:    "/list",
			payload: `"e"`,
			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "e"]}`,
		},
		{
			// PUT into an array inserts at the index
			method:  "PUT",
			path:    "/list/3",
			payload: `"d"`,
			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d", "e"]}`,
		},
		{
			method: "DELETE",
			path:   "/list/3",
			expect: `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "e"]}`,
		},
		{
			// PATCH into an array replaces the element at the index
			method:  "PATCH",
			path:    "/list/3",
			payload: `"d"`,
			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d"]}`,
		},
		{
			// POST with "/..." expands the payload array into the target array
			method:  "POST",
			path:    "/list/...",
			payload: `["e", "f", "g"]`,
			expect:  `{"foo": "jet", "bar": {"aa": "bb"}, "list": ["a", "b", "c", "d", "e", "f", "g"]}`,
		},
	} {
		err := unsyncedConfigAccess(tc.method, rawConfigKey+tc.path, []byte(tc.payload), nil)

		if tc.shouldErr && err == nil {
			t.Fatalf("Test %d: Expected error return value, but got: %v", i, err)
		}
		if !tc.shouldErr && err != nil {
			t.Fatalf("Test %d: Should not have had error return value, but got: %v", i, err)
		}

		// decode the expected config so we can do a convenient DeepEqual
		var expectedDecoded any
		err = json.Unmarshal([]byte(tc.expect), &expectedDecoded)
		if err != nil {
			t.Fatalf("Test %d: Unmarshaling expected config: %v", i, err)
		}

		// make sure the resulting config is as we expect it
		if !reflect.DeepEqual(rawCfg[rawConfigKey], expectedDecoded) {
			t.Fatalf("Test %d:\nExpected:\n\t%#v\nActual:\n\t%#v",
				i, expectedDecoded, rawCfg[rawConfigKey])
		}
	}
}

// TestLoadConcurrent exercises Load under concurrent conditions
// and is most useful under test with `-race` enabled.
func TestLoadConcurrent(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Go(func() {
			_ = Load(testCfg, true)
		})
	}
	wg.Wait()
}

// fooModule is a minimal test App used to exercise module registration
// and config loading.
type fooModule struct {
	IntField int
	StrField string
}

func (fooModule) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID:  "foo",
		New: func() Module { return new(fooModule) },
	}
}
func (fooModule) Start() error { return nil }
func (fooModule) Stop() error  { return nil }

// TestETags verifies If-Match (etag) handling in changeConfig: a stale
// or bogus etag must yield HTTP 412, and a current etag must allow the
// update exactly once.
func TestETags(t *testing.T) {
	RegisterModule(fooModule{})

	if err := Load([]byte(`{"admin": {"listen": "localhost:2999"}, "apps": {"foo": {"strField": "abc", "intField": 0}}}`), true); err != nil {
		t.Fatalf("loading: %s", err)
	}

	const key = "/" + rawConfigKey + "/apps/foo"

	// try update the config with the wrong etag
	err := changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}}`), fmt.Sprintf(`"/%s not_an_etag"`, rawConfigKey), false)
	if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
		t.Fatalf("expected precondition failed; got %v", err)
	}

	// get the etag
	hash := etagHasher()
	if err := readConfig(key, hash); err != nil {
		t.Fatalf("reading: %s", err)
	}

	// do the same update with the correct key
	err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 1}`), makeEtag(key, hash), false)
	if err != nil {
		t.Fatalf("expected update to work; got %v", err)
	}

	// now try another update. The hash should no longer match and we should get precondition failed
	err = changeConfig(http.MethodPost, key, []byte(`{"strField": "abc", "intField": 2}`), makeEtag(key, hash), false)
	if apiErr, ok := err.(APIError); !ok || apiErr.HTTPStatus != http.StatusPreconditionFailed {
		t.Fatalf("expected precondition failed; got %v", err)
	}
}

func BenchmarkLoad(b *testing.B) {
	for b.Loop() {
		Load(testCfg, true)
	}
}

// TestAdminHandlerErrorHandling checks that handleError produces a
// non-200 response whose body decodes into an APIError with the
// original error message.
func TestAdminHandlerErrorHandling(t *testing.T) {
	initAdminMetrics()

	handler := adminHandler{
		mux: http.NewServeMux(),
	}

	handler.mux.Handle("/error", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		err := fmt.Errorf("test error")
		handler.handleError(w, r, err)
	}))

	req := httptest.NewRequest(http.MethodGet, "/error", nil)
	rr := httptest.NewRecorder()

	handler.ServeHTTP(rr, req)

	if rr.Code == http.StatusOK {
		t.Error("expected error response, got success")
	}

	var apiErr APIError
	if err := json.NewDecoder(rr.Body).Decode(&apiErr); err != nil {
		t.Fatalf("decoding response: %v", err)
	}
	if apiErr.Message != "test error" {
		t.Errorf("expected error message 'test error', got '%s'", apiErr.Message)
	}
}

// initAdminMetrics (re-)registers the admin request metrics, first
// unregistering any collectors left over from a previous test.
func initAdminMetrics() {
	if adminMetrics.requestErrors != nil {
		prometheus.Unregister(adminMetrics.requestErrors)
	}
	if adminMetrics.requestCount != nil {
		prometheus.Unregister(adminMetrics.requestCount)
	}

	adminMetrics.requestErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "caddy",
		Subsystem: "admin_http",
		Name:      "request_errors_total",
		Help:      "Number of errors that occurred handling admin endpoint requests",
	}, []string{"handler", "path", "method"})

	adminMetrics.requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "caddy",
		Subsystem: "admin_http",
		Name:      "requests_total",
		Help:      "Count of requests to the admin endpoint",
	}, []string{"handler", "path", "code", "method"}) // Added code and method labels

	prometheus.MustRegister(adminMetrics.requestErrors)
	prometheus.MustRegister(adminMetrics.requestCount)
}

func
TestAdminHandlerBuiltinRouteErrors(t *testing.T) {
	initAdminMetrics()

	cfg := &Config{
		Admin: &AdminConfig{
			Listen: "localhost:2019",
		},
	}

	// Build the admin handler directly (no listener active)
	addr, err := ParseNetworkAddress("localhost:2019")
	if err != nil {
		t.Fatalf("Failed to parse address: %v", err)
	}

	handler := cfg.Admin.newAdminHandler(addr, false, Context{})

	tests := []struct {
		name           string
		path           string
		method         string
		expectedStatus int
	}{
		{
			name:           "stop endpoint wrong method",
			path:           "/stop",
			method:         http.MethodGet,
			expectedStatus: http.StatusMethodNotAllowed,
		},
		{
			name:           "config endpoint wrong content-type",
			path:           "/config/",
			method:         http.MethodPost,
			expectedStatus: http.StatusBadRequest,
		},
		{
			name:           "config ID missing ID",
			path:           "/id/",
			method:         http.MethodGet,
			expectedStatus: http.StatusBadRequest,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			req := httptest.NewRequest(test.method, fmt.Sprintf("http://localhost:2019%s", test.path), nil)
			rr := httptest.NewRecorder()

			handler.ServeHTTP(rr, req)

			if rr.Code != test.expectedStatus {
				t.Errorf("expected status %d but got %d", test.expectedStatus, rr.Code)
			}

			// each failed request should bump the error counter exactly once
			metricValue := testGetMetricValue(map[string]string{
				"path":    test.path,
				"handler": "admin",
				"method":  test.method,
			})
			if metricValue != 1 {
				t.Errorf("expected error metric to be incremented once, got %v", metricValue)
			}
		})
	}
}

// testGetMetricValue reads the current value of the admin request-errors
// counter matching the given labels, or 0 if no such metric exists.
func testGetMetricValue(labels map[string]string) float64 {
	promLabels := prometheus.Labels{}
	maps.Copy(promLabels, labels)

	metric, err := adminMetrics.requestErrors.GetMetricWith(promLabels)
	if err != nil {
		return 0
	}

	pb := &dto.Metric{}
	metric.Write(pb)
	return pb.GetCounter().GetValue()
}

// mockRouter is a minimal AdminRouter implementation for tests.
type mockRouter struct {
	routes []AdminRoute
}

func (m mockRouter) Routes() []AdminRoute {
	return m.routes
}

// mockModule wraps mockRouter so it can be registered as an admin module.
type mockModule struct {
	mockRouter
}

func (m *mockModule) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID: "admin.api.mock",
		New: func() Module {
			mm := &mockModule{
				mockRouter: mockRouter{
					routes: m.routes,
				},
			}
			return mm
		},
	}
}

// TestNewAdminHandlerRouterRegistration verifies that routes contributed
// by an admin.api.* module are served and that the router is retained on
// the AdminConfig for later provisioning.
func TestNewAdminHandlerRouterRegistration(t *testing.T) {
	originalModules := make(map[string]ModuleInfo)
	maps.Copy(originalModules, modules)
	defer func() {
		modules = originalModules
	}()

	mockRoute := AdminRoute{
		Pattern: "/mock",
		Handler: AdminHandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
			w.WriteHeader(http.StatusOK)
			return nil
		}),
	}

	mock := &mockModule{
		mockRouter: mockRouter{
			routes: []AdminRoute{mockRoute},
		},
	}
	RegisterModule(mock)

	addr, err := ParseNetworkAddress("localhost:2019")
	if err != nil {
		t.Fatalf("Failed to parse address: %v", err)
	}

	admin := &AdminConfig{
		EnforceOrigin: false,
	}
	handler := admin.newAdminHandler(addr, false, Context{})

	req := httptest.NewRequest("GET", "/mock", nil)
	req.Host = "localhost:2019"
	rr := httptest.NewRecorder()

	handler.ServeHTTP(rr, req)

	if rr.Code != http.StatusOK {
		t.Errorf("Expected status code %d but got %d", http.StatusOK, rr.Code)
		t.Logf("Response body: %s", rr.Body.String())
	}

	if len(admin.routers) != 1 {
		t.Errorf("Expected 1 router to be stored, got %d", len(admin.routers))
	}
}

// mockProvisionableRouter is a mockRouter that also implements Provision,
// optionally failing with provisionErr.
type mockProvisionableRouter struct {
	mockRouter
	provisionErr error
	provisioned  bool
}

func (m *mockProvisionableRouter) Provision(Context) error {
	m.provisioned = true
	return m.provisionErr
}

// mockProvisionableModule registers mockProvisionableRouter as a module.
type mockProvisionableModule struct {
	*mockProvisionableRouter
}

func (m *mockProvisionableModule) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID: "admin.api.mock_provision",
		New: func() Module {
			mm := &mockProvisionableModule{
				mockProvisionableRouter: &mockProvisionableRouter{
					mockRouter:   m.mockRouter,
					provisionErr: m.provisionErr,
				},
			}
			return mm
		},
	}
}

// TestAdminRouterProvisioning checks that provisionAdminRouters surfaces
// provisioning errors and releases routers on success.
func TestAdminRouterProvisioning(t *testing.T) {
	tests := []struct {
		name         string
		provisionErr error
		wantErr      bool

		routersAfter int // expected number of routers after provisioning
	}{
		{
			name:         "successful provisioning",
			provisionErr: nil,
			wantErr:      false,
			routersAfter: 0,
		},
		{
			name:         "provisioning error",
			provisionErr: fmt.Errorf("provision failed"),
			wantErr:      true,
routersAfter: 1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { originalModules := make(map[string]ModuleInfo) maps.Copy(originalModules, modules) defer func() { modules = originalModules }() mockRoute := AdminRoute{ Pattern: "/mock", Handler: AdminHandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return nil }), } // Create provisionable module mock := &mockProvisionableModule{ mockProvisionableRouter: &mockProvisionableRouter{ mockRouter: mockRouter{ routes: []AdminRoute{mockRoute}, }, provisionErr: test.provisionErr, }, } RegisterModule(mock) admin := &AdminConfig{} addr, err := ParseNetworkAddress("localhost:2019") if err != nil { t.Fatalf("Failed to parse address: %v", err) } _ = admin.newAdminHandler(addr, false, Context{}) err = admin.provisionAdminRouters(Context{}) if test.wantErr { if err == nil { t.Error("Expected error but got nil") } } else { if err != nil { t.Errorf("Expected no error but got: %v", err) } } if len(admin.routers) != test.routersAfter { t.Errorf("Expected %d routers after provisioning, got %d", test.routersAfter, len(admin.routers)) } }) } } func TestAllowedOriginsUnixSocket(t *testing.T) { // see comment in allowedOrigins() as to why we do not fill out allowed origins for UDS tests := []struct { name string addr NetworkAddress origins []string expectOrigins []string }{ { name: "unix socket with default origins", addr: NetworkAddress{ Network: "unix", Host: "/tmp/caddy.sock", }, origins: nil, // default origins expectOrigins: []string{}, }, { name: "unix socket with custom origins", addr: NetworkAddress{ Network: "unix", Host: "/tmp/caddy.sock", }, origins: []string{"example.com"}, expectOrigins: []string{ "example.com", }, }, { name: "tcp socket on localhost gets all loopback addresses", addr: NetworkAddress{ Network: "tcp", Host: "localhost", StartPort: 2019, EndPort: 2019, }, origins: nil, expectOrigins: []string{ "localhost:2019", "[::1]:2019", "127.0.0.1:2019", }, }, } for i, test := range 
tests {
		t.Run(test.name, func(t *testing.T) {
			admin := AdminConfig{
				Origins: test.origins,
			}

			got := admin.allowedOrigins(test.addr)

			// compare hosts as unordered sets
			var gotOrigins []string
			for _, u := range got {
				gotOrigins = append(gotOrigins, u.Host)
			}

			if len(gotOrigins) != len(test.expectOrigins) {
				t.Errorf("%d: Expected %d origins but got %d", i, len(test.expectOrigins), len(gotOrigins))
				return
			}

			expectMap := make(map[string]struct{})
			for _, origin := range test.expectOrigins {
				expectMap[origin] = struct{}{}
			}

			gotMap := make(map[string]struct{})
			for _, origin := range gotOrigins {
				gotMap[origin] = struct{}{}
			}

			if !reflect.DeepEqual(expectMap, gotMap) {
				t.Errorf("%d: Origins mismatch.\nExpected: %v\nGot: %v", i, test.expectOrigins, gotOrigins)
			}
		})
	}
}

// TestReplaceRemoteAdminServer exercises replaceRemoteAdminServer with
// nil/partial configs, a bad listen address, and valid/invalid access
// control certificates.
func TestReplaceRemoteAdminServer(t *testing.T) {
	const testCert = `MIIDCTCCAfGgAwIBAgIUXsqJ1mY8pKlHQtI3HJ23x2eZPqwwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIzMDEwMTAwMDAwMFoXDTI0MDEw
MTAwMDAwMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA4O4S6BSoYcoxvRqI+h7yPOjF6KjntjzVVm9M+uHK4lzX
F1L3pSxJ2nDD4wZEV3FJ5yFOHVFqkG2vXG3BIczOlYG7UeNmKbQnKc5kZj3HGUrS
VGEktA4OJbeZhhWP15gcXN5eDM2eH3g9BFXVX6AURxLiUXzhNBUEZuj/OEyH9yEF
/qPCE+EjzVvWxvBXwgz/io4r4yok/Vq/bxJ6FlV6R7DX5oJSXyO0VEHZPi9DIyNU
kK3F/r4U1sWiJGWOs8i3YQWZ2ejh1C0aLFZpPcCGGgMNpoF31gyYP6ZuPDUyCXsE
g36UUw1JHNtIXYcLhnXuqj4A8TybTDpgXLqvwA9DBQIDAQABo1MwUTAdBgNVHQ4E
FgQUc13z30pFC63rr/HGKOE7E82vjXwwHwYDVR0jBBgwFoAUc13z30pFC63rr/HG
KOE7E82vjXwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAHO3j
oeiUXXJ7xD4P8Wj5t9d+E8lE1Xv1Dk3Z+EdG5+dan+RcToE42JJp9zB7FIh5Qz8g
W77LAjqh5oyqz3A2VJcyVgfE3uJP1R1mJM7JfGHf84QH4TZF2Q1RZY4SZs0VQ6+q
5wSlIZ4NXDy4Q4XkIJBGS61wT8IzYFXYBpx4PCP1Qj0PIE4sevEGwjsBIgxK307o
BxF8AWe6N6e4YZmQLGjQ+SeH0iwZb6vpkHyAY8Kj2hvK+cq2P7vU3VGi0t3r1F8L
IvrXHCvO2BMNJ/1UK1M4YNX8LYJqQhg9hEsIROe1OE/m3VhxIYMJI+qZXk9yHfgJ
vq+SH04xKhtFudVBAQ==`

	tests := []struct {
		name    string
		cfg     *Config
		wantErr bool
	}{
		{
			name:    "nil config",
			cfg:     nil,
			wantErr: false,
		},
		{
			name: "nil admin config",
			cfg: &Config{
				Admin: nil,
			},
			wantErr: false,
		},
		{
			name: "nil remote config",
			cfg: &Config{
				Admin: &AdminConfig{},
			},
			wantErr: false,
		},
		{
			name: "invalid listen address",
			cfg: &Config{
				Admin: &AdminConfig{
					Remote: &RemoteAdmin{
						Listen: "invalid:address",
					},
				},
			},
			wantErr: true,
		},
		{
			name: "valid config",
			cfg: &Config{
				Admin: &AdminConfig{
					Identity: &IdentityConfig{},
					Remote: &RemoteAdmin{
						Listen: "localhost:2021",
						AccessControl: []*AdminAccess{
							{
								PublicKeys:  []string{testCert},
								Permissions: []AdminPermissions{{Methods: []string{"GET"}, Paths: []string{"/test"}}},
							},
						},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "invalid certificate",
			cfg: &Config{
				Admin: &AdminConfig{
					Identity: &IdentityConfig{},
					Remote: &RemoteAdmin{
						Listen: "localhost:2021",
						AccessControl: []*AdminAccess{
							{
								PublicKeys:  []string{"invalid-cert-data"},
								Permissions: []AdminPermissions{{Methods: []string{"GET"}, Paths: []string{"/test"}}},
							},
						},
					},
				},
			},
			wantErr: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx := Context{
				Context: context.Background(),
				cfg:     test.cfg,
			}

			if test.cfg != nil {
				test.cfg.storage = &certmagic.FileStorage{Path: t.TempDir()}
			}

			if test.cfg != nil && test.cfg.Admin != nil && test.cfg.Admin.Identity != nil {
				identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
					GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
						return &certmagic.Config{}, nil
					},
				})
			}

			err := replaceRemoteAdminServer(ctx, test.cfg)

			if test.wantErr {
				if err == nil {
					t.Error("Expected error but got nil")
				}
			} else {
				if err != nil {
					t.Errorf("Expected no error but got: %v", err)
				}
			}

			// Clean up
			if remoteAdminServer != nil {
				_ = stopAdminServer(remoteAdminServer)
			}
		})
	}
}

// mockIssuer is a fake certificate issuer that records the config it
// was given and echoes the CSR back as the "certificate".
type mockIssuer struct {
	configSet *certmagic.Config
}

func (m *mockIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
	return &certmagic.IssuedCertificate{
		Certificate: []byte(csr.Raw),
	}, nil
}

func (m *mockIssuer) SetConfig(cfg
*certmagic.Config) {
	m.configSet = cfg
}

func (m *mockIssuer) IssuerKey() string { return "mock" }

// mockIssuerModule registers mockIssuer under the ACME issuer module ID
// so it is picked up as the default issuer in tests.
type mockIssuerModule struct {
	*mockIssuer
}

func (m *mockIssuerModule) CaddyModule() ModuleInfo {
	return ModuleInfo{
		ID: "tls.issuance.acme",
		New: func() Module {
			return &mockIssuerModule{mockIssuer: new(mockIssuer)}
		},
	}
}

// TestManageIdentity exercises manageIdentity with nil/partial configs,
// a default issuer, a custom issuer, and an unknown issuer module.
func TestManageIdentity(t *testing.T) {
	originalModules := make(map[string]ModuleInfo)
	maps.Copy(originalModules, modules)
	defer func() {
		modules = originalModules
	}()

	RegisterModule(&mockIssuerModule{})

	// pre-seeded (dummy) identity credentials for "localhost"
	certPEM := []byte(`-----BEGIN CERTIFICATE-----
MIIDujCCAqKgAwIBAgIIE31FZVaPXTUwDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE
BhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl
cm5ldCBBdXRob3JpdHkgRzIwHhcNMTQwMTI5MTMyNzQzWhcNMTQwNTI5MDAwMDAw
WjBpMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN
TW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEYMBYGA1UEAwwPbWFp
bC5nb29nbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3lcub2pUwkjC
5GJQA2ZZfJJi6d1QHhEmkX9VxKYGp6gagZuRqJWy9TXP6++1ZzQQxqZLD0TkuxZ9
8i9Nz00000CCBjCCAQQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGgG
CCsGAQUFBwEBBFwwWjArBggrBgEFBQcwAoYfaHR0cDovL3BraS5nb29nbGUuY29t
L0dJQUcyLmNydDArBggrBgEFBQcwAYYfaHR0cDovL2NsaWVudHMxLmdvb2dsZS5j
b20vb2NzcDAdBgNVHQ4EFgQUiJxtimAuTfwb+aUtBn5UYKreKvMwDAYDVR0TAQH/
BAIwADAfBgNVHSMEGDAWgBRK3QYWG7z2aLV29YG2u2IaulqBLzAXBgNVHREEEDAO
ggxtYWlsLmdvb2dsZTANBgkqhkiG9w0BAQUFAAOCAQEAMP6IWgNGZE8wP9TjFjSZ
3mmW3A1eIr0CuPwNZ2LJ5ZD1i70ojzcj4I9IdP5yPg9CAEV4hNASbM1LzfC7GmJE
tPzW5tRmpKVWZGRgTgZI8Hp/xZXMwLh9ZmXV4kESFAGj5G5FNvJyUV7R5Eh+7OZX
7G4jJ4ZGJh+5jzN9HdJJHQHGYNIYOzC7+HH9UMwCjX9vhQ4RjwFZJThS2Yb+y7pb
9yxTJZoXC6J0H5JpnZb7kZEJ+Xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-----END CERTIFICATE-----`)

	keyPEM := []byte(`-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDRS0LmTwUT0iwP
...
-----END PRIVATE KEY-----`)

	tmpDir, err := os.MkdirTemp("", "TestManageIdentity-")
	if err != nil {
		t.Fatal(err)
	}
	testStorage := certmagic.FileStorage{Path: tmpDir}

	// Clean up the temp dir after the test finishes. Ensure any background
	// certificate maintenance is stopped first to avoid RemoveAll races.
	t.Cleanup(func() {
		if identityCertCache != nil {
			identityCertCache.Stop()
			identityCertCache = nil
		}
		// Give goroutines a moment to exit and release file handles.
		time.Sleep(50 * time.Millisecond)
		_ = os.RemoveAll(tmpDir)
	})

	err = testStorage.Store(context.Background(), "localhost/localhost.crt", certPEM)
	if err != nil {
		t.Fatal(err)
	}
	err = testStorage.Store(context.Background(), "localhost/localhost.key", keyPEM)
	if err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name       string
		cfg        *Config
		wantErr    bool
		checkState func(*testing.T, *Config)
	}{
		{
			name: "nil config",
			cfg:  nil,
		},
		{
			name: "nil admin config",
			cfg: &Config{
				Admin: nil,
			},
		},
		{
			name: "nil identity config",
			cfg: &Config{
				Admin: &AdminConfig{},
			},
		},
		{
			name: "default issuer when none specified",
			cfg: &Config{
				Admin: &AdminConfig{
					Identity: &IdentityConfig{
						Identifiers: []string{"localhost"},
					},
				},
				storage: &testStorage,
			},
			checkState: func(t *testing.T, cfg *Config) {
				if len(cfg.Admin.Identity.issuers) == 0 {
					t.Error("Expected at least 1 issuer to be configured")
					return
				}
				if _, ok := cfg.Admin.Identity.issuers[0].(*mockIssuerModule); !ok {
					t.Error("Expected mock issuer to be configured")
				}
			},
		},
		{
			name: "custom issuer",
			cfg: &Config{
				Admin: &AdminConfig{
					Identity: &IdentityConfig{
						Identifiers: []string{"localhost"},
						IssuersRaw: []json.RawMessage{
							json.RawMessage(`{"module": "acme"}`),
						},
					},
				},
				storage: &testStorage,
			},
			checkState: func(t *testing.T, cfg *Config) {
				if len(cfg.Admin.Identity.issuers) != 1 {
					t.Fatalf("Expected 1 issuer, got %d", len(cfg.Admin.Identity.issuers))
				}
				mockIss, ok := cfg.Admin.Identity.issuers[0].(*mockIssuerModule)
				if !ok {
					t.Fatal("Expected mock issuer")
				}
				if mockIss.configSet == nil {
					t.Error("Issuer config was not set")
				}
			},
		},
		{
			name: "invalid issuer module",
			cfg: &Config{
				Admin: &AdminConfig{
					Identity: &IdentityConfig{
						Identifiers: []string{"localhost"},
						IssuersRaw: []json.RawMessage{
							json.RawMessage(`{"module": "doesnt_exist"}`),
						},
					},
				},
			},
			wantErr: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if identityCertCache != nil {
				// Reset the cert cache before each test
				identityCertCache.Stop()
				identityCertCache = nil
			}
			// Ensure any cache started by manageIdentity is stopped at the end
			defer func() {
				if identityCertCache != nil {
					identityCertCache.Stop()
					identityCertCache = nil
				}
			}()

			ctx := Context{
				Context:         context.Background(),
				cfg:             test.cfg,
				moduleInstances: make(map[string][]Module),
			}

			// If this test provided a FileStorage, set the package-level
			// testCertMagicStorageOverride so certmagicConfig will use it.
			if test.cfg != nil && test.cfg.storage != nil {
				testCertMagicStorageOverride = test.cfg.storage
				defer func() { testCertMagicStorageOverride = nil }()
			}

			err := manageIdentity(ctx, test.cfg)

			if test.wantErr {
				if err == nil {
					t.Error("Expected error but got nil")
				}
				return
			}
			if err != nil {
				t.Fatalf("Expected no error but got: %v", err)
			}

			if test.checkState != nil {
				test.checkState(t, test.cfg)
			}
		})
	}
}

================================================
FILE: caddy.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"bytes"
	"context"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"runtime/debug"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/google/uuid"
	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2/internal/filesystems"
	"github.com/caddyserver/caddy/v2/notify"
)

// Config is the top (or beginning) of the Caddy configuration structure.
// Caddy config is expressed natively as a JSON document. If you prefer
// not to work with JSON directly, there are [many config adapters](/docs/config-adapters)
// available that can convert various inputs into Caddy JSON.
//
// Many parts of this config are extensible through the use of Caddy modules.
// Fields which have a json.RawMessage type and which appear as dots (•••) in
// the online docs can be fulfilled by modules in a certain module
// namespace. The docs show which modules can be used in a given place.
//
// Whenever a module is used, its name must be given either inline as part of
// the module, or as the key to the module's value. The docs will make it clear
// which to use.
//
// Generally, all config settings are optional, as it is Caddy convention to
// have good, documented default values. If a parameter is required, the docs
// should say so.
//
// Go programs which are directly building a Config struct value should take
// care to populate the JSON-encodable fields of the struct (i.e. the fields
// with `json` struct tags) if employing the module lifecycle (e.g. Provision
// method calls).
type Config struct {
	Admin   *AdminConfig `json:"admin,omitempty"`
	Logging *Logging     `json:"logging,omitempty"`

	// StorageRaw is a storage module that defines how/where Caddy
	// stores assets (such as TLS certificates). The default storage
	// module is `caddy.storage.file_system` (the local file system),
	// and the default path
	// [depends on the OS and environment](/docs/conventions#data-directory).
	StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`

	// AppsRaw are the apps that Caddy will load and run. The
	// app module name is the key, and the app's config is the
	// associated value.
	AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="`

	// apps are the instantiated app modules, keyed by app module
	// name (the same keys as AppsRaw).
	apps map[string]App
	// failedApps is a map of apps that failed to provision with their underlying error.
	failedApps map[string]error
	// storage is the loaded storage module (see StorageRaw).
	storage certmagic.Storage

	eventEmitter eventEmitter

	// cancelFunc cancels this config's context with a cause.
	cancelFunc context.CancelCauseFunc

	// fileSystems is a dict of fileSystems that will later be loaded from and added to.
	fileSystems FileSystems
}

// App is a thing that Caddy runs.
type App interface {
	Start() error
	Stop() error
}

// Run runs the given config, replacing any existing config.
func Run(cfg *Config) error {
	cfgJSON, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	return Load(cfgJSON, true)
}

// Load loads the given config JSON and runs it only
// if it is different from the current config or
// forceReload is true.
func Load(cfgJSON []byte, forceReload bool) error {
	// tell the service manager a reload has begun
	if err := notify.Reloading(); err != nil {
		Log().Error("unable to notify service manager of reloading state", zap.Error(err))
	}

	// after reload, notify system of success or, if
	// failure, update with status (error message)
	var err error
	defer func() {
		if err != nil {
			if notifyErr := notify.Error(err, 0); notifyErr != nil {
				Log().Error("unable to notify to service manager of reload error",
					zap.Error(notifyErr),
					zap.String("reload_err", err.Error()))
			}
			return
		}
		if err := notify.Ready(); err != nil {
			Log().Error("unable to notify to service manager of ready state", zap.Error(err))
		}
	}()

	err = changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
	if errors.Is(err, errSameConfig) {
		err = nil // not really an error
	}

	return err
}

// changeConfig changes the current config (rawCfg) according to the
// method, traversed via the given path, and uses the given input as
// the new value (if applicable; i.e. "DELETE" doesn't have an input).
// If the resulting config is the same as the previous, no reload will
// occur unless forceReload is true. If the config is unchanged and not
// forcefully reloaded, then errSameConfig is returned. This function
// is safe for concurrent use.
//
// The ifMatchHeader can optionally be given a string of the format:
//
//	"<path> <hash>"
//
// where <path> is the absolute path in the config and <hash> is the
// expected hash of the config at that path. If the hash in the
// ifMatchHeader doesn't match the hash of the config, then an APIError
// with status 412 will be returned.
func changeConfig(method, path string, input []byte, ifMatchHeader string, forceReload bool) error {
	// read-only methods cannot mutate the config
	switch method {
	case http.MethodGet,
		http.MethodHead,
		http.MethodOptions,
		http.MethodConnect,
		http.MethodTrace:
		return fmt.Errorf("method not allowed")
	}

	rawCfgMu.Lock()
	defer rawCfgMu.Unlock()

	if ifMatchHeader != "" {
		// expect the first and last character to be quotes
		if len(ifMatchHeader) < 2 || ifMatchHeader[0] != '"' || ifMatchHeader[len(ifMatchHeader)-1] != '"' {
			return APIError{
				HTTPStatus: http.StatusBadRequest,
				Err:        fmt.Errorf("malformed If-Match header; expect quoted string"),
			}
		}

		// read out the parts
		parts := strings.Fields(ifMatchHeader[1 : len(ifMatchHeader)-1])
		if len(parts) != 2 {
			return APIError{
				HTTPStatus: http.StatusBadRequest,
				Err:        fmt.Errorf("malformed If-Match header; expect format \"<path> <hash>\""),
			}
		}

		// get the current hash of the config
		// at the given path
		hash := etagHasher()
		err := unsyncedConfigAccess(http.MethodGet, parts[0], nil, hash)
		if err != nil {
			return err
		}

		if hex.EncodeToString(hash.Sum(nil)) != parts[1] {
			return APIError{
				HTTPStatus: http.StatusPreconditionFailed,
				Err:        fmt.Errorf("If-Match header did not match current config hash"),
			}
		}
	}

	// apply the mutation to the in-memory (decoded) config
	err := unsyncedConfigAccess(method, path, input, nil)
	if err != nil {
		return err
	}

	// the mutation is complete, so encode the entire config as JSON
	newCfg, err := json.Marshal(rawCfg[rawConfigKey])
	if err != nil {
		return APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("encoding new config: %v", err),
		}
	}

	// if nothing changed, no need to do a whole reload unless the client forces it
	if !forceReload && bytes.Equal(rawCfgJSON, newCfg) {
		Log().Info("config is unchanged")
		return errSameConfig
	}

	// find any IDs in this config and index them
	idx := make(map[string]string)
	err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx)
	if err != nil {
		// indexing failed; restore the previous decoded config so
		// rawCfg stays consistent with what Caddy is still running
		if len(rawCfgJSON) > 0 {
			var oldCfg any
			err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
			if err2 != nil {
				err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)
			}
			rawCfg[rawConfigKey] = oldCfg
		} else {
			rawCfg[rawConfigKey] = nil
		}
		return APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("indexing config: %v", err),
		}
	}

	// load this new config; if it fails, we need to revert to
	// our old representation of caddy's actual config
	err = unsyncedDecodeAndRun(newCfg, true)
	if err != nil {
		if len(rawCfgJSON) > 0 {
			// restore old config state to keep it consistent
			// with what caddy is still running; we need to
			// unmarshal it again because it's likely that
			// pointers deep in our rawCfg map were modified
			var oldCfg any
			err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
			if err2 != nil {
				err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)
			}
			rawCfg[rawConfigKey] = oldCfg
		} else {
			rawCfg[rawConfigKey] = nil
		}
		return fmt.Errorf("loading new config: %v", err)
	}

	// success, so update our stored copy of the encoded
	// config to keep it consistent with what caddy is now
	// running (storing an encoded copy is not strictly
	// necessary, but avoids an extra json.Marshal for
	// each config change)
	rawCfgJSON = newCfg
	rawCfgIndex = idx

	return nil
}

// readConfig traverses the current config to path
// and writes its JSON encoding to out.
func readConfig(path string, out io.Writer) error {
	rawCfgMu.RLock()
	defer rawCfgMu.RUnlock()
	return unsyncedConfigAccess(http.MethodGet, path, nil, out)
}

// indexConfigObjects recursively searches ptr for object fields named
// "@id" and maps that ID value to the full configPath in the index.
// This function is NOT safe for concurrent access; obtain a write lock
// on currentCtxMu.
func indexConfigObjects(ptr any, configPath string, index map[string]string) error { switch val := ptr.(type) { case map[string]any: for k, v := range val { if k == idKey { var idStr string switch idVal := v.(type) { case string: idStr = idVal case float64: // all JSON numbers decode as float64 idStr = fmt.Sprintf("%v", idVal) default: return fmt.Errorf("%s: %s field must be a string or number", configPath, idKey) } if existingPath, ok := index[idStr]; ok { return fmt.Errorf("duplicate ID '%s' found at %s and %s", idStr, existingPath, configPath) } index[idStr] = configPath continue } // traverse this object property recursively err := indexConfigObjects(val[k], path.Join(configPath, k), index) if err != nil { return err } } case []any: // traverse each element of the array recursively for i := range val { err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index) if err != nil { return err } } } return nil } // unsyncedDecodeAndRun removes any meta fields (like @id tags) // from cfgJSON, decodes the result into a *Config, and runs // it as the new config, replacing any other current config. // It does NOT update the raw config state, as this is a // lower-level function; most callers will want to use Load // instead. A write lock on rawCfgMu is required! If // allowPersist is false, it will not be persisted to disk, // even if it is configured to. 
func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
	// remove any @id fields from the JSON, which would cause
	// loading to break since the field wouldn't be recognized
	strippedCfgJSON := RemoveMetaFields(cfgJSON)

	var newCfg *Config
	err := StrictUnmarshalJSON(strippedCfgJSON, &newCfg)
	if err != nil {
		return err
	}

	// prevent recursive config loads; that is a user error, and
	// although frequent config loads should be safe, we cannot
	// guarantee that in the presence of third party plugins, nor
	// do we want this error to go unnoticed (we assume it was a
	// pulled config if we're not allowed to persist it)
	if !allowPersist &&
		newCfg != nil &&
		newCfg.Admin != nil &&
		newCfg.Admin.Config != nil &&
		newCfg.Admin.Config.LoadRaw != nil &&
		newCfg.Admin.Config.LoadDelay <= 0 {
		return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_delay")
	}

	// run the new config and start all its apps
	ctx, err := run(newCfg, true)
	if err != nil {
		return err
	}

	// swap old context (including its config) with the new one
	currentCtxMu.Lock()
	oldCtx := currentCtx
	currentCtx = ctx
	currentCtxMu.Unlock()

	// Stop, Cleanup each old app
	unsyncedStop(oldCtx)

	// autosave a non-nil config, if not disabled
	if allowPersist &&
		newCfg != nil &&
		(newCfg.Admin == nil ||
			newCfg.Admin.Config == nil ||
			newCfg.Admin.Config.Persist == nil ||
			*newCfg.Admin.Config.Persist) {
		dir := filepath.Dir(ConfigAutosavePath)
		err := os.MkdirAll(dir, 0o700)
		if err != nil {
			// autosave failure is logged, not fatal: the config is already running
			Log().Error("unable to create folder for config autosave",
				zap.String("dir", dir),
				zap.Error(err))
		} else {
			err := os.WriteFile(ConfigAutosavePath, cfgJSON, 0o600)
			if err == nil {
				Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath))
			} else {
				Log().Error("unable to autosave config",
					zap.String("file", ConfigAutosavePath),
					zap.Error(err))
			}
		}
	}

	return nil
}

// run runs newCfg and starts all its apps if
// start is true. If any errors happen, cleanup
// is performed if any modules were provisioned;
// apps that were started already will be stopped,
// so this function should not leak resources if
// an error is returned. However, if no error is
// returned and start == false, you should cancel
// the config if you are not going to start it,
// so that each provisioned module will be
// cleaned up.
//
// This is a low-level function; most callers
// will want to use Run instead, which also
// updates the config's raw state.
func run(newCfg *Config, start bool) (Context, error) {
	ctx, err := provisionContext(newCfg, start)
	if err != nil {
		globalMetrics.configSuccess.Set(0)
		return ctx, err
	}

	if !start {
		return ctx, nil
	}

	defer func() {
		// if newCfg fails to start completely, clean up the already provisioned modules
		// partially copied from provisionContext
		if err != nil {
			globalMetrics.configSuccess.Set(0)
			ctx.cfg.cancelFunc(fmt.Errorf("configuration start error: %w", err))

			if currentCtx.cfg != nil {
				certmagic.Default.Storage = currentCtx.cfg.storage
			}
		}
	}()

	// Provision any admin routers which may need to access
	// some of the other apps at runtime
	err = ctx.cfg.Admin.provisionAdminRouters(ctx)
	if err != nil {
		return ctx, err
	}

	// Start
	err = func() error {
		started := make([]string, 0, len(ctx.cfg.apps))
		for name, a := range ctx.cfg.apps {
			err := a.Start()
			if err != nil {
				// an app failed to start, so we need to stop
				// all other apps that were already started
				for _, otherAppName := range started {
					err2 := ctx.cfg.apps[otherAppName].Stop()
					if err2 != nil {
						err = fmt.Errorf("%v; additionally, aborting app %s: %v",
							err, otherAppName, err2)
					}
				}
				return fmt.Errorf("%s app module: start: %v", name, err)
			}
			started = append(started, name)
		}
		return nil
	}()
	if err != nil {
		return ctx, err
	}

	globalMetrics.configSuccess.Set(1)
	globalMetrics.configSuccessTime.SetToCurrentTime()

	// TODO: This event is experimental and subject to change.
	ctx.emitEvent("started", nil)

	// now that the user's config is running, finish setting up anything else,
	// such as remote admin endpoint, config loader, etc.
	err = finishSettingUp(ctx, ctx.cfg)

	return ctx, err
}

// provisionContext creates a new context from the given configuration and provisions
// storage and apps.
// If `newCfg` is nil a new empty configuration will be created.
// If `replaceAdminServer` is true any currently active admin server will be replaced
// with a new admin server based on the provided configuration.
func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error) {
	// because we will need to roll back any state
	// modifications if this function errors, we
	// keep a single error value and scope all
	// sub-operations to their own functions to
	// ensure this error value does not get
	// overridden or missed when it should have
	// been set by a short assignment
	var err error

	if newCfg == nil {
		newCfg = new(Config)
	}

	// create a context within which to load
	// modules - essentially our new config's
	// execution environment; be sure that
	// cleanup occurs when we return if there
	// was an error; if no error, it will get
	// cleaned up on next config cycle
	ctx, cancelCause := NewContextWithCause(Context{Context: context.Background(), cfg: newCfg})
	defer func() {
		if err != nil {
			globalMetrics.configSuccess.Set(0)
			// if there were any errors during startup,
			// we should cancel the new context we created
			// since the associated config won't be used;
			// this will cause all modules that were newly
			// provisioned to clean themselves up
			cancelCause(fmt.Errorf("configuration error: %w", err))

			// also undo any other state changes we made
			if currentCtx.cfg != nil {
				certmagic.Default.Storage = currentCtx.cfg.storage
			}
		}
	}()
	newCfg.cancelFunc = cancelCause // clean up later

	// set up logging before anything bad happens
	if newCfg.Logging == nil {
		newCfg.Logging = new(Logging)
	}
	err = newCfg.Logging.openLogs(ctx)
	if err != nil {
		return ctx, err
	}

	// create the new filesystem map
	newCfg.fileSystems = &filesystems.FileSystemMap{}

	// prepare the new config for use
	newCfg.apps = make(map[string]App)
	newCfg.failedApps = make(map[string]error)

	// set up global storage and make it CertMagic's default storage, too
	err = func() error {
		if newCfg.StorageRaw != nil {
			val, err := ctx.LoadModule(newCfg, "StorageRaw")
			if err != nil {
				return fmt.Errorf("loading storage module: %v", err)
			}
			stor, err := val.(StorageConverter).CertMagicStorage()
			if err != nil {
				return fmt.Errorf("creating storage value: %v", err)
			}
			newCfg.storage = stor
		}

		if newCfg.storage == nil {
			newCfg.storage = DefaultStorage
		}
		certmagic.Default.Storage = newCfg.storage

		return nil
	}()
	if err != nil {
		return ctx, err
	}

	// start the admin endpoint (and stop any prior one)
	if replaceAdminServer {
		err = replaceLocalAdminServer(newCfg, ctx)
		if err != nil {
			return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
		}
	}

	// Load and Provision each app and their submodules
	err = func() error {
		for appName := range newCfg.AppsRaw {
			if _, err := ctx.App(appName); err != nil {
				return err
			}
		}
		return nil
	}()
	return ctx, err
}

// ProvisionContext creates a new context from the configuration and provisions storage
// and app modules.
// The function is intended for testing and advanced use cases only, typically `Run` should be
// use to ensure a fully functional caddy instance.
// EXPERIMENTAL: While this is public the interface and implementation details of this function may change.
func ProvisionContext(newCfg *Config) (Context, error) {
	return provisionContext(newCfg, false)
}

// finishSettingUp should be run after all apps have successfully started.
func finishSettingUp(ctx Context, cfg *Config) error {
	// establish this server's identity (only after apps are loaded
	// so that cert management of this endpoint doesn't prevent user's
	// servers from starting which likely also use HTTP/HTTPS ports;
	// but before remote management which may depend on these creds)
	err := manageIdentity(ctx, cfg)
	if err != nil {
		return fmt.Errorf("provisioning remote admin endpoint: %v", err)
	}

	// replace any remote admin endpoint
	err = replaceRemoteAdminServer(ctx, cfg)
	if err != nil {
		return fmt.Errorf("provisioning remote admin endpoint: %v", err)
	}

	// if dynamic config is requested, set that up and run it
	if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil {
		val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw")
		if err != nil {
			return fmt.Errorf("loading config loader module: %s", err)
		}

		logger := Log().Named("config_loader").With(
			zap.String("module", val.(Module).CaddyModule().ID.Name()),
			zap.Int("load_delay", int(cfg.Admin.Config.LoadDelay)))

		// runLoadedConfig applies a dynamically-loaded config;
		// it passes errSameConfig through so callers can retry
		runLoadedConfig := func(config []byte) error {
			logger.Info("applying dynamically-loaded config")
			err := changeConfig(http.MethodPost, "/"+rawConfigKey, config, "", false)
			if errors.Is(err, errSameConfig) {
				return err
			}
			if err != nil {
				logger.Error("failed to run dynamically-loaded config", zap.Error(err))
				return err
			}
			logger.Info("successfully applied dynamically-loaded config")
			return nil
		}

		if cfg.Admin.Config.LoadDelay > 0 {
			go func() {
				// the loop is here to iterate ONLY if there is an error, a no-op config load,
				// or an unchanged config; in which case we simply wait the delay and try again
				for {
					timer := time.NewTimer(time.Duration(cfg.Admin.Config.LoadDelay))
					select {
					case <-timer.C:
						loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
						if err != nil {
							logger.Error("failed loading dynamic config; will retry", zap.Error(err))
							continue
						}
						if loadedConfig == nil {
							logger.Info("dynamically-loaded config was nil; will retry")
							continue
						}
						err = runLoadedConfig(loadedConfig)
						if errors.Is(err, errSameConfig) {
							logger.Info("dynamically-loaded config was unchanged; will retry")
							continue
						}
					case <-ctx.Done():
						// drain the timer channel if Stop reports it already fired
						if !timer.Stop() {
							<-timer.C
						}
						logger.Info("stopping dynamic config loading")
					}
					break
				}
			}()
		} else {
			// if no LoadDelay is provided, will load config synchronously
			loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
			if err != nil {
				return fmt.Errorf("loading dynamic config from %T: %v", val, err)
			}
			// do this in a goroutine so current config can finish being loaded; otherwise deadlock
			go func() { _ = runLoadedConfig(loadedConfig) }()
		}
	}

	return nil
}

// ConfigLoader is a type that can load a Caddy config. If
// the return value is non-nil, it must be valid Caddy JSON;
// if nil or with non-nil error, it is considered to be a
// no-op load and may be retried later.
type ConfigLoader interface {
	LoadConfig(Context) ([]byte, error)
}

// Stop stops running the current configuration.
// It is the antithesis of Run(). This function
// will log any errors that occur during the
// stopping of individual apps and continue to
// stop the others. Stop should only be called
// if not replacing with a new config.
func Stop() error {
	currentCtxMu.RLock()
	ctx := currentCtx
	currentCtxMu.RUnlock()

	// rawCfgMu synchronizes the stop/start of apps (see unsyncedStop)
	rawCfgMu.Lock()
	unsyncedStop(ctx)

	currentCtxMu.Lock()
	currentCtx = Context{}
	currentCtxMu.Unlock()

	// clear the stored raw config state as well
	rawCfgJSON = nil
	rawCfgIndex = nil
	rawCfg[rawConfigKey] = nil
	rawCfgMu.Unlock()

	return nil
}

// unsyncedStop stops ctx from running, but has
// no locking around ctx. It is a no-op if ctx has a
// nil cfg. If any app returns an error when stopping,
// it is logged and the function continues stopping
// the next app. This function assumes all apps in
// ctx were successfully started first.
//
// A lock on rawCfgMu is required, even though this
// function does not access rawCfg, that lock
// synchronizes the stop/start of apps.
func unsyncedStop(ctx Context) {
	if ctx.cfg == nil {
		return
	}

	// TODO: This event is experimental and subject to change.
	ctx.emitEvent("stopping", nil)

	// stop each app
	for name, a := range ctx.cfg.apps {
		err := a.Stop()
		if err != nil {
			log.Printf("[ERROR] stop %s: %v", name, err)
		}
	}

	// clean up all modules
	ctx.cfg.cancelFunc(fmt.Errorf("stopping apps"))
}

// Validate loads, provisions, and validates
// cfg, but does not start running it.
func Validate(cfg *Config) error {
	_, err := run(cfg, false)
	if err == nil {
		cfg.cancelFunc(fmt.Errorf("validation complete")) // call Cleanup on all modules
	}
	return err
}

// exitProcess exits the process as gracefully as possible,
// but it always exits, even if there are errors doing so.
// It stops all apps, cleans up external locks, removes any
// PID file, and shuts down admin endpoint(s) in a goroutine.
// Errors are logged along the way, and an appropriate exit
// code is emitted.
func exitProcess(ctx context.Context, logger *zap.Logger) {
	// let the rest of the program know we're quitting; only do it once
	if !atomic.CompareAndSwapInt32(exiting, 0, 1) {
		return
	}

	// give the OS or service/process manager our 2 weeks' notice: we quit
	if err := notify.Stopping(); err != nil {
		Log().Error("unable to notify service manager of stopping state", zap.Error(err))
	}

	if logger == nil {
		logger = Log()
	}
	logger.Warn("exiting; byeee!! 👋")

	exitCode := ExitCodeSuccess
	lastContext := ActiveContext()

	// stop all apps
	if err := Stop(); err != nil {
		logger.Error("failed to stop apps", zap.Error(err))
		exitCode = ExitCodeFailedQuit
	}

	// clean up certmagic locks
	certmagic.CleanUpOwnLocks(ctx, logger)

	// remove pidfile
	if pidfile != "" {
		err := os.Remove(pidfile)
		if err != nil {
			logger.Error("cleaning up PID file:",
				zap.String("pidfile", pidfile),
				zap.Error(err))
			exitCode = ExitCodeFailedQuit
		}
	}

	// execute any process-exit callbacks: first those registered
	// on the active context, then the process-global ones
	for _, exitFunc := range lastContext.exitFuncs {
		exitFunc(ctx)
	}
	exitFuncsMu.Lock()
	for _, exitFunc := range exitFuncs {
		exitFunc(ctx)
	}
	exitFuncsMu.Unlock()

	// shut down admin endpoint(s) in goroutines so that
	// if this function was called from an admin handler,
	// it has a chance to return gracefully
	// use goroutine so that we can finish responding to API request
	go func() {
		defer func() {
			logger = logger.With(zap.Int("exit_code", exitCode))
			if exitCode == ExitCodeSuccess {
				logger.Info("shutdown complete")
			} else {
				logger.Error("unclean shutdown")
			}
			os.Exit(exitCode)
		}()

		if remoteAdminServer != nil {
			err := stopAdminServer(remoteAdminServer)
			if err != nil {
				exitCode = ExitCodeFailedQuit
				logger.Error("failed to stop remote admin server gracefully", zap.Error(err))
			}
		}
		if localAdminServer != nil {
			err := stopAdminServer(localAdminServer)
			if err != nil {
				exitCode = ExitCodeFailedQuit
				logger.Error("failed to stop local admin server gracefully", zap.Error(err))
			}
		}
	}()
}

var exiting = new(int32) // accessed atomically

// Exiting returns true if the process is exiting.
// EXPERIMENTAL API: subject to change or removal.
func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }

// OnExit registers a callback to invoke during process exit.
// This registration is PROCESS-GLOBAL, meaning that each
// function should only be registered once forever, NOT once
// per config load (etc).
//
// EXPERIMENTAL API: subject to change or removal.
func OnExit(f func(context.Context)) {
	exitFuncsMu.Lock()
	defer exitFuncsMu.Unlock()
	exitFuncs = append(exitFuncs, f)
}

var (
	exitFuncs   []func(context.Context)
	exitFuncsMu sync.Mutex
)

// Duration can be an integer or a string. An integer is
// interpreted as nanoseconds. If a string, it is a Go
// time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`.
type Duration time.Duration

// UnmarshalJSON satisfies json.Unmarshaler. A quoted value is
// parsed with ParseDuration; any other value is decoded as an
// integer number of nanoseconds.
func (d *Duration) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return io.EOF
	}
	var (
		dur time.Duration
		err error
	)
	quoted := b[0] == '"' && b[len(b)-1] == '"'
	if quoted {
		dur, err = ParseDuration(strings.Trim(string(b), `"`))
	} else {
		err = json.Unmarshal(b, &dur)
	}
	// store the parsed value even when err is non-nil
	// (dur is zero in that case), matching prior behavior
	*d = Duration(dur)
	return err
}

// ParseDuration parses a duration string, adding
// support for the "d" unit meaning number of days,
// where a day is assumed to be 24h. The maximum
// input string length is 1024.
func ParseDuration(s string) (time.Duration, error) {
	if len(s) > 1024 {
		return 0, fmt.Errorf("parsing duration: input string too long")
	}
	var withinNumber bool
	var numberStart int
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == 'd' {
			// rewrite "<days>d" as the equivalent number of
			// hours, then rescan from the same position
			days, err := strconv.ParseFloat(s[numberStart:i], 64)
			if err != nil {
				return 0, err
			}
			hoursStr := strconv.FormatFloat(days*24.0, 'f', -1, 64)
			s = s[:numberStart] + hoursStr + "h" + s[i+1:]
			i--
			continue
		}
		if !withinNumber {
			numberStart = i
		}
		withinNumber = (c >= '0' && c <= '9') || c == '.' || c == '-' || c == '+'
	}
	// all "d" units are gone; the stdlib can handle the rest
	return time.ParseDuration(s)
}

// InstanceID returns the UUID for this instance, and generates one if it
// does not already exist. The UUID is stored in the local data directory,
// regardless of storage configuration, since each instance is intended to
// have its own unique ID.
func InstanceID() (uuid.UUID, error) { appDataDir := AppDataDir() uuidFilePath := filepath.Join(appDataDir, "instance.uuid") uuidFileBytes, err := os.ReadFile(uuidFilePath) if errors.Is(err, fs.ErrNotExist) { uuid, err := uuid.NewRandom() if err != nil { return uuid, err } err = os.MkdirAll(appDataDir, 0o700) if err != nil { return uuid, err } err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600) return uuid, err } else if err != nil { return [16]byte{}, err } return uuid.ParseBytes(uuidFileBytes) } // CustomVersion is an optional string that overrides Caddy's // reported version. It can be helpful when downstream packagers // need to manually set Caddy's version. If no other version // information is available, the short form version (see // Version()) will be set to CustomVersion, and the full version // will include CustomVersion at the beginning. // // Set this variable during `go build` with `-ldflags`: // // -ldflags '-X github.com/caddyserver/caddy/v2.CustomVersion=v2.6.2' // // for example. var CustomVersion string // CustomBinaryName is an optional string that overrides the root // command name from the default of "caddy". This is useful for // downstream projects that embed Caddy but use a different binary // name. Shell completions and help text will use this name instead // of "caddy". // // Set this variable during `go build` with `-ldflags`: // // -ldflags '-X github.com/caddyserver/caddy/v2.CustomBinaryName=my_custom_caddy' // // for example. var CustomBinaryName string // CustomLongDescription is an optional string that overrides the // long description of the root Cobra command. This is useful for // downstream projects that embed Caddy but want different help // output. // // Set this variable in an init() function of a package that is // imported by your main: // // func init() { // caddy.CustomLongDescription = "My custom server based on Caddy..." // } // // for example. 
var CustomLongDescription string

// Version returns the Caddy version in a simple/short form, and
// a full version string. The short form will not have spaces and
// is intended for User-Agent strings and similar, but may be
// omitting valuable information. Note that Caddy must be compiled
// in a special way to properly embed complete version information.
// First this function tries to get the version from the embedded
// build info provided by go.mod dependencies; then it tries to
// get info from embedded VCS information, which requires having
// built Caddy from a git repository. If no version is available,
// this function returns "(devel)" because Go uses that, but for
// the simple form we change it to "unknown". If still no version
// is available (e.g. no VCS repo), then it will use CustomVersion;
// CustomVersion is always prepended to the full version string.
//
// See relevant Go issues: https://github.com/golang/go/issues/29228
// and https://github.com/golang/go/issues/50603.
//
// This function is experimental and subject to change or removal.
func Version() (simple, full string) {
	// the currently-recommended way to build Caddy involves
	// building it as a dependency so we can extract version
	// information from go.mod tooling; once the upstream
	// Go issues are fixed, we should just be able to use
	// bi.Main... hopefully.
	var module *debug.Module
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		// no build info at all; fall back to CustomVersion or "unknown"
		if CustomVersion != "" {
			full = CustomVersion
			simple = CustomVersion
			return simple, full
		}
		full = "unknown"
		simple = "unknown"
		return simple, full
	}

	// find the Caddy module in the dependency list
	for _, dep := range bi.Deps {
		if dep.Path == ImportPath {
			module = dep
			break
		}
	}

	if module != nil {
		simple, full = module.Version, module.Version
		if module.Sum != "" {
			full += " " + module.Sum
		}
		if module.Replace != nil {
			full += " => " + module.Replace.Path
			if module.Replace.Version != "" {
				simple = module.Replace.Version + "_custom"
				full += "@" + module.Replace.Version
			}
			if module.Replace.Sum != "" {
				full += " " + module.Replace.Sum
			}
		}
	}

	if full == "" {
		// not built as a dependency; try embedded VCS information
		var vcsRevision string
		var vcsTime time.Time
		var vcsModified bool

		for _, setting := range bi.Settings {
			switch setting.Key {
			case "vcs.revision":
				vcsRevision = setting.Value
			case "vcs.time":
				vcsTime, _ = time.Parse(time.RFC3339, setting.Value)
			case "vcs.modified":
				vcsModified, _ = strconv.ParseBool(setting.Value)
			}
		}

		if vcsRevision != "" {
			var modified string
			if vcsModified {
				modified = "+modified"
			}
			full = fmt.Sprintf("%s%s (%s)", vcsRevision, modified, vcsTime.Format(time.RFC822))
			simple = vcsRevision

			// use short checksum for simple, if hex-only
			// NOTE(review): assumes a hex revision has at least 8
			// chars — true for git SHAs; confirm for other VCSes
			if _, err := hex.DecodeString(simple); err == nil {
				simple = simple[:8]
			}

			// append date to simple since it can be convenient
			// to know the commit date as part of the version
			if !vcsTime.IsZero() {
				simple += "-" + vcsTime.Format("20060102")
			}
		}
	}

	if full == "" {
		if CustomVersion != "" {
			full = CustomVersion
		} else {
			full = "unknown"
		}
	} else if CustomVersion != "" {
		full = CustomVersion + " " + full
	}

	if simple == "" || simple == "(devel)" {
		if CustomVersion != "" {
			simple = CustomVersion
		} else {
			simple = "unknown"
		}
	}

	return simple, full
}

// Event represents something that has happened or is happening.
// An Event value is not synchronized, so it should be copied if
// being used in goroutines.
//
// EXPERIMENTAL: Events are subject to change.
type Event struct {
	// If non-nil, the event has been aborted, meaning
	// propagation has stopped to other handlers and
	// the code should stop what it was doing. Emitters
	// may choose to use this as a signal to adjust their
	// code path appropriately.
	Aborted error

	// The data associated with the event. Usually the
	// original emitter will be the only one to set or
	// change these values, but the field is exported
	// so handlers can have full access if needed.
	// However, this map is not synchronized, so
	// handlers must not use this map directly in new
	// goroutines; instead, copy the map to use it in a
	// goroutine. Data may be nil.
	Data map[string]any

	// id uniquely identifies this event instance.
	id uuid.UUID
	// ts is the time the event was created.
	ts time.Time
	// name is the lowercased event name.
	name string
	// origin is the module that emitted the event; may be nil.
	origin Module
}

// NewEvent creates a new event, but does not emit the event. To emit an
// event, call Emit() on the current instance of the caddyevents app instead.
//
// EXPERIMENTAL: Subject to change.
func NewEvent(ctx Context, name string, data map[string]any) (Event, error) {
	id, err := uuid.NewRandom()
	if err != nil {
		return Event{}, fmt.Errorf("generating new event ID: %v", err)
	}
	name = strings.ToLower(name)
	return Event{
		Data:   data,
		id:     id,
		ts:     time.Now(),
		name:   name,
		origin: ctx.Module(),
	}, nil
}

// ID returns the event's unique ID.
func (e Event) ID() uuid.UUID { return e.id }

// Timestamp returns the time the event was created.
func (e Event) Timestamp() time.Time { return e.ts }

// Name returns the event's name.
func (e Event) Name() string { return e.name }

// Origin returns the module that originated the event. May be nil,
// usually if caddy core emits the event.
func (e Event) Origin() Module { return e.origin }

// CloudEvent exports event e as a structure that, when
// serialized as JSON, is compatible with the
// CloudEvents spec.
func (e Event) CloudEvent() CloudEvent {
	// marshal error is ignored; on failure dataJSON is simply nil
	dataJSON, _ := json.Marshal(e.Data)
	var source string
	if e.Origin() == nil {
		source = "caddy"
	} else {
		source = string(e.Origin().CaddyModule().ID)
	}
	return CloudEvent{
		ID:              e.id.String(),
		Source:          source,
		SpecVersion:     "1.0",
		Type:            e.name,
		Time:            e.ts,
		DataContentType: "application/json",
		Data:            dataJSON,
	}
}

// CloudEvent is a JSON-serializable structure that
// is compatible with the CloudEvents specification.
// See https://cloudevents.io.
// EXPERIMENTAL: Subject to change.
type CloudEvent struct {
	ID              string          `json:"id"`
	Source          string          `json:"source"`
	SpecVersion     string          `json:"specversion"`
	Type            string          `json:"type"`
	Time            time.Time       `json:"time"`
	DataContentType string          `json:"datacontenttype,omitempty"`
	Data            json.RawMessage `json:"data,omitempty"`
}

// ErrEventAborted cancels an event.
var ErrEventAborted = errors.New("event aborted")

// ActiveContext returns the currently-active context.
// This function is experimental and might be changed
// or removed in the future.
func ActiveContext() Context {
	currentCtxMu.RLock()
	defer currentCtxMu.RUnlock()
	return currentCtx
}

// CtxKey is a value type for use with context.WithValue.
type CtxKey string

// This group of variables pertains to the current configuration.
var (
	// currentCtx is the root context for the currently-running
	// configuration, which can be accessed through this value.
	// If the Config contained in this value is not nil, then
	// a config is currently active/running.
	currentCtx   Context
	currentCtxMu sync.RWMutex

	// rawCfg is the current, generic-decoded configuration;
	// we initialize it as a map with one field ("config")
	// to maintain parity with the API endpoint and to avoid
	// the special case of having to access/mutate the variable
	// directly without traversing into it.
	rawCfg = map[string]any{
		rawConfigKey: nil,
	}

	// rawCfgJSON is the JSON-encoded form of rawCfg. Keeping
	// this around avoids an extra Marshal call during changes.
	rawCfgJSON []byte

	// rawCfgIndex is the map of user-assigned ID to expanded
	// path, for converting /id/ paths to /config/ paths.
	rawCfgIndex map[string]string

	// rawCfgMu protects all the rawCfg fields and also
	// essentially synchronizes config changes/reloads.
	rawCfgMu sync.RWMutex
)

// lastConfigFile and lastConfigAdapter remember the source config
// file and adapter used when Caddy was started via the CLI "run" command.
// These are consulted by the SIGUSR1 handler to attempt reloading from
// the same source. They are intentionally not set for other entrypoints
// such as "caddy start" or subcommands like file-server.
var (
	lastConfigMu      sync.RWMutex
	lastConfigFile    string
	lastConfigAdapter string
)

// reloadFromSourceFunc is the type of stored callback
// which is called when we receive a SIGUSR1 signal.
type reloadFromSourceFunc func(file, adapter string) error

// reloadFromSourceCallback is the stored callback
// which is called when we receive a SIGUSR1 signal.
var reloadFromSourceCallback reloadFromSourceFunc

// errReloadFromSourceUnavailable is returned when no reload-from-source callback is set.
var errReloadFromSourceUnavailable = errors.New("reload from source unavailable in this process") //nolint:unused

// SetLastConfig records the given source file and adapter as the
// last-known external configuration source. Intended to be called
// only when starting via "caddy run --config <file> --adapter <adapter>".
func SetLastConfig(file, adapter string, fn reloadFromSourceFunc) {
	lastConfigMu.Lock()
	lastConfigFile = file
	lastConfigAdapter = adapter
	reloadFromSourceCallback = fn
	lastConfigMu.Unlock()
}

// ClearLastConfigIfDifferent clears the recorded last-config if the provided
// source file/adapter do not match the recorded last-config. If both srcFile
// and srcAdapter are empty, the last-config is cleared.
func ClearLastConfigIfDifferent(srcFile, srcAdapter string) { if (srcFile != "" || srcAdapter != "") && lastConfigMatches(srcFile, srcAdapter) { return } SetLastConfig("", "", nil) } // getLastConfig returns the last-known config file and adapter. func getLastConfig() (file, adapter string, fn reloadFromSourceFunc) { lastConfigMu.RLock() f, a, cb := lastConfigFile, lastConfigAdapter, reloadFromSourceCallback lastConfigMu.RUnlock() return f, a, cb } // lastConfigMatches returns true if the provided source file and/or adapter // matches the recorded last-config. Matching rules (in priority order): // 1. If srcAdapter is provided and differs from the recorded adapter, no match. // 2. If srcFile exactly equals the recorded file, match. // 3. If both sides can be made absolute and equal, match. // 4. If basenames are equal, match. func lastConfigMatches(srcFile, srcAdapter string) bool { lf, la, _ := getLastConfig() // If adapter is provided, it must match. if srcAdapter != "" && srcAdapter != la { return false } // Quick equality check. if srcFile == lf { return true } // Try absolute path comparison. sAbs, sErr := filepath.Abs(srcFile) lAbs, lErr := filepath.Abs(lf) if sErr == nil && lErr == nil && sAbs == lAbs { return true } // Final fallback: basename equality. if filepath.Base(srcFile) == filepath.Base(lf) { return true } return false } // errSameConfig is returned if the new config is the same // as the old one. This isn't usually an actual, actionable // error; it's mostly a sentinel value. var errSameConfig = errors.New("config is unchanged") // ImportPath is the package import path for Caddy core. // This identifier may be removed in the future. 
const ImportPath = "github.com/caddyserver/caddy/v2"

================================================
FILE: caddy_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"context"
	"testing"
	"time"
)

// TestParseDuration exercises ParseDuration's support for the non-standard
// "d" (day) unit, including repeated, fractional, and negative day values
// mixed with standard time.ParseDuration units.
func TestParseDuration(t *testing.T) {
	const day = 24 * time.Hour
	for i, tc := range []struct {
		input  string
		expect time.Duration
	}{
		{
			input:  "3h",
			expect: 3 * time.Hour,
		},
		{
			input:  "1d",
			expect: day,
		},
		{
			input:  "1d30m",
			expect: day + 30*time.Minute,
		},
		{
			input:  "1m2d",
			expect: time.Minute + day*2,
		},
		{
			input:  "1m2d30s",
			expect: time.Minute + day*2 + 30*time.Second,
		},
		{
			// repeated units accumulate
			input:  "1d2d",
			expect: 3 * day,
		},
		{
			input:  "1.5d",
			expect: time.Duration(1.5 * float64(day)),
		},
		{
			input:  "4m1.25d",
			expect: 4*time.Minute + time.Duration(1.25*float64(day)),
		},
		{
			// leading sign applies to the whole duration
			input:  "-1.25d12h",
			expect: time.Duration(-1.25*float64(day)) - 12*time.Hour,
		},
	} {
		actual, err := ParseDuration(tc.input)
		if err != nil {
			t.Errorf("Test %d ('%s'): Got error: %v", i, tc.input, err)
			continue
		}
		if actual != tc.expect {
			t.Errorf("Test %d ('%s'): Expected=%s Actual=%s", i, tc.input, tc.expect, actual)
		}
	}
}

// TestEvent_CloudEvent_NilOrigin verifies that converting an event with no
// origin module to a CloudEvent does not panic and attributes the event to
// the "caddy" source.
func TestEvent_CloudEvent_NilOrigin(t *testing.T) {
	ctx, _ := NewContext(Context{Context: context.Background()})
	// module will be nil by default
	event, err := NewEvent(ctx, "started", nil)
	if err != nil {
		t.Fatalf("NewEvent() error = %v", err)
	}
	// This should not panic
	ce := event.CloudEvent()
	if ce.Source != "caddy" {
		t.Errorf("Expected CloudEvent Source to be 'caddy', got '%s'", ce.Source)
	}
	if ce.Type != "started" {
		t.Errorf("Expected CloudEvent Type to be 'started', got '%s'", ce.Type)
	}
}

================================================
FILE: caddyconfig/caddyfile/adapter.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyfile

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
)

// Adapter adapts Caddyfile to Caddy JSON.
type Adapter struct {
	ServerType ServerType
}

// Adapt converts the Caddyfile config in body to Caddy JSON.
func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
	if a.ServerType == nil {
		return nil, nil, fmt.Errorf("no server type")
	}
	if options == nil {
		options = make(map[string]any)
	}
	// the filename option is used only for nicer parse errors and
	// formatting warnings; default it when absent
	filename, _ := options["filename"].(string)
	if filename == "" {
		filename = "Caddyfile"
	}
	serverBlocks, err := Parse(filename, body)
	if err != nil {
		return nil, nil, err
	}
	cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
	if err != nil {
		return nil, warnings, err
	}
	// lint check: see if input was properly formatted; sometimes messy files parse
	// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
	if warning, different := FormattingDifference(filename, body); different {
		warnings = append(warnings, warning)
	}
	result, err := json.Marshal(cfg)
	return result, warnings, err
}

// FormattingDifference returns a warning and true if the formatted version
// is any different from the input; empty warning and false otherwise.
// TODO: also perform this check on imported files
func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
	// replace windows-style newlines to normalize comparison
	normalizedBody := bytes.ReplaceAll(body, []byte("\r\n"), []byte("\n"))

	formatted := Format(normalizedBody)
	if bytes.Equal(formatted, normalizedBody) {
		return caddyconfig.Warning{}, false
	}

	// find where the difference is: count newlines up to the first
	// byte at which input and formatted output diverge
	line := 1
	for i, ch := range normalizedBody {
		if i >= len(formatted) || ch != formatted[i] {
			break
		}
		if ch == '\n' {
			line++
		}
	}
	return caddyconfig.Warning{
		File:    filename,
		Line:    line,
		Message: "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
	}, true
}

// Unmarshaler is a type that can unmarshal Caddyfile tokens to
// set itself up for a JSON encoding. The goal of an unmarshaler
// is not to set itself up for actual use, but to set itself up for
// being marshaled into JSON. Caddyfile-unmarshaled values will not
// be used directly; they will be encoded as JSON and then used from
// that. Implementations _may_ be able to support multiple segments
// (instances of their directive or batch of tokens); typically this
// means wrapping parsing logic in a loop: `for d.Next() { ... }`.
// More commonly, only a single segment is supported, so a simple
// `d.Next()` at the start should be used to consume the module
// identifier token (directive name, etc).
type Unmarshaler interface {
	UnmarshalCaddyfile(d *Dispenser) error
}

// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
	// Setup takes the server blocks which contain tokens,
	// as well as options (e.g. CLI flags) and creates a
	// Caddy config, along with any warnings or an error.
	Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
}

// UnmarshalModule instantiates a module with the given ID and invokes
// UnmarshalCaddyfile on the new value using the immediate next segment
// of d as input. In other words, d's next token should be the first
// token of the module's Caddyfile input.
//
// This function is used when the next segment of Caddyfile tokens
// belongs to another Caddy module. The returned value is often
// type-asserted to the module's associated type for practical use
// when setting up a config.
func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) { mod, err := caddy.GetModule(moduleID) if err != nil { return nil, d.Errf("getting module named '%s': %v", moduleID, err) } inst := mod.New() unm, ok := inst.(Unmarshaler) if !ok { return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst) } err = unm.UnmarshalCaddyfile(d.NewFromNextSegment()) if err != nil { return nil, err } return unm, nil } // Interface guard var _ caddyconfig.Adapter = (*Adapter)(nil) ================================================ FILE: caddyconfig/caddyfile/dispenser.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyfile import ( "errors" "fmt" "io" "log" "strconv" "strings" ) // Dispenser is a type that dispenses tokens, similarly to a lexer, // except that it can do so with some notion of structure. An empty // Dispenser is invalid; call NewDispenser to make a proper instance. type Dispenser struct { tokens []Token cursor int nesting int // A map of arbitrary context data that can be used // to pass through some information to unmarshalers. context map[string]any } // NewDispenser returns a Dispenser filled with the given tokens. 
func NewDispenser(tokens []Token) *Dispenser { return &Dispenser{ tokens: tokens, cursor: -1, } } // NewTestDispenser parses input into tokens and creates a new // Dispenser for test purposes only; any errors are fatal. func NewTestDispenser(input string) *Dispenser { tokens, err := allTokens("Testfile", []byte(input)) if err != nil && err != io.EOF { log.Fatalf("getting all tokens from input: %v", err) } return NewDispenser(tokens) } // Next loads the next token. Returns true if a token // was loaded; false otherwise. If false, all tokens // have been consumed. func (d *Dispenser) Next() bool { if d.cursor < len(d.tokens)-1 { d.cursor++ return true } return false } // Prev moves to the previous token. It does the inverse // of Next(), except this function may decrement the cursor // to -1 so that the next call to Next() points to the // first token; this allows dispensing to "start over". This // method returns true if the cursor ends up pointing to a // valid token. func (d *Dispenser) Prev() bool { if d.cursor > -1 { d.cursor-- return d.cursor > -1 } return false } // NextArg loads the next token if it is on the same // line and if it is not a block opening (open curly // brace). Returns true if an argument token was // loaded; false otherwise. If false, all tokens on // the line have been consumed except for potentially // a block opening. It handles imported tokens // correctly. func (d *Dispenser) NextArg() bool { if !d.nextOnSameLine() { return false } if d.Val() == "{" { // roll back; a block opening is not an argument d.cursor-- return false } return true } // nextOnSameLine advances the cursor if the next // token is on the same line of the same file. 
func (d *Dispenser) nextOnSameLine() bool { if d.cursor < 0 { d.cursor++ return true } if d.cursor >= len(d.tokens)-1 { return false } curr := d.tokens[d.cursor] next := d.tokens[d.cursor+1] if !isNextOnNewLine(curr, next) { d.cursor++ return true } return false } // NextLine loads the next token only if it is not on the same // line as the current token, and returns true if a token was // loaded; false otherwise. If false, there is not another token // or it is on the same line. It handles imported tokens correctly. func (d *Dispenser) NextLine() bool { if d.cursor < 0 { d.cursor++ return true } if d.cursor >= len(d.tokens)-1 { return false } curr := d.tokens[d.cursor] next := d.tokens[d.cursor+1] if isNextOnNewLine(curr, next) { d.cursor++ return true } return false } // NextBlock can be used as the condition of a for loop // to load the next token as long as it opens a block or // is already in a block nested more than initialNestingLevel. // In other words, a loop over NextBlock() will iterate // all tokens in the block assuming the next token is an // open curly brace, until the matching closing brace. // The open and closing brace tokens for the outer-most // block will be consumed internally and omitted from // the iteration. // // Proper use of this method looks like this: // // for nesting := d.Nesting(); d.NextBlock(nesting); { // } // // However, in simple cases where it is known that the // Dispenser is new and has not already traversed state // by a loop over NextBlock(), this will do: // // for d.NextBlock(0) { // } // // As with other token parsing logic, a loop over // NextBlock() should be contained within a loop over // Next(), as it is usually prudent to skip the initial // token. 
func (d *Dispenser) NextBlock(initialNestingLevel int) bool { if d.nesting > initialNestingLevel { if !d.Next() { return false // should be EOF error } if d.Val() == "}" && !d.nextOnSameLine() { d.nesting-- } else if d.Val() == "{" && !d.nextOnSameLine() { d.nesting++ } return d.nesting > initialNestingLevel } if !d.nextOnSameLine() { // block must open on same line return false } if d.Val() != "{" { d.cursor-- // roll back if not opening brace return false } d.Next() // consume open curly brace if d.Val() == "}" { return false // open and then closed right away } d.nesting++ return true } // Nesting returns the current nesting level. Necessary // if using NextBlock() func (d *Dispenser) Nesting() int { return d.nesting } // Val gets the text of the current token. If there is no token // loaded, it returns empty string. func (d *Dispenser) Val() string { if d.cursor < 0 || d.cursor >= len(d.tokens) { return "" } return d.tokens[d.cursor].Text } // ValRaw gets the raw text of the current token (including quotes). // If the token was a heredoc, then the delimiter is not included, // because that is not relevant to any unmarshaling logic at this time. // If there is no token loaded, it returns empty string. func (d *Dispenser) ValRaw() string { if d.cursor < 0 || d.cursor >= len(d.tokens) { return "" } quote := d.tokens[d.cursor].wasQuoted if quote > 0 && quote != '<' { // string literal return string(quote) + d.tokens[d.cursor].Text + string(quote) } return d.tokens[d.cursor].Text } // ScalarVal gets value of the current token, converted to the closest // scalar type. If there is no token loaded, it returns nil. 
func (d *Dispenser) ScalarVal() any { if d.cursor < 0 || d.cursor >= len(d.tokens) { return nil } quote := d.tokens[d.cursor].wasQuoted text := d.tokens[d.cursor].Text if quote > 0 { return text // string literal } if num, err := strconv.Atoi(text); err == nil { return num } if num, err := strconv.ParseFloat(text, 64); err == nil { return num } if bool, err := strconv.ParseBool(text); err == nil { return bool } return text } // Line gets the line number of the current token. // If there is no token loaded, it returns 0. func (d *Dispenser) Line() int { if d.cursor < 0 || d.cursor >= len(d.tokens) { return 0 } return d.tokens[d.cursor].Line } // File gets the filename where the current token originated. func (d *Dispenser) File() string { if d.cursor < 0 || d.cursor >= len(d.tokens) { return "" } return d.tokens[d.cursor].File } // Args is a convenience function that loads the next arguments // (tokens on the same line) into an arbitrary number of strings // pointed to in targets. If there are not enough argument tokens // available to fill targets, false is returned and the remaining // targets are left unchanged. If all the targets are filled, // then true is returned. func (d *Dispenser) Args(targets ...*string) bool { for i := range targets { if !d.NextArg() { return false } *targets[i] = d.Val() } return true } // AllArgs is like Args, but if there are more argument tokens // available than there are targets, false is returned. The // number of available argument tokens must match the number of // targets exactly to return true. func (d *Dispenser) AllArgs(targets ...*string) bool { if !d.Args(targets...) { return false } if d.NextArg() { d.Prev() return false } return true } // CountRemainingArgs counts the amount of remaining arguments // (tokens on the same line) without consuming the tokens. 
func (d *Dispenser) CountRemainingArgs() int { count := 0 for d.NextArg() { count++ } for i := 0; i < count; i++ { d.Prev() } return count } // RemainingArgs loads any more arguments (tokens on the same line) // into a slice of strings and returns them. Open curly brace tokens // also indicate the end of arguments, and the curly brace is not // included in the return value nor is it loaded. func (d *Dispenser) RemainingArgs() []string { var args []string for d.NextArg() { args = append(args, d.Val()) } return args } // RemainingArgsRaw loads any more arguments (tokens on the same line, // retaining quotes) into a slice of strings and returns them. // Open curly brace tokens also indicate the end of arguments, // and the curly brace is not included in the return value nor is it loaded. func (d *Dispenser) RemainingArgsRaw() []string { var args []string for d.NextArg() { args = append(args, d.ValRaw()) } return args } // RemainingArgsAsTokens loads any more arguments (tokens on the same line) // into a slice of Token-structs and returns them. Open curly brace tokens // also indicate the end of arguments, and the curly brace is not included // in the return value nor is it loaded. func (d *Dispenser) RemainingArgsAsTokens() []Token { var args []Token for d.NextArg() { args = append(args, d.Token()) } return args } // NewFromNextSegment returns a new dispenser with a copy of // the tokens from the current token until the end of the // "directive" whether that be to the end of the line or // the end of a block that starts at the end of the line; // in other words, until the end of the segment. func (d *Dispenser) NewFromNextSegment() *Dispenser { return NewDispenser(d.NextSegment()) } // NextSegment returns a copy of the tokens from the current // token until the end of the line or block that starts at // the end of the line. 
func (d *Dispenser) NextSegment() Segment { tkns := Segment{d.Token()} for d.NextArg() { tkns = append(tkns, d.Token()) } var openedBlock bool for nesting := d.Nesting(); d.NextBlock(nesting); { if !openedBlock { // because NextBlock() consumes the initial open // curly brace, we rewind here to append it, since // our case is special in that we want the new // dispenser to have all the tokens including // surrounding curly braces d.Prev() tkns = append(tkns, d.Token()) d.Next() openedBlock = true } tkns = append(tkns, d.Token()) } if openedBlock { // include closing brace tkns = append(tkns, d.Token()) // do not consume the closing curly brace; the // next iteration of the enclosing loop will // call Next() and consume it } return tkns } // Token returns the current token. func (d *Dispenser) Token() Token { if d.cursor < 0 || d.cursor >= len(d.tokens) { return Token{} } return d.tokens[d.cursor] } // Reset sets d's cursor to the beginning, as // if this was a new and unused dispenser. func (d *Dispenser) Reset() { d.cursor = -1 d.nesting = 0 } // ArgErr returns an argument error, meaning that another // argument was expected but not found. In other words, // a line break or open curly brace was encountered instead of // an argument. func (d *Dispenser) ArgErr() error { if d.Val() == "{" { return d.Err("unexpected token '{', expecting argument") } return d.Errf("wrong argument count or unexpected line ending after '%s'", d.Val()) } // SyntaxErr creates a generic syntax error which explains what was // found and what was expected. func (d *Dispenser) SyntaxErr(expected string) error { msg := fmt.Sprintf("syntax error: unexpected token '%s', expecting '%s', at %s:%d import chain: ['%s']", d.Val(), expected, d.File(), d.Line(), strings.Join(d.Token().imports, "','")) return errors.New(msg) } // EOFErr returns an error indicating that the dispenser reached // the end of the input when searching for the next token. 
func (d *Dispenser) EOFErr() error { return d.Errf("unexpected EOF") } // Err generates a custom parse-time error with a message of msg. func (d *Dispenser) Err(msg string) error { return d.WrapErr(errors.New(msg)) } // Errf is like Err, but for formatted error messages func (d *Dispenser) Errf(format string, args ...any) error { return d.WrapErr(fmt.Errorf(format, args...)) } // WrapErr takes an existing error and adds the Caddyfile file and line number. func (d *Dispenser) WrapErr(err error) error { if len(d.Token().imports) > 0 { return fmt.Errorf("%w, at %s:%d import chain ['%s']", err, d.File(), d.Line(), strings.Join(d.Token().imports, "','")) } return fmt.Errorf("%w, at %s:%d", err, d.File(), d.Line()) } // Delete deletes the current token and returns the updated slice // of tokens. The cursor is not advanced to the next token. // Because deletion modifies the underlying slice, this method // should only be called if you have access to the original slice // of tokens and/or are using the slice of tokens outside this // Dispenser instance. If you do not re-assign the slice with the // return value of this method, inconsistencies in the token // array will become apparent (or worse, hide from you like they // did me for 3 and a half freaking hours late one night). func (d *Dispenser) Delete() []Token { if d.cursor >= 0 && d.cursor <= len(d.tokens)-1 { d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...) d.cursor-- } return d.tokens } // DeleteN is the same as Delete, but can delete many tokens at once. // If there aren't N tokens available to delete, none are deleted. func (d *Dispenser) DeleteN(amount int) []Token { if amount > 0 && d.cursor >= (amount-1) && d.cursor <= len(d.tokens)-1 { d.tokens = append(d.tokens[:d.cursor-(amount-1)], d.tokens[d.cursor+1:]...) d.cursor -= amount } return d.tokens } // SetContext sets a key-value pair in the context map. 
func (d *Dispenser) SetContext(key string, value any) { if d.context == nil { d.context = make(map[string]any) } d.context[key] = value } // GetContext gets the value of a key in the context map. func (d *Dispenser) GetContext(key string) any { if d.context == nil { return nil } return d.context[key] } // GetContextString gets the value of a key in the context map // as a string, or an empty string if the key does not exist. func (d *Dispenser) GetContextString(key string) string { if d.context == nil { return "" } if val, ok := d.context[key].(string); ok { return val } return "" } // isNewLine determines whether the current token is on a different // line (higher line number) than the previous token. It handles imported // tokens correctly. If there isn't a previous token, it returns true. func (d *Dispenser) isNewLine() bool { if d.cursor < 1 { return true } if d.cursor > len(d.tokens)-1 { return false } prev := d.tokens[d.cursor-1] curr := d.tokens[d.cursor] return isNextOnNewLine(prev, curr) } // isNextOnNewLine determines whether the current token is on a different // line (higher line number) than the next token. It handles imported // tokens correctly. If there isn't a next token, it returns true. func (d *Dispenser) isNextOnNewLine() bool { if d.cursor < 0 { return false } if d.cursor >= len(d.tokens)-1 { return true } curr := d.tokens[d.cursor] next := d.tokens[d.cursor+1] return isNextOnNewLine(curr, next) } const MatcherNameCtxKey = "matcher_name" ================================================ FILE: caddyconfig/caddyfile/dispenser_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyfile import ( "errors" "reflect" "strings" "testing" ) func TestDispenser_Val_Next(t *testing.T) { input := `host:port dir1 arg1 dir2 arg2 arg3 dir3` d := NewTestDispenser(input) if val := d.Val(); val != "" { t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val) } assertNext := func(shouldLoad bool, expectedCursor int, expectedVal string) { if loaded := d.Next(); loaded != shouldLoad { t.Errorf("Next(): Expected %v but got %v instead (val '%s')", shouldLoad, loaded, d.Val()) } if d.cursor != expectedCursor { t.Errorf("Expected cursor to be %d, but was %d", expectedCursor, d.cursor) } if d.nesting != 0 { t.Errorf("Nesting should be 0, was %d instead", d.nesting) } if val := d.Val(); val != expectedVal { t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) } } assertNext(true, 0, "host:port") assertNext(true, 1, "dir1") assertNext(true, 2, "arg1") assertNext(true, 3, "dir2") assertNext(true, 4, "arg2") assertNext(true, 5, "arg3") assertNext(true, 6, "dir3") // Note: This next test simply asserts existing behavior. // If desired, we may wish to empty the token value after // reading past the EOF. Open an issue if you want this change. 
assertNext(false, 6, "dir3") } func TestDispenser_NextArg(t *testing.T) { input := `dir1 arg1 dir2 arg2 arg3 dir3` d := NewTestDispenser(input) assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) { if d.Next() != shouldLoad { t.Errorf("Next(): Should load token but got false instead (val: '%s')", d.Val()) } if d.cursor != expectedCursor { t.Errorf("Next(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor) } if val := d.Val(); val != expectedVal { t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) } } assertNextArg := func(expectedVal string, loadAnother bool, expectedCursor int) { if !d.NextArg() { t.Error("NextArg(): Should load next argument but got false instead") } if d.cursor != expectedCursor { t.Errorf("NextArg(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor) } if val := d.Val(); val != expectedVal { t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) } if !loadAnother { if d.NextArg() { t.Fatalf("NextArg(): Should NOT load another argument, but got true instead (val: '%s')", d.Val()) } if d.cursor != expectedCursor { t.Errorf("NextArg(): Expected cursor to remain at %d, but it was %d", expectedCursor, d.cursor) } } } assertNext(true, "dir1", 0) assertNextArg("arg1", false, 1) assertNext(true, "dir2", 2) assertNextArg("arg2", true, 3) assertNextArg("arg3", false, 4) assertNext(true, "dir3", 5) assertNext(false, "dir3", 5) } func TestDispenser_NextLine(t *testing.T) { input := `host:port dir1 arg1 dir2 arg2 arg3` d := NewTestDispenser(input) assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) { if d.NextLine() != shouldLoad { t.Errorf("NextLine(): Should load token but got false instead (val: '%s')", d.Val()) } if d.cursor != expectedCursor { t.Errorf("NextLine(): Expected cursor to be %d, instead was %d", expectedCursor, d.cursor) } if val := d.Val(); val != expectedVal { t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) } 
} assertNextLine(true, "host:port", 0) assertNextLine(true, "dir1", 1) assertNextLine(false, "dir1", 1) d.Next() // arg1 assertNextLine(true, "dir2", 3) assertNextLine(false, "dir2", 3) d.Next() // arg2 assertNextLine(false, "arg2", 4) d.Next() // arg3 assertNextLine(false, "arg3", 5) } func TestDispenser_NextBlock(t *testing.T) { input := `foobar1 { sub1 arg1 sub2 } foobar2 { }` d := NewTestDispenser(input) assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) { if loaded := d.NextBlock(0); loaded != shouldLoad { t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded) } if d.cursor != expectedCursor { t.Errorf("NextBlock(): Expected cursor to be %d, was %d", expectedCursor, d.cursor) } if d.nesting != expectedNesting { t.Errorf("NextBlock(): Nesting should be %d, not %d", expectedNesting, d.nesting) } } assertNextBlock(false, -1, 0) d.Next() // foobar1 assertNextBlock(true, 2, 1) assertNextBlock(true, 3, 1) assertNextBlock(true, 4, 1) assertNextBlock(false, 5, 0) d.Next() // foobar2 assertNextBlock(false, 8, 0) // empty block is as if it didn't exist } func TestDispenser_Args(t *testing.T) { var s1, s2, s3 string input := `dir1 arg1 arg2 arg3 dir2 arg4 arg5 dir3 arg6 arg7 dir4` d := NewTestDispenser(input) d.Next() // dir1 // As many strings as arguments if all := d.Args(&s1, &s2, &s3); !all { t.Error("Args(): Expected true, got false") } if s1 != "arg1" { t.Errorf("Args(): Expected s1 to be 'arg1', got '%s'", s1) } if s2 != "arg2" { t.Errorf("Args(): Expected s2 to be 'arg2', got '%s'", s2) } if s3 != "arg3" { t.Errorf("Args(): Expected s3 to be 'arg3', got '%s'", s3) } d.Next() // dir2 // More strings than arguments if all := d.Args(&s1, &s2, &s3); all { t.Error("Args(): Expected false, got true") } if s1 != "arg4" { t.Errorf("Args(): Expected s1 to be 'arg4', got '%s'", s1) } if s2 != "arg5" { t.Errorf("Args(): Expected s2 to be 'arg5', got '%s'", s2) } if s3 != "arg3" { t.Errorf("Args(): Expected s3 to be unchanged 
('arg3'), instead got '%s'", s3) } // (quick cursor check just for kicks and giggles) if d.cursor != 6 { t.Errorf("Cursor should be 6, but is %d", d.cursor) } d.Next() // dir3 // More arguments than strings if all := d.Args(&s1); !all { t.Error("Args(): Expected true, got false") } if s1 != "arg6" { t.Errorf("Args(): Expected s1 to be 'arg6', got '%s'", s1) } d.Next() // dir4 // No arguments or strings if all := d.Args(); !all { t.Error("Args(): Expected true, got false") } // No arguments but at least one string if all := d.Args(&s1); all { t.Error("Args(): Expected false, got true") } } func TestDispenser_RemainingArgs(t *testing.T) { input := `dir1 arg1 arg2 arg3 dir2 arg4 arg5 dir3 arg6 { arg7 dir4` d := NewTestDispenser(input) d.Next() // dir1 args := d.RemainingArgs() if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(args, expected) { t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) } d.Next() // dir2 args = d.RemainingArgs() if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(args, expected) { t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) } d.Next() // dir3 args = d.RemainingArgs() if expected := []string{"arg6"}; !reflect.DeepEqual(args, expected) { t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) } d.Next() // { d.Next() // arg7 d.Next() // dir4 args = d.RemainingArgs() if len(args) != 0 { t.Errorf("RemainingArgs(): Expected %v, got %v", []string{}, args) } } func TestDispenser_RemainingArgsAsTokens(t *testing.T) { input := `dir1 arg1 arg2 arg3 dir2 arg4 arg5 dir3 arg6 { arg7 dir4` d := NewTestDispenser(input) d.Next() // dir1 args := d.RemainingArgsAsTokens() tokenTexts := make([]string, 0, len(args)) for _, arg := range args { tokenTexts = append(tokenTexts, arg.Text) } if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(tokenTexts, expected) { t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts) } d.Next() // dir2 args = 
d.RemainingArgsAsTokens() tokenTexts = tokenTexts[:0] for _, arg := range args { tokenTexts = append(tokenTexts, arg.Text) } if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(tokenTexts, expected) { t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts) } d.Next() // dir3 args = d.RemainingArgsAsTokens() tokenTexts = tokenTexts[:0] for _, arg := range args { tokenTexts = append(tokenTexts, arg.Text) } if expected := []string{"arg6"}; !reflect.DeepEqual(tokenTexts, expected) { t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", expected, tokenTexts) } d.Next() // { d.Next() // arg7 d.Next() // dir4 args = d.RemainingArgsAsTokens() tokenTexts = tokenTexts[:0] for _, arg := range args { tokenTexts = append(tokenTexts, arg.Text) } if len(args) != 0 { t.Errorf("RemainingArgsAsTokens(): Expected %v, got %v", []string{}, tokenTexts) } } func TestDispenser_ArgErr_Err(t *testing.T) { input := `dir1 { } dir2 arg1 arg2` d := NewTestDispenser(input) d.cursor = 1 // { if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "{") { t.Errorf("ArgErr(): Expected an error message with { in it, but got '%v'", err) } d.cursor = 5 // arg2 if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "arg2") { t.Errorf("ArgErr(): Expected an error message with 'arg2' in it; got '%v'", err) } err := d.Err("foobar") if err == nil { t.Fatalf("Err(): Expected an error, got nil") } if !strings.Contains(err.Error(), "Testfile:3") { t.Errorf("Expected error message with filename:line in it; got '%v'", err) } if !strings.Contains(err.Error(), "foobar") { t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err) } ErrBarIsFull := errors.New("bar is full") bookingError := d.Errf("unable to reserve: %w", ErrBarIsFull) if !errors.Is(bookingError, ErrBarIsFull) { t.Errorf("Errf(): should be able to unwrap the error chain") } } ================================================ FILE: caddyconfig/caddyfile/formatter.go 
================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyfile import ( "bytes" "io" "slices" "strings" "unicode" ) // Format formats the input Caddyfile to a standard, nice-looking // appearance. It works by reading each rune of the input and taking // control over all the bracing and whitespace that is written; otherwise, // words, comments, placeholders, and escaped characters are all treated // literally and written as they appear in the input. 
func Format(input []byte) []byte {
	input = bytes.TrimSpace(input)

	out := new(bytes.Buffer)
	rdr := bytes.NewReader(input)

	// heredocState tracks progress through a heredoc: closed (not in
	// one), opening (reading the marker that follows "<<"), or opened
	// (inside the heredoc body, which is copied verbatim).
	type heredocState int

	const (
		heredocClosed  heredocState = 0
		heredocOpening heredocState = 1
		heredocOpened  heredocState = 2
	)

	var (
		last rune // the last character that was written to the result

		space           = true // whether current/previous character was whitespace (beginning of input counts as space)
		beginningOfLine = true // whether we are at beginning of line

		openBrace        bool // whether current word/token is or started with open curly brace
		openBraceWritten bool // if openBrace, whether that brace was written or not
		openBraceSpace   bool // whether there was a non-newline space before open brace

		newLines int // count of newlines consumed

		comment bool // whether we're in a comment

		quotes string // encountered quotes ('', '`', '"', '"`', '`"')

		escaped bool // whether current char is escaped

		heredoc              heredocState // whether we're in a heredoc
		heredocEscaped       bool         // whether heredoc is escaped
		heredocMarker        []rune       // the marker that opened the current heredoc
		heredocClosingMarker []rune       // rolling window used to match the closing marker

		nesting int // indentation level
	)

	// write emits a rune and remembers it so later decisions
	// can inspect the last character written.
	write := func(ch rune) {
		out.WriteRune(ch)
		last = ch
	}
	// indent emits one tab per nesting level.
	indent := func() {
		for tabs := nesting; tabs > 0; tabs-- {
			write('\t')
		}
	}
	// nextLine ends the current output line.
	nextLine := func() {
		write('\n')
		beginningOfLine = true
	}

	// process the input one rune at a time
	for {
		ch, _, err := rdr.ReadRune()
		if err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}

		// detect whether we have the start of a heredoc
		if quotes == "" &&
			(heredoc == heredocClosed && !heredocEscaped) &&
			space && last == '<' && ch == '<' {
			write(ch)
			heredoc = heredocOpening
			space = false
			continue
		}

		if heredoc == heredocOpening {
			if ch == '\n' {
				// a newline ends the marker; it must be non-empty and
				// consist only of the characters the lexer accepts
				if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
					heredoc = heredocOpened
				} else {
					heredocMarker = nil
					heredoc = heredocClosed
					nextLine()
					continue
				}
				write(ch)
				continue
			}
			if unicode.IsSpace(ch) {
				// a space means it's just a regular token and not a heredoc
				heredocMarker = nil
				heredoc = heredocClosed
			} else {
				heredocMarker = append(heredocMarker, ch)
				write(ch)
				continue
			}
		}

		// if we're in a heredoc, all characters are read&write as-is
		if heredoc == heredocOpened {
			heredocClosingMarker = append(heredocClosingMarker, ch)
			if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
				heredocClosingMarker = heredocClosingMarker[1:]
			}
			// check if we're done
			if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
				heredocMarker = nil
				heredocClosingMarker = nil
				heredoc = heredocClosed
			} else {
				write(ch)
				if ch == '\n' {
					// marker must start a line; restart matching on newline
					heredocClosingMarker = heredocClosingMarker[:0]
				}
				continue
			}
		}

		if last == '<' && space {
			space = false
		}

		if comment {
			if ch == '\n' {
				comment = false
				space = true
				nextLine()
				continue
			} else {
				write(ch)
				continue
			}
		}

		if !escaped && ch == '\\' {
			if space {
				write(' ')
				space = false
			}
			write(ch)
			escaped = true
			continue
		}

		if escaped {
			// escaping the first '<' suppresses heredoc detection
			if ch == '<' {
				heredocEscaped = true
			}
			write(ch)
			escaped = false
			continue
		}

		// track backtick/double-quote nesting state transitions
		if ch == '`' {
			switch quotes {
			case "\"`":
				quotes = "\""
			case "`":
				quotes = ""
			case "\"":
				quotes = "\"`"
			default:
				quotes = "`"
			}
		}

		if quotes == "\"" {
			if ch == '"' {
				quotes = ""
			}
			write(ch)
			continue
		}

		if ch == '"' {
			switch quotes {
			case "":
				if space {
					quotes = "\""
				}
			case "`\"":
				quotes = "`"
			case "\"`":
				quotes = ""
			}
		}

		if strings.Contains(quotes, "`") {
			if ch == '`' && space && !beginningOfLine {
				write(' ')
			}
			write(ch)
			space = false
			continue
		}

		if unicode.IsSpace(ch) {
			space = true
			heredocEscaped = false
			if ch == '\n' {
				newLines++
			}
			continue
		}
		spacePrior := space
		space = false

		//////////////////////////////////////////////////////////
		// I find it helpful to think of the formatting loop in two
		// main sections; by the time we reach this point, we
		// know we are in a "regular" part of the file: we know
		// the character is not a space, not in a literal segment
		// like a comment or quoted, it's not escaped, etc.
		//////////////////////////////////////////////////////////

		if ch == '#' {
			comment = true
		}

		// a deferred open brace followed by more tokens on a later word:
		// emit it now with proper spacing/indentation
		if openBrace && spacePrior && !openBraceWritten {
			if nesting == 0 && last == '}' {
				nextLine()
				nextLine()
			}

			openBrace = false
			if beginningOfLine {
				indent()
			} else if !openBraceSpace || !unicode.IsSpace(last) {
				write(' ')
			}
			write('{')
			openBraceWritten = true
			nextLine()
			newLines = 0
			// prevent infinite nesting from ridiculous inputs (issue #4169)
			if nesting < 10 {
				nesting++
			}
		}

		switch {
		case ch == '{':
			// defer writing the brace: it may be part of a placeholder
			// token like {$ENV} rather than a block opener
			openBrace = true
			openBraceSpace = spacePrior && !beginningOfLine
			if openBraceSpace && newLines == 0 {
				write(' ')
			}
			openBraceWritten = false
			if quotes == "`" {
				write('{')
				openBraceWritten = true
				continue
			}
			continue

		case ch == '}' && (spacePrior || !openBrace):
			if quotes == "`" {
				write('}')
				continue
			}
			if last != '\n' {
				nextLine()
			}
			if nesting > 0 {
				nesting--
			}
			indent()
			write('}')
			newLines = 0
			continue
		}

		// collapse runs of blank lines to at most one blank line
		if newLines > 2 {
			newLines = 2
		}
		for i := 0; i < newLines; i++ {
			nextLine()
		}
		newLines = 0
		if beginningOfLine {
			indent()
		}
		if nesting == 0 && last == '}' && beginningOfLine {
			nextLine()
			nextLine()
		}

		if !beginningOfLine && spacePrior {
			write(' ')
		}

		if openBrace && !openBraceWritten {
			write('{')
			openBraceWritten = true
		}

		if spacePrior && ch == '<' {
			space = true
		}

		write(ch)

		beginningOfLine = false
	}

	// the Caddyfile does not need any leading or trailing spaces, but...
	trimmedResult := bytes.TrimSpace(out.Bytes())

	// ...Caddyfiles should, however, end with a newline because
	// newlines are significant to the syntax of the file
	return append(trimmedResult, '\n')
}
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build gofuzz package caddyfile import "bytes" func FuzzFormat(input []byte) int { formatted := Format(input) if bytes.Equal(formatted, Format(formatted)) { return 1 } return 0 } ================================================ FILE: caddyconfig/caddyfile/formatter_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyfile import ( "strings" "testing" ) func TestFormatter(t *testing.T) { for i, tc := range []struct { description string input string expect string }{ { description: "very simple", input: `abc def g hi jkl mn`, expect: `abc def g hi jkl mn`, }, { description: "basic indentation, line breaks, and nesting", input: ` a b c { d } e { f } g { h { i } } j { k { l } } m { n { o } p { q r s } } { { t u v w } }`, expect: `a b c { d } e { f } g { h { i } } j { k { l } } m { n { o } p { q r s } } { { t u v w } }`, }, { description: "block spacing", input: `a{ b } c{ d }`, expect: `a { b } c { d }`, }, { description: "advanced spacing", input: `abc { def }ghi{ jkl mno pqr}`, expect: `abc { def } ghi { jkl mno pqr }`, }, { description: "env var placeholders", input: `{$A} b { {$C} } d { {$E} } { {$F} } `, expect: `{$A} b { {$C} } d { {$E} } { {$F} }`, }, { description: "env var placeholders with port", input: `:{$PORT}`, expect: `:{$PORT}`, }, { description: "comments", input: `#a "\n" #b { c } d { e#f # g } h { # i }`, expect: `#a "\n" #b { c } d { e#f # g } h { # i }`, }, { description: "quotes and escaping", input: `"a \"b\" "#c d e { "f" } g { "h" } i { "foo bar" } j { "\"k\" l m" }`, expect: `"a \"b\" "#c d e { "f" } g { "h" } i { "foo bar" } j { "\"k\" l m" }`, }, { description: "bad nesting (too many open)", input: `a { { }`, expect: `a { { } `, }, { description: "bad nesting (too many close)", input: `a { { }}}`, expect: `a { { } } } `, }, { description: "json", input: `foo bar "{\"key\":34}" `, expect: `foo bar "{\"key\":34}"`, }, { description: "escaping after spaces", input: `foo \"literal\"`, expect: `foo \"literal\"`, }, { description: "simple placeholders as standalone tokens", input: `foo {bar}`, expect: `foo {bar}`, }, { description: "simple placeholders within tokens", input: `foo{bar} foo{bar}baz`, expect: `foo{bar} foo{bar}baz`, }, { description: "placeholders and malformed braces", input: `foo{bar} foo{ bar}baz`, expect: `foo{bar} foo { bar } 
// makeArgsReplacer prepares a Replacer which can replace
// non-variadic args placeholders in imported tokens.
func makeArgsReplacer(args []string) *caddy.Replacer {
	repl := caddy.NewEmptyReplacer()
	repl.Map(func(key string) (any, bool) {
		// TODO: Remove the deprecated {args.*} placeholder
		// support at some point in the future
		if matches := argsRegexpIndexDeprecated.FindStringSubmatch(key); len(matches) > 0 {
			// What's matched may be a substring of the key
			// (the regex is unanchored), so require a full match
			if matches[0] != key {
				return nil, false
			}

			value, err := strconv.Atoi(matches[1])
			if err != nil {
				caddy.Log().Named("caddyfile").Warn(
					"Placeholder {args." + matches[1] + "} has an invalid index")
				return nil, false
			}

			if value >= len(args) {
				caddy.Log().Named("caddyfile").Warn(
					"Placeholder {args." + matches[1] + "} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
				return nil, false
			}

			// old syntax still works, but nudge users toward {args[N]}
			caddy.Log().Named("caddyfile").Warn(
				"Placeholder {args." + matches[1] + "} deprecated, use {args[" + matches[1] + "]} instead")
			return args[value], true
		}

		// Handle args[*] form
		if matches := argsRegexpIndex.FindStringSubmatch(key); len(matches) > 0 {
			// What's matched may be a substring of the key
			// (the regex is unanchored), so require a full match
			if matches[0] != key {
				return nil, false
			}

			// ranges like {args[0:2]} are variadic and are expanded
			// elsewhere (token-level), not by this replacer
			if strings.Contains(matches[1], ":") {
				caddy.Log().Named("caddyfile").Warn(
					"Variadic placeholder {args[" + matches[1] + "]} must be a token on its own")
				return nil, false
			}

			value, err := strconv.Atoi(matches[1])
			if err != nil {
				caddy.Log().Named("caddyfile").Warn(
					"Placeholder {args[" + matches[1] + "]} has an invalid index")
				return nil, false
			}

			if value >= len(args) {
				caddy.Log().Named("caddyfile").Warn(
					"Placeholder {args[" + matches[1] + "]} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
				return nil, false
			}

			return args[value], true
		}

		// Not an args placeholder, ignore
		return nil, false
	})
	return repl
}

// Both patterns are intentionally unanchored; the replacer above
// compensates by checking that the whole key matched (matches[0] == key).
var (
	argsRegexpIndexDeprecated = regexp.MustCompile(`args\.(.+)`)
	argsRegexpIndex           = regexp.MustCompile(`args\[(.+)]`)
)
// adjacency maps a node name to the names it has edges to.
type adjacency map[string][]string

// importGraph maintains a directed graph of import relationships
// between files/snippets so that cycles can be rejected before they
// cause unbounded recursion during parsing.
type importGraph struct {
	nodes map[string]struct{}
	edges adjacency
}

// addNode registers a node, lazily allocating the node set.
// Adding a node that already exists is a no-op.
func (i *importGraph) addNode(name string) {
	if i.nodes == nil {
		i.nodes = make(map[string]struct{})
	}
	i.nodes[name] = struct{}{}
}

// addNodes registers each of the given nodes.
func (i *importGraph) addNodes(names []string) {
	for _, name := range names {
		i.addNode(name)
	}
}

// removeNode deletes a node from the graph (edges are left as-is).
func (i *importGraph) removeNode(name string) {
	delete(i.nodes, name)
}

// removeNodes deletes each of the given nodes.
func (i *importGraph) removeNodes(names []string) {
	for _, name := range names {
		i.removeNode(name)
	}
}

// addEdge adds a directed edge from -> to. It returns an error if
// either node is unknown or if the edge would introduce a cycle.
// Adding an edge that already exists is a no-op.
func (i *importGraph) addEdge(from, to string) error {
	if !i.exists(from) || !i.exists(to) {
		return fmt.Errorf("one of the nodes does not exist")
	}

	// reject the edge if `from` is already reachable from `to`
	if i.willCycle(to, from) {
		return fmt.Errorf("a cycle of imports exists between %s and %s", from, to)
	}

	if i.areConnected(from, to) {
		// if connected, there's nothing to do
		return nil
	}

	// note: i.nodes is guaranteed non-nil here, because both
	// exists() checks above passed
	if i.edges == nil {
		i.edges = make(adjacency)
	}

	i.edges[from] = append(i.edges[from], to)
	return nil
}

// addEdges adds an edge from `from` to each of `tos`,
// stopping at the first error.
func (i *importGraph) addEdges(from string, tos []string) error {
	for _, to := range tos {
		if err := i.addEdge(from, to); err != nil {
			return err
		}
	}
	return nil
}

// areConnected reports whether a direct edge from -> to exists.
func (i *importGraph) areConnected(from, to string) bool {
	al, ok := i.edges[from]
	if !ok {
		return false
	}
	return slices.Contains(al, to)
}

// willCycle reports whether `to` is reachable from `from` by following
// edges. Uses a depth-first search that exits as soon as the target is
// found, instead of collecting the full reachable set and scanning it.
func (i *importGraph) willCycle(from, to string) bool {
	visited := make(map[string]bool)

	var reachable func(string) bool
	reachable = func(node string) bool {
		if node == to {
			return true
		}
		if visited[node] {
			return false
		}
		visited[node] = true
		for _, next := range i.edges[node] {
			if reachable(next) {
				return true
			}
		}
		return false
	}

	for _, v := range i.edges[from] {
		if reachable(v) {
			return true
		}
	}
	return false
}

// exists reports whether the named node is in the graph.
func (i *importGraph) exists(key string) bool {
	_, exists := i.nodes[key]
	return exists
}
Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyfile import ( "bufio" "bytes" "fmt" "io" "regexp" "strings" "unicode" ) type ( // lexer is a utility which can get values, token by // token, from a Reader. A token is a word, and tokens // are separated by whitespace. A word can be enclosed // in quotes if it contains whitespace. lexer struct { reader *bufio.Reader token Token line int skippedLines int } // Token represents a single parsable unit. Token struct { File string imports []string Line int Text string wasQuoted rune // enclosing quote character, if any heredocMarker string snippetName string } ) // Tokenize takes bytes as input and lexes it into // a list of tokens that can be parsed as a Caddyfile. // Also takes a filename to fill the token's File as // the source of the tokens, which is important to // determine relative paths for `import` directives. func Tokenize(input []byte, filename string) ([]Token, error) { l := lexer{} if err := l.load(bytes.NewReader(input)); err != nil { return nil, err } var tokens []Token for { found, err := l.next() if err != nil { return nil, err } if !found { break } l.token.File = filename tokens = append(tokens, l.token) } return tokens, nil } // load prepares the lexer to scan an input for tokens. // It discards any leading byte order mark. 
func (l *lexer) load(input io.Reader) error { l.reader = bufio.NewReader(input) l.line = 1 // discard byte order mark, if present firstCh, _, err := l.reader.ReadRune() if err != nil { return err } if firstCh != 0xFEFF { err := l.reader.UnreadRune() if err != nil { return err } } return nil } // next loads the next token into the lexer. // A token is delimited by whitespace, unless // the token starts with a quotes character (") // in which case the token goes until the closing // quotes (the enclosing quotes are not included). // Inside quoted strings, quotes may be escaped // with a preceding \ character. No other chars // may be escaped. The rest of the line is skipped // if a "#" character is read in. Returns true if // a token was loaded; false otherwise. func (l *lexer) next() (bool, error) { var val []rune var comment, quoted, btQuoted, inHeredoc, heredocEscaped, escaped bool var heredocMarker string makeToken := func(quoted rune) bool { l.token.Text = string(val) l.token.wasQuoted = quoted l.token.heredocMarker = heredocMarker return true } for { // Read a character in; if err then if we had // read some characters, make a token. If we // reached EOF, then no more tokens to read. // If no EOF, then we had a problem. ch, _, err := l.reader.ReadRune() if err != nil { if len(val) > 0 { if inHeredoc { return false, fmt.Errorf("incomplete heredoc <<%s on line #%d, expected ending marker %s", heredocMarker, l.line+l.skippedLines, heredocMarker) } return makeToken(0), nil } if err == io.EOF { return false, nil } return false, err } // detect whether we have the start of a heredoc if (!quoted && !btQuoted) && (!inHeredoc && !heredocEscaped) && len(val) > 1 && string(val[:2]) == "<<" { // a space means it's just a regular token and not a heredoc if ch == ' ' { return makeToken(0), nil } // skip CR, we only care about LF if ch == '\r' { continue } // after hitting a newline, we know that the heredoc marker // is the characters after the two << and the newline. 
// we reset the val because the heredoc is syntax we don't // want to keep. if ch == '\n' { if len(val) == 2 { return false, fmt.Errorf("missing opening heredoc marker on line #%d; must contain only alpha-numeric characters, dashes and underscores; got empty string", l.line) } // check if there's too many < if string(val[:3]) == "<<<" { return false, fmt.Errorf("too many '<' for heredoc on line #%d; only use two, for example <= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) { // set the final value val, err = l.finalizeHeredoc(val, heredocMarker) if err != nil { return false, err } // set the line counter, and make the token l.line += l.skippedLines l.skippedLines = 0 return makeToken('<'), nil } // stay in the heredoc until we find the ending marker continue } // track whether we found an escape '\' for the next // iteration to be contextually aware if !escaped && !btQuoted && ch == '\\' { escaped = true continue } if quoted || btQuoted { if quoted && escaped { // all is literal in quoted area, // so only escape quotes if ch != '"' { val = append(val, '\\') } escaped = false } else { if (quoted && ch == '"') || (btQuoted && ch == '`') { return makeToken(ch), nil } } // allow quoted text to wrap continue on multiple lines if ch == '\n' { l.line += 1 + l.skippedLines l.skippedLines = 0 } // collect this character as part of the quoted token val = append(val, ch) continue } if unicode.IsSpace(ch) { // ignore CR altogether, we only actually care about LF (\n) if ch == '\r' { continue } // end of the line if ch == '\n' { // newlines can be escaped to chain arguments // onto multiple lines; else, increment the line count if escaped { l.skippedLines++ escaped = false } else { l.line += 1 + l.skippedLines l.skippedLines = 0 } // comments (#) are single-line only comment = false } // any kind of space means we're at the end of this token if len(val) > 0 { return makeToken(0), nil } continue } // comments must be at the start of a token, // 
// finalizeHeredoc takes the runes read as the heredoc text and the marker,
// and processes the text to strip leading whitespace, returning the final
// value without the leading whitespace.
func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
	stringVal := string(val)

	// find the last newline of the heredoc, which is where the contents end
	lastNewline := strings.LastIndex(stringVal, "\n")

	// collapse the content, then split into separate lines
	lines := strings.Split(stringVal[:lastNewline+1], "\n")

	// figure out how much whitespace we need to strip from the front of every line
	// by getting the string that precedes the marker, on the last line
	paddingToStrip := stringVal[lastNewline+1 : len(stringVal)-len(marker)]

	// iterate over each line and strip the whitespace from the front
	var out string
	for lineNum, lineText := range lines[:len(lines)-1] {
		// empty lines (including Windows-style bare "\r") are kept as
		// blank lines without requiring the marker's indentation
		if lineText == "" || lineText == "\r" {
			out += "\n"
			continue
		}

		// find an exact match for the padding
		index := strings.Index(lineText, paddingToStrip)

		// if the padding doesn't match exactly at the start then we can't safely strip
		if index != 0 {
			cleanLineText := strings.TrimRight(lineText, "\r\n")
			return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, cleanLineText, paddingToStrip)
		}

		// strip, then append the line, with the newline, to the output.
		// also removes all "\r" because Windows.
		out += strings.ReplaceAll(lineText[len(paddingToStrip):]+"\n", "\r", "")
	}

	// Remove the trailing newline from the loop
	if len(out) > 0 && out[len(out)-1] == '\n' {
		out = out[:len(out)-1]
	}

	// return the final value
	return []rune(out), nil
}

// Quoted returns true if the token was enclosed in quotes
// (i.e. double quotes, backticks, or heredoc).
func (t Token) Quoted() bool {
	return t.wasQuoted > 0
}

// NumLineBreaks counts how many line breaks are in the token text.
func (t Token) NumLineBreaks() int {
	lineBreaks := strings.Count(t.Text, "\n")
	if t.wasQuoted == '<' {
		// heredocs have an extra linebreak because the opening
		// delimiter is on its own line and is not included in the
		// token Text itself, and the trailing newline is removed.
		lineBreaks += 2
	}
	return lineBreaks
}

// Clone returns a deep copy of the token.
func (t Token) Clone() Token {
	return Token{
		File:          t.File,
		imports:       append([]string{}, t.imports...),
		Line:          t.Line,
		Text:          t.Text,
		wasQuoted:     t.wasQuoted,
		heredocMarker: t.heredocMarker,
		snippetName:   t.snippetName,
	}
}

// heredocMarkerRegexp constrains heredoc markers to alpha-numeric
// characters, dashes, and underscores.
var heredocMarkerRegexp = regexp.MustCompile("^[A-Za-z0-9_-]+$")

// isNextOnNewLine tests whether t2 is on a different line from t1
func isNextOnNewLine(t1, t2 Token) bool {
	// If the second token is from a different file,
	// we can assume it's from a different line
	if t1.File != t2.File {
		return true
	}

	// If the second token is from a different import chain,
	// we can assume it's from a different line
	if len(t1.imports) != len(t2.imports) {
		return true
	}
	for i, im := range t1.imports {
		if im != t2.imports[i] {
			return true
		}
	}

	// If the first token (incl line breaks) ends
	// on a line earlier than the next token,
	// then the second token is on a new line
	return t1.Line+t1.NumLineBreaks() < t2.Line
}
2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build gofuzz package caddyfile func FuzzTokenize(input []byte) int { tokens, err := Tokenize(input, "Caddyfile") if err != nil { return 0 } if len(tokens) == 0 { return -1 } return 1 } ================================================ FILE: caddyconfig/caddyfile/lexer_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyfile import ( "testing" ) func TestLexer(t *testing.T) { testCases := []struct { input []byte expected []Token expectErr bool errorMessage string }{ { input: []byte(`host:123`), expected: []Token{ {Line: 1, Text: "host:123"}, }, }, { input: []byte(`host:123 directive`), expected: []Token{ {Line: 1, Text: "host:123"}, {Line: 3, Text: "directive"}, }, }, { input: []byte(`host:123 { directive }`), expected: []Token{ {Line: 1, Text: "host:123"}, {Line: 1, Text: "{"}, {Line: 2, Text: "directive"}, {Line: 3, Text: "}"}, }, }, { input: []byte(`host:123 { directive }`), expected: []Token{ {Line: 1, Text: "host:123"}, {Line: 1, Text: "{"}, {Line: 1, Text: "directive"}, {Line: 1, Text: "}"}, }, }, { input: []byte(`host:123 { #comment directive # comment foobar # another comment }`), expected: []Token{ {Line: 1, Text: "host:123"}, {Line: 1, Text: "{"}, {Line: 3, Text: "directive"}, {Line: 5, Text: "foobar"}, {Line: 6, Text: "}"}, }, }, { input: []byte(`host:123 { # hash inside string is not a comment redir / /some/#/path }`), expected: []Token{ {Line: 1, Text: "host:123"}, {Line: 1, Text: "{"}, {Line: 3, Text: "redir"}, {Line: 3, Text: "/"}, {Line: 3, Text: "/some/#/path"}, {Line: 4, Text: "}"}, }, }, { input: []byte("# comment at beginning of file\n# comment at beginning of line\nhost:123"), expected: []Token{ {Line: 3, Text: "host:123"}, }, }, { input: []byte(`a "quoted value" b foobar`), expected: []Token{ {Line: 1, Text: "a"}, {Line: 1, Text: "quoted value"}, {Line: 1, Text: "b"}, {Line: 2, Text: "foobar"}, }, }, { input: []byte(`A "quoted \"value\" inside" B`), expected: []Token{ {Line: 1, Text: "A"}, {Line: 1, Text: `quoted "value" inside`}, {Line: 1, Text: "B"}, }, }, { input: []byte("An escaped \"newline\\\ninside\" quotes"), expected: []Token{ {Line: 1, Text: "An"}, {Line: 1, Text: "escaped"}, {Line: 1, Text: "newline\\\ninside"}, {Line: 2, Text: "quotes"}, }, }, { input: []byte("An escaped newline\\\noutside quotes"), expected: []Token{ {Line: 1, Text: 
"An"}, {Line: 1, Text: "escaped"}, {Line: 1, Text: "newline"}, {Line: 1, Text: "outside"}, {Line: 1, Text: "quotes"}, }, }, { input: []byte("line1\\\nescaped\nline2\nline3"), expected: []Token{ {Line: 1, Text: "line1"}, {Line: 1, Text: "escaped"}, {Line: 3, Text: "line2"}, {Line: 4, Text: "line3"}, }, }, { input: []byte("line1\\\nescaped1\\\nescaped2\nline4\nline5"), expected: []Token{ {Line: 1, Text: "line1"}, {Line: 1, Text: "escaped1"}, {Line: 1, Text: "escaped2"}, {Line: 4, Text: "line4"}, {Line: 5, Text: "line5"}, }, }, { input: []byte(`"unescapable\ in quotes"`), expected: []Token{ {Line: 1, Text: `unescapable\ in quotes`}, }, }, { input: []byte(`"don't\escape"`), expected: []Token{ {Line: 1, Text: `don't\escape`}, }, }, { input: []byte(`"don't\\escape"`), expected: []Token{ {Line: 1, Text: `don't\\escape`}, }, }, { input: []byte(`un\escapable`), expected: []Token{ {Line: 1, Text: `un\escapable`}, }, }, { input: []byte(`A "quoted value with line break inside" { foobar }`), expected: []Token{ {Line: 1, Text: "A"}, {Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"}, {Line: 2, Text: "{"}, {Line: 3, Text: "foobar"}, {Line: 4, Text: "}"}, }, }, { input: []byte(`"C:\php\php-cgi.exe"`), expected: []Token{ {Line: 1, Text: `C:\php\php-cgi.exe`}, }, }, { input: []byte(`empty "" string`), expected: []Token{ {Line: 1, Text: `empty`}, {Line: 1, Text: ``}, {Line: 1, Text: `string`}, }, }, { input: []byte("skip those\r\nCR characters"), expected: []Token{ {Line: 1, Text: "skip"}, {Line: 1, Text: "those"}, {Line: 2, Text: "CR"}, {Line: 2, Text: "characters"}, }, }, { input: []byte("\xEF\xBB\xBF:8080"), // test with leading byte order mark expected: []Token{ {Line: 1, Text: ":8080"}, }, }, { input: []byte("simple `backtick quoted` string"), expected: []Token{ {Line: 1, Text: `simple`}, {Line: 1, Text: `backtick quoted`}, {Line: 1, Text: `string`}, }, }, { input: []byte("multiline `backtick\nquoted\n` string"), expected: []Token{ {Line: 1, Text: `multiline`}, 
{Line: 1, Text: "backtick\nquoted\n"}, {Line: 3, Text: `string`}, }, }, { input: []byte("nested `\"quotes inside\" backticks` string"), expected: []Token{ {Line: 1, Text: `nested`}, {Line: 1, Text: `"quotes inside" backticks`}, {Line: 1, Text: `string`}, }, }, { input: []byte("reverse-nested \"`backticks` inside\" quotes"), expected: []Token{ {Line: 1, Text: `reverse-nested`}, {Line: 1, Text: "`backticks` inside"}, {Line: 1, Text: `quotes`}, }, }, { input: []byte(`heredoc <>`), expected: []Token{ {Line: 1, Text: `escaped-heredoc`}, {Line: 1, Text: `<<`}, {Line: 1, Text: `>>`}, }, }, { input: []byte(`not-a-heredoc >"`), expected: []Token{ {Line: 1, Text: `not-a-heredoc`}, {Line: 1, Text: `<<`}, {Line: 1, Text: `>>`}, }, }, { input: []byte(`not-a-heredoc << >>`), expected: []Token{ {Line: 1, Text: `not-a-heredoc`}, {Line: 1, Text: `<<`}, {Line: 1, Text: `>>`}, }, }, { input: []byte(`not-a-heredoc < 0 || len(p.block.Segments) > 0 { blocks = append(blocks, p.block) } if p.nesting > 0 { return blocks, p.EOFErr() } } return blocks, nil } func (p *parser) parseOne() error { p.block = ServerBlock{} return p.begin() } func (p *parser) begin() error { if len(p.tokens) == 0 { return nil } err := p.addresses() if err != nil { return err } if p.eof { // this happens if the Caddyfile consists of only // a line of addresses and nothing else return nil } if ok, name := p.isNamedRoute(); ok { // we just need a dummy leading token to ease parsing later nameToken := p.Token() nameToken.Text = name // named routes only have one key, the route name p.block.Keys = []Token{nameToken} p.block.IsNamedRoute = true // get all the tokens from the block, including the braces tokens, err := p.blockTokens(true) if err != nil { return err } tokens = append([]Token{nameToken}, tokens...) 
p.block.Segments = []Segment{tokens} return nil } if ok, name := p.isSnippet(); ok { if p.definedSnippets == nil { p.definedSnippets = map[string][]Token{} } if _, found := p.definedSnippets[name]; found { return p.Errf("redeclaration of previously declared snippet %s", name) } // consume all tokens til matched close brace tokens, err := p.blockTokens(false) if err != nil { return err } // Just as we need to track which file the token comes from, we need to // keep track of which snippet the token comes from. This is helpful // in tracking import cycles across files/snippets by namespacing them. // Without this, we end up with false-positives in cycle-detection. for k, v := range tokens { v.snippetName = name tokens[k] = v } p.definedSnippets[name] = tokens // empty block keys so we don't save this block as a real server. p.block.Keys = nil return nil } return p.blockContents() } func (p *parser) addresses() error { var expectingAnother bool for { value := p.Val() token := p.Token() // Reject request matchers if trying to define them globally if strings.HasPrefix(value, "@") { return p.Errf("request matchers may not be defined globally, they must be in a site block; found %s", value) } // Special case: import directive replaces tokens during parse-time if value == "import" && p.isNewLine() { err := p.doImport(0) if err != nil { return err } continue } // Open brace definitely indicates end of addresses if value == "{" { if expectingAnother { return p.Errf("Expected another address but had '%s' - check for extra comma", value) } // Mark this server block as being defined with braces. // This is used to provide a better error message when // the user may have tried to define two server blocks // without having used braces, which are required in // that case. 
p.block.HasBraces = true break } // Users commonly forget to place a space between the address and the '{' if strings.HasSuffix(value, "{") { return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value) } if value != "" { // empty token possible if user typed "" // Trailing comma indicates another address will follow, which // may possibly be on the next line if value[len(value)-1] == ',' { value = value[:len(value)-1] expectingAnother = true } else { expectingAnother = false // but we may still see another one on this line } // If there's a comma here, it's probably because they didn't use a space // between their two domains, e.g. "foo.com,bar.com", which would not be // parsed as two separate site addresses. if strings.Contains(value, ",") { return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value) } // After the above, a comma surrounded by spaces would result // in an empty token which we should ignore if value != "" { // Add the token as a site address token.Text = value p.block.Keys = append(p.block.Keys, token) } } // Advance token and possibly break out of loop or return error hasNext := p.Next() if expectingAnother && !hasNext { return p.EOFErr() } if !hasNext { p.eof = true break // EOF } if !expectingAnother && p.isNewLine() { break } } return nil } func (p *parser) blockContents() error { errOpenCurlyBrace := p.openCurlyBrace() if errOpenCurlyBrace != nil { // single-server configs don't need curly braces p.cursor-- } err := p.directives() if err != nil { return err } // only look for close curly brace if there was an opening if errOpenCurlyBrace == nil { err = p.closeCurlyBrace() if err != nil { return err } } return nil } // directives parses through all the lines for directives // and it expects the next token to be the first // directive. It goes until EOF or closing curly brace // which ends the server block. 
func (p *parser) directives() error {
	for p.Next() {
		// end of server block
		if p.Val() == "}" {
			// p.nesting has already been decremented
			break
		}

		// special case: import directive replaces tokens during parse-time
		if p.Val() == "import" {
			err := p.doImport(1)
			if err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}

		// normal case: parse a directive as a new segment
		// (a "segment" is a line which starts with a directive
		// and which ends at the end of the line or at the end of
		// the block that is opened at the end of the line)
		if err := p.directive(); err != nil {
			return err
		}
	}

	return nil
}

// doImport swaps out the import directive and its argument
// (a total of 2 tokens) with the tokens in the specified file
// or globbing pattern. When the function returns, the cursor
// is on the token before where the import directive was. In
// other words, call Next() to access the first token that was
// imported.
//
// The import target may be a previously-defined snippet name, a single
// file, or a glob pattern. Extra arguments after the pattern become
// {args[N]} placeholder replacements, and an optional trailing block
// supplies {block} / {blocks.*} substitutions.
func (p *parser) doImport(nesting int) error {
	// syntax checks
	if !p.NextArg() {
		return p.ArgErr()
	}
	importPattern := p.Val()
	if importPattern == "" {
		return p.Err("Import requires a non-empty filepath")
	}

	// grab remaining args as placeholder replacements
	args := p.RemainingArgs()

	// set up a replacer for non-variadic args replacement
	repl := makeArgsReplacer(args)

	// grab all the tokens (if it exists) from within a block that follows the import
	var blockTokens []Token
	for currentNesting := p.Nesting(); p.NextBlock(currentNesting); {
		blockTokens = append(blockTokens, p.Token())
	}
	// initialize with size 1
	blockMapping := make(map[string][]Token, 1)
	if len(blockTokens) > 0 {
		// use such tokens to create a new dispenser, and then use it to parse each block
		bd := NewDispenser(blockTokens)

		// one iteration processes one sub-block inside the import
		for bd.Next() {
			currentMappingKey := bd.Val()
			if currentMappingKey == "{" {
				return p.Err("anonymous blocks are not supported")
			}
			// load up all arguments (if there even are any)
			currentMappingTokens := bd.RemainingArgsAsTokens()
			// load up the entire block
			for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
				currentMappingTokens = append(currentMappingTokens, bd.Token())
			}
			blockMapping[currentMappingKey] = currentMappingTokens
		}
	}

	// splice out the import directive and its arguments
	// (2 tokens, plus the length of args)
	tokensBefore := p.tokens[:p.cursor-1-len(args)-len(blockTokens)]
	tokensAfter := p.tokens[p.cursor+1:]
	var importedTokens []Token
	var nodes []string

	// first check snippets. That is a simple, non-recursive replacement
	if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
		importedTokens = p.definedSnippets[importPattern]
		if len(importedTokens) > 0 {
			// just grab the first one
			nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
		}
	} else {
		// make path relative to the file of the _token_ being processed rather
		// than current working directory (issue #867) and then use glob to get
		// list of matching filenames
		absFile, err := caddy.FastAbs(p.Dispenser.File())
		if err != nil {
			return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
		}

		var matches []string
		var globPattern string
		if !filepath.IsAbs(importPattern) {
			globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
		} else {
			globPattern = importPattern
		}
		if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
			(strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
			// See issue #2096 - a pattern with many glob expansions can hang for too long
			return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
		}
		matches, err = filepath.Glob(globPattern)
		if err != nil {
			return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
		}
		if len(matches) == 0 {
			// a glob with no matches is only a warning; a literal path that
			// doesn't exist is a hard error
			if strings.ContainsAny(globPattern, "*?[]") {
				caddy.Log().Warn("No files matching import glob pattern", zap.String("pattern", importPattern))
			} else {
				return p.Errf("File to import not found: %s", importPattern)
			}
		} else {
			// See issue #5295 - should skip any files that start with a . when iterating over them.
			sep := string(filepath.Separator)
			segGlobPattern := strings.Split(globPattern, sep)
			if strings.HasPrefix(segGlobPattern[len(segGlobPattern)-1], "*") {
				var tmpMatches []string
				for _, m := range matches {
					seg := strings.Split(m, sep)
					if !strings.HasPrefix(seg[len(seg)-1], ".") {
						tmpMatches = append(tmpMatches, m)
					}
				}
				matches = tmpMatches
			}
		}

		// collect all the imported tokens
		for _, importFile := range matches {
			newTokens, err := p.doSingleImport(importFile)
			if err != nil {
				return err
			}
			importedTokens = append(importedTokens, newTokens...)
		}
		nodes = matches
	}

	// record this import in the graph so cycles across files/snippets
	// can be detected; remove the nodes again if adding the edges fails
	nodeName := p.File()
	if p.Token().snippetName != "" {
		nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
	}
	p.importGraph.addNode(nodeName)
	p.importGraph.addNodes(nodes)
	if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
		p.importGraph.removeNodes(nodes)
		return err
	}

	// copy the tokens so we don't overwrite p.definedSnippets
	tokensCopy := make([]Token, 0, len(importedTokens))

	var (
		maybeSnippet   bool
		maybeSnippetId bool
		index          int
	)

	// run the argument replacer on the tokens
	// golang for range slice return a copy of value
	// similarly, append also copy value
	for i, token := range importedTokens {
		// update the token's imports to refer to import directive filename, line number and snippet name if there is one
		if token.snippetName != "" {
			token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
		} else {
			token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
		}

		// naive way of determine snippets, as snippets definition can only follow name + block
		// format, won't check for nesting correctness or any other error, that's what parser does.
		if !maybeSnippet && nesting == 0 {
			// first of the line
			if i == 0 || isNextOnNewLine(tokensCopy[len(tokensCopy)-1], token) {
				index = 0
			} else {
				index++
			}

			if index == 0 && len(token.Text) >= 3 && strings.HasPrefix(token.Text, "(") && strings.HasSuffix(token.Text, ")") {
				maybeSnippetId = true
			}
		}

		switch token.Text {
		case "{":
			nesting++
			if index == 1 && maybeSnippetId && nesting == 1 {
				maybeSnippet = true
				maybeSnippetId = false
			}
		case "}":
			nesting--
			if nesting == 0 && maybeSnippet {
				maybeSnippet = false
			}
		}
		// if it is {block}, we substitute with all tokens in the block
		// if it is {blocks.*}, we substitute with the tokens in the mapping for the *
		var tokensToAdd []Token
		foundBlockDirective := false
		switch {
		case token.Text == "{block}":
			foundBlockDirective = true
			tokensToAdd = blockTokens
		case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
			foundBlockDirective = true
			// {blocks.foo.bar} will be extracted to key `foo.bar`
			blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
			val, ok := blockMapping[blockKey]
			if ok {
				tokensToAdd = val
			}
		}
		if foundBlockDirective {
			tokensCopy = append(tokensCopy, tokensToAdd...)
			continue
		}

		// tokens inside a snippet definition are replayed later, so leave
		// them untouched by the argument replacer
		if maybeSnippet {
			tokensCopy = append(tokensCopy, token)
			continue
		}

		// {args[N:M]} expands to one token per argument in the range;
		// otherwise replace any known {args[N]} placeholders in place
		foundVariadic, startIndex, endIndex := parseVariadic(token, len(args))
		if foundVariadic {
			for _, arg := range args[startIndex:endIndex] {
				token.Text = arg
				tokensCopy = append(tokensCopy, token)
			}
		} else {
			token.Text = repl.ReplaceKnown(token.Text, "")
			tokensCopy = append(tokensCopy, token)
		}
	}

	// splice the imported tokens in the place of the import statement
	// and rewind cursor so Next() will land on first imported token
	p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
	p.cursor -= len(args) + len(blockTokens) + 1

	return nil
}

// doSingleImport lexes the individual file at importFile and returns
// its tokens or an error, if any.
func (p *parser) doSingleImport(importFile string) ([]Token, error) { file, err := os.Open(importFile) if err != nil { return nil, p.Errf("Could not import %s: %v", importFile, err) } defer file.Close() if info, err := file.Stat(); err != nil { return nil, p.Errf("Could not import %s: %v", importFile, err) } else if info.IsDir() { return nil, p.Errf("Could not import %s: is a directory", importFile) } input, err := io.ReadAll(file) if err != nil { return nil, p.Errf("Could not read imported file %s: %v", importFile, err) } // only warning in case of empty files if len(input) == 0 || len(strings.TrimSpace(string(input))) == 0 { caddy.Log().Warn("Import file is empty", zap.String("file", importFile)) return []Token{}, nil } importedTokens, err := allTokens(importFile, input) if err != nil { return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err) } // Tack the file path onto these tokens so errors show the imported file's name // (we use full, absolute path to avoid bugs: issue #1892) filename, err := caddy.FastAbs(importFile) if err != nil { return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err) } for i := range importedTokens { importedTokens[i].File = filename } return importedTokens, nil } // directive collects tokens until the directive's scope // closes (either end of line or end of curly brace block). // It expects the currently-loaded token to be a directive // (or } that ends a server block). The collected tokens // are loaded into the current server block for later use // by directive setup functions. 
func (p *parser) directive() error {
	// a segment is a list of tokens associated with this directive
	var segment Segment

	// the directive itself is appended as a relevant token
	segment = append(segment, p.Token())

	for p.Next() {
		if p.Val() == "{" {
			p.nesting++
			if !p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
				return p.Err("Unexpected next token after '{' on same line")
			}
			if p.isNewLine() {
				return p.Err("Unexpected '{' on a new line; did you mean to place the '{' on the previous line?")
			}
		} else if p.Val() == "{}" {
			if p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
				return p.Err("Unexpected '{}' at end of line")
			}
		} else if p.isNewLine() && p.nesting == 0 {
			p.cursor-- // read too far
			break
		} else if p.Val() == "}" && p.nesting > 0 {
			p.nesting--
		} else if p.Val() == "}" && p.nesting == 0 {
			return p.Err("Unexpected '}' because no matching opening brace")
		} else if p.Val() == "import" && p.isNewLine() {
			// nested import inside this directive's block; splice tokens in
			if err := p.doImport(1); err != nil {
				return err
			}
			p.cursor-- // cursor is advanced when we continue, so roll back one more
			continue
		}

		segment = append(segment, p.Token())
	}

	p.block.Segments = append(p.block.Segments, segment)

	// still inside an unclosed brace block at EOF
	if p.nesting > 0 {
		return p.EOFErr()
	}

	return nil
}

// openCurlyBrace expects the current token to be an
// opening curly brace. This acts like an assertion
// because it returns an error if the token is not
// a opening curly brace. It does NOT advance the token.
func (p *parser) openCurlyBrace() error {
	if p.Val() != "{" {
		return p.SyntaxErr("{")
	}
	return nil
}

// closeCurlyBrace expects the current token to be
// a closing curly brace. This acts like an assertion
// because it returns an error if the token is not
// a closing curly brace. It does NOT advance the token.
func (p *parser) closeCurlyBrace() error {
	if p.Val() != "}" {
		return p.SyntaxErr("}")
	}
	return nil
}

// isNamedRoute reports whether the current block is a named route
// definition, and if so, returns the route name (without the &( ) wrapper).
func (p *parser) isNamedRoute() (bool, string) {
	keys := p.block.Keys
	// A named route block is a single key with parens, prefixed with &.
	if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
		return true, strings.TrimSuffix(keys[0].Text[2:], ")")
	}
	return false, ""
}

// isSnippet reports whether the current block is a snippet definition,
// and if so, returns the snippet name (without the parens).
func (p *parser) isSnippet() (bool, string) {
	keys := p.block.Keys
	// A snippet block is a single key with parens. Nothing else qualifies.
	if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
		return true, strings.TrimSuffix(keys[0].Text[1:], ")")
	}
	return false, ""
}

// read and store everything in a block for later replay.
func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
	// block must have curlies.
	err := p.openCurlyBrace()
	if err != nil {
		return nil, err
	}
	nesting := 1 // count our own nesting
	tokens := []Token{}
	if retainCurlies {
		tokens = append(tokens, p.Token())
	}
	for p.Next() {
		if p.Val() == "}" {
			nesting--
			if nesting == 0 {
				if retainCurlies {
					tokens = append(tokens, p.Token())
				}
				break
			}
		}
		if p.Val() == "{" {
			nesting++
		}
		tokens = append(tokens, p.tokens[p.cursor])
	}
	// make sure we're matched up
	if nesting != 0 {
		return nil, p.SyntaxErr("}")
	}
	return tokens, nil
}

// ServerBlock associates any number of keys from the
// head of the server block with tokens, which are
// grouped by segments.
type ServerBlock struct {
	HasBraces    bool      // whether the block was written with curly braces
	Keys         []Token   // the site address tokens at the head of the block
	Segments     []Segment // the directive lines/blocks in the body
	IsNamedRoute bool
}

// GetKeysText returns the textual values of the block's keys, in order.
func (sb ServerBlock) GetKeysText() []string {
	res := make([]string, 0, len(sb.Keys))
	for _, k := range sb.Keys {
		res = append(res, k.Text)
	}
	return res
}

// DispenseDirective returns a dispenser that contains
// all the tokens in the server block.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
	var tokens []Token
	for _, seg := range sb.Segments {
		if len(seg) > 0 && seg[0].Text == dir {
			tokens = append(tokens, seg...)
		}
	}
	return NewDispenser(tokens)
}

// Segment is a list of tokens which begins with a directive
// and ends at the end of the directive (either at the end of
// the line, or at the end of a block it opens).
type Segment []Token // Directive returns the directive name for the segment. // The directive name is the text of the first token. func (s Segment) Directive() string { if len(s) > 0 { return s[0].Text } return "" } // spanOpen and spanClose are used to bound spans that // contain the name of an environment variable. var ( spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'} envVarDefaultDelimiter = ":" ) ================================================ FILE: caddyconfig/caddyfile/parse_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyfile import ( "bytes" "os" "path/filepath" "strings" "testing" ) func TestParseVariadic(t *testing.T) { args := make([]string, 10) for i, tc := range []struct { input string result bool }{ { input: "", result: false, }, { input: "{args[1", result: false, }, { input: "1]}", result: false, }, { input: "{args[:]}aaaaa", result: false, }, { input: "aaaaa{args[:]}", result: false, }, { input: "{args.}", result: false, }, { input: "{args.1}", result: false, }, { input: "{args[]}", result: false, }, { input: "{args[:]}", result: true, }, { input: "{args[:]}", result: true, }, { input: "{args[0:]}", result: true, }, { input: "{args[:0]}", result: true, }, { input: "{args[-1:]}", result: false, }, { input: "{args[:11]}", result: false, }, { input: "{args[10:0]}", result: false, }, { input: "{args[0:10]}", result: true, }, { input: "{args[0]}:{args[1]}:{args[2]}", result: false, }, } { token := Token{ File: "test", Line: 1, Text: tc.input, } if v, _, _ := parseVariadic(token, len(args)); v != tc.result { t.Errorf("Test %d error expectation failed Expected: %t, got %t", i, tc.result, v) } } } func TestAllTokens(t *testing.T) { input := []byte("a b c\nd e") expected := []string{"a", "b", "c", "d", "e"} tokens, err := allTokens("TestAllTokens", input) if err != nil { t.Fatalf("Expected no error, got %v", err) } if len(tokens) != len(expected) { t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens)) } for i, val := range expected { if tokens[i].Text != val { t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text) } } } func TestParseOneAndImport(t *testing.T) { testParseOne := func(input string) (ServerBlock, error) { p := testParser(input) p.Next() // parseOne doesn't call Next() to start, so we must err := p.parseOne() return p.block, err } for i, test := range []struct { input string shouldErr bool keys []string numTokens []int // number of tokens to expect in each segment }{ {`localhost`, false, []string{ "localhost", }, []int{}}, 
{`localhost dir1`, false, []string{ "localhost", }, []int{1}}, { `localhost:1234 dir1 foo bar`, false, []string{ "localhost:1234", }, []int{3}, }, {`localhost { dir1 }`, false, []string{ "localhost", }, []int{1}}, {`localhost:1234 { dir1 foo bar dir2 }`, false, []string{ "localhost:1234", }, []int{3, 1}}, {`http://localhost https://localhost dir1 foo bar`, false, []string{ "http://localhost", "https://localhost", }, []int{3}}, {`http://localhost https://localhost { dir1 foo bar }`, false, []string{ "http://localhost", "https://localhost", }, []int{3}}, {`http://localhost, https://localhost { dir1 foo bar }`, false, []string{ "http://localhost", "https://localhost", }, []int{3}}, {`http://localhost, { }`, true, []string{ "http://localhost", }, []int{}}, {`host1:80, http://host2.com dir1 foo bar dir2 baz`, false, []string{ "host1:80", "http://host2.com", }, []int{3, 2}}, {`http://host1.com, http://host2.com, https://host3.com`, false, []string{ "http://host1.com", "http://host2.com", "https://host3.com", }, []int{}}, {`http://host1.com:1234, https://host2.com dir1 foo { bar baz } dir2`, false, []string{ "http://host1.com:1234", "https://host2.com", }, []int{6, 1}}, {`127.0.0.1 dir1 { bar baz } dir2 { foo bar }`, false, []string{ "127.0.0.1", }, []int{5, 5}}, {`localhost dir1 { foo`, true, []string{ "localhost", }, []int{3}}, {`localhost dir1 { }`, false, []string{ "localhost", }, []int{3}}, {`localhost dir1 { } }`, true, []string{ "localhost", }, []int{}}, {`localhost{ dir1 }`, true, []string{}, []int{}}, {`localhost dir1 { nested { foo } } dir2 foo bar`, false, []string{ "localhost", }, []int{7, 3}}, {``, false, []string{}, []int{}}, {`localhost dir1 arg1 import testdata/import_test1.txt`, false, []string{ "localhost", }, []int{2, 3, 1}}, {`import testdata/import_test2.txt`, false, []string{ "host1", }, []int{1, 2}}, {`import testdata/not_found.txt`, true, []string{}, []int{}}, // empty file should just log a warning, and result in no tokens {`import 
testdata/empty.txt`, false, []string{}, []int{}}, {`import testdata/only_white_space.txt`, false, []string{}, []int{}}, // import path/to/dir/* should skip any files that start with a . when iterating over them. {`localhost dir1 arg1 import testdata/glob/*`, false, []string{ "localhost", }, []int{2, 3, 1}}, // import path/to/dir/.* should continue to read all dotfiles in a dir. {`import testdata/glob/.*`, false, []string{ "host1", }, []int{1, 2}}, {`""`, false, []string{}, []int{}}, {``, false, []string{}, []int{}}, // Unexpected next token after '{' on same line {`localhost dir1 { a b }`, true, []string{"localhost"}, []int{}}, // Unexpected '{' on a new line {`localhost dir1 { a b }`, true, []string{"localhost"}, []int{}}, // Workaround with quotes {`localhost dir1 "{" a b "}"`, false, []string{"localhost"}, []int{5}}, // Unexpected '{}' at end of line {`localhost dir1 {}`, true, []string{"localhost"}, []int{}}, // Workaround with quotes {`localhost dir1 "{}"`, false, []string{"localhost"}, []int{2}}, // import with args {`import testdata/import_args0.txt a`, false, []string{"a"}, []int{}}, {`import testdata/import_args1.txt a b`, false, []string{"a", "b"}, []int{}}, {`import testdata/import_args*.txt a b`, false, []string{"a"}, []int{2}}, // test cases found by fuzzing! 
{`import }{$"`, true, []string{}, []int{}}, {`import /*/*.txt`, true, []string{}, []int{}}, {`import /???/?*?o`, true, []string{}, []int{}}, {`import /??`, true, []string{}, []int{}}, {`import /[a-z]`, true, []string{}, []int{}}, {`import {$}`, true, []string{}, []int{}}, {`import {%}`, true, []string{}, []int{}}, {`import {$$}`, true, []string{}, []int{}}, {`import {%%}`, true, []string{}, []int{}}, } { result, err := testParseOne(test.input) if test.shouldErr && err == nil { t.Errorf("Test %d: Expected an error, but didn't get one", i) } if !test.shouldErr && err != nil { t.Errorf("Test %d: Expected no error, but got: %v", i, err) } // t.Logf("%+v\n", result) if len(result.Keys) != len(test.keys) { t.Errorf("Test %d: Expected %d keys, got %d", i, len(test.keys), len(result.Keys)) continue } for j, addr := range result.GetKeysText() { if addr != test.keys[j] { t.Errorf("Test %d, key %d: Expected '%s', but was '%s'", i, j, test.keys[j], addr) } } if len(result.Segments) != len(test.numTokens) { t.Errorf("Test %d: Expected %d segments, had %d", i, len(test.numTokens), len(result.Segments)) continue } for j, seg := range result.Segments { if len(seg) != test.numTokens[j] { t.Errorf("Test %d, segment %d: Expected %d tokens, counted %d", i, j, test.numTokens[j], len(seg)) continue } } } } func TestRecursiveImport(t *testing.T) { testParseOne := func(input string) (ServerBlock, error) { p := testParser(input) p.Next() // parseOne doesn't call Next() to start, so we must err := p.parseOne() return p.block, err } isExpected := func(got ServerBlock) bool { textKeys := got.GetKeysText() if len(textKeys) != 1 || textKeys[0] != "localhost" { t.Errorf("got keys unexpected: expect localhost, got %v", textKeys) return false } if len(got.Segments) != 2 { t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments)) return false } if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 2 { t.Errorf("got unexpected tokens: %v", got.Segments) return false } return 
true } recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1") if err != nil { t.Fatal(err) } recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2") if err != nil { t.Fatal(err) } // test relative recursive import err = os.WriteFile(recursiveFile1, []byte( `localhost dir1 import recursive_import_test2`), 0o644) if err != nil { t.Fatal(err) } defer os.Remove(recursiveFile1) err = os.WriteFile(recursiveFile2, []byte("dir2 1"), 0o644) if err != nil { t.Fatal(err) } defer os.Remove(recursiveFile2) // import absolute path result, err := testParseOne("import " + recursiveFile1) if err != nil { t.Fatal(err) } if !isExpected(result) { t.Error("absolute+relative import failed") } // import relative path result, err = testParseOne("import testdata/recursive_import_test1") if err != nil { t.Fatal(err) } if !isExpected(result) { t.Error("relative+relative import failed") } // test absolute recursive import err = os.WriteFile(recursiveFile1, []byte( `localhost dir1 import `+recursiveFile2), 0o644) if err != nil { t.Fatal(err) } // import absolute path result, err = testParseOne("import " + recursiveFile1) if err != nil { t.Fatal(err) } if !isExpected(result) { t.Error("absolute+absolute import failed") } // import relative path result, err = testParseOne("import testdata/recursive_import_test1") if err != nil { t.Fatal(err) } if !isExpected(result) { t.Error("relative+absolute import failed") } } func TestDirectiveImport(t *testing.T) { testParseOne := func(input string) (ServerBlock, error) { p := testParser(input) p.Next() // parseOne doesn't call Next() to start, so we must err := p.parseOne() return p.block, err } isExpected := func(got ServerBlock) bool { textKeys := got.GetKeysText() if len(textKeys) != 1 || textKeys[0] != "localhost" { t.Errorf("got keys unexpected: expect localhost, got %v", textKeys) return false } if len(got.Segments) != 2 { t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments)) return false } if 
len(got.Segments[0]) != 1 || len(got.Segments[1]) != 8 { t.Errorf("got unexpected tokens: %v", got.Segments) return false } return true } directiveFile, err := filepath.Abs("testdata/directive_import_test") if err != nil { t.Fatal(err) } err = os.WriteFile(directiveFile, []byte(`prop1 1 prop2 2`), 0o644) if err != nil { t.Fatal(err) } defer os.Remove(directiveFile) // import from existing file result, err := testParseOne(`localhost dir1 proxy { import testdata/directive_import_test transparent }`) if err != nil { t.Fatal(err) } if !isExpected(result) { t.Error("directive import failed") } // import from nonexistent file _, err = testParseOne(`localhost dir1 proxy { import testdata/nonexistent_file transparent }`) if err == nil { t.Fatal("expected error when importing a nonexistent file") } } func TestParseAll(t *testing.T) { for i, test := range []struct { input string shouldErr bool keys [][]string // keys per server block, in order }{ {`localhost`, false, [][]string{ {"localhost"}, }}, {`localhost:1234`, false, [][]string{ {"localhost:1234"}, }}, {`localhost:1234 { } localhost:2015 { }`, false, [][]string{ {"localhost:1234"}, {"localhost:2015"}, }}, {`localhost:1234, http://host2`, false, [][]string{ {"localhost:1234", "http://host2"}, }}, {`foo.example.com , example.com`, false, [][]string{ {"foo.example.com", "example.com"}, }}, {`localhost:1234, http://host2,`, true, [][]string{}}, {`http://host1.com, http://host2.com { } https://host3.com, https://host4.com { }`, false, [][]string{ {"http://host1.com", "http://host2.com"}, {"https://host3.com", "https://host4.com"}, }}, {`import testdata/import_glob*.txt`, false, [][]string{ {"glob0.host0"}, {"glob0.host1"}, {"glob1.host0"}, {"glob2.host0"}, }}, {`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches {`import notfound/file.conf`, true, [][]string{}}, // but a specific file should // recursive self-import {`import testdata/import_recursive0.txt`, true, [][]string{}}, {`import 
testdata/import_recursive3.txt import testdata/import_recursive1.txt`, true, [][]string{}}, // cyclic imports {`(A) { import A } :80 import A `, true, [][]string{}}, {`(A) { import B } (B) { import A } :80 import A `, true, [][]string{}}, } { p := testParser(test.input) blocks, err := p.parseAll() if test.shouldErr && err == nil { t.Errorf("Test %d: Expected an error, but didn't get one", i) } if !test.shouldErr && err != nil { t.Errorf("Test %d: Expected no error, but got: %v", i, err) } if len(blocks) != len(test.keys) { t.Errorf("Test %d: Expected %d server blocks, got %d", i, len(test.keys), len(blocks)) continue } for j, block := range blocks { if len(block.Keys) != len(test.keys[j]) { t.Errorf("Test %d: Expected %d keys in block %d, got %d: %v", i, len(test.keys[j]), j, len(block.Keys), block.Keys) continue } for k, addr := range block.GetKeysText() { if addr != test.keys[j][k] { t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'", i, j, k, test.keys[j][k], addr) } } } } } func TestEnvironmentReplacement(t *testing.T) { os.Setenv("FOOBAR", "foobar") os.Setenv("CHAINED", "$FOOBAR") for i, test := range []struct { input string expect string }{ { input: "", expect: "", }, { input: "foo", expect: "foo", }, { input: "{$NOT_SET}", expect: "", }, { input: "foo{$NOT_SET}bar", expect: "foobar", }, { input: "{$FOOBAR}", expect: "foobar", }, { input: "foo {$FOOBAR} bar", expect: "foo foobar bar", }, { input: "foo{$FOOBAR}bar", expect: "foofoobarbar", }, { input: "foo\n{$FOOBAR}\nbar", expect: "foo\nfoobar\nbar", }, { input: "{$FOOBAR} {$FOOBAR}", expect: "foobar foobar", }, { input: "{$FOOBAR}{$FOOBAR}", expect: "foobarfoobar", }, { input: "{$CHAINED}", expect: "$FOOBAR", // should not chain env expands }, { input: "{$FOO:default}", expect: "default", }, { input: "foo{$BAR:bar}baz", expect: "foobarbaz", }, { input: "foo{$BAR:$FOOBAR}baz", expect: "foo$FOOBARbaz", // should not chain env expands }, { input: "{$FOOBAR", expect: "{$FOOBAR", }, { input: 
"{$LONGER_NAME $FOOBAR}", expect: "", }, { input: "{$}", expect: "{$}", }, { input: "{$$}", expect: "", }, { input: "{$", expect: "{$", }, { input: "}{$", expect: "}{$", }, } { actual := replaceEnvVars([]byte(test.input)) if !bytes.Equal(actual, []byte(test.expect)) { t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual) } } } func TestImportReplacementInJSONWithBrace(t *testing.T) { for i, test := range []struct { args []string input string expect string }{ { args: []string{"123"}, input: "{args[0]}", expect: "123", }, { args: []string{"123"}, input: `{"key":"{args[0]}"}`, expect: `{"key":"123"}`, }, { args: []string{"123", "123"}, input: `{"key":[{args[0]},{args[1]}]}`, expect: `{"key":[123,123]}`, }, } { repl := makeArgsReplacer(test.args) actual := repl.ReplaceKnown(test.input, "") if actual != test.expect { t.Errorf("Test %d: Expected: '%s' but got '%s'", i, test.expect, actual) } } } func TestSnippets(t *testing.T) { p := testParser(` (common) { gzip foo errors stderr } http://example.com { import common } `) blocks, err := p.parseAll() if err != nil { t.Fatal(err) } if len(blocks) != 1 { t.Fatalf("Expect exactly one server block. 
Got %d.", len(blocks)) } if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual { t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) } if len(blocks[0].Segments) != 2 { t.Fatalf("Server block should have tokens from import, got: %+v", blocks[0]) } if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual { t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) } if actual, expected := blocks[0].Segments[1][1].Text, "stderr"; expected != actual { t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) } } func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) { file, err := os.CreateTemp("", t.Name()) if err != nil { panic(err) // get a stack trace so we know where this was called from. } if _, err := file.WriteString(str); err != nil { panic(err) } if err := file.Close(); err != nil { panic(err) } return file.Name() } func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) { fileName := writeStringToTempFileOrDie(t, ` http://example.com { # This isn't an import directive, it's just an arg with value 'import' basic_auth / import password } `) // Parse the root file that imports the other one. p := testParser(`import ` + fileName) blocks, err := p.parseAll() if err != nil { t.Fatal(err) } auth := blocks[0].Segments[0] line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text if line != "basic_auth / import password" { // Previously, it would be changed to: // basic_auth / import /path/to/test/dir/password // referencing a file that (probably) doesn't exist and changing the // password! t.Errorf("Expected basic_auth tokens to be 'basic_auth / import password' but got %#q", line) } } func TestSnippetAcrossMultipleFiles(t *testing.T) { // Make the derived Caddyfile that expects (common) to be defined. 
	// Write a secondary Caddyfile that references the (common) snippet
	// defined by the root file below; imports are resolved against the
	// importing parser's snippet table, not per-file.
	fileName := writeStringToTempFileOrDie(t, `
		http://example.com {
			import common
		}
	`)

	// Parse the root file that defines (common) and then imports the other one.
	p := testParser(`
		(common) {
			gzip foo
		}
		import ` + fileName + `
	`)

	blocks, err := p.parseAll()
	if err != nil {
		t.Fatal(err)
	}
	if len(blocks) != 1 {
		t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
	}
	if actual, expected := blocks[0].GetKeysText()[0], "http://example.com"; expected != actual {
		t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
	}
	// The imported file's site block must have inherited the snippet's tokens.
	if len(blocks[0].Segments) != 1 {
		t.Fatalf("Server block should have tokens from import")
	}
	if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
		t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
	}
}

// TestRejectsGlobalMatcher asserts that a named matcher (@name) declared
// outside of any site block is rejected with a precise error message,
// including the file/line position of the offending token.
func TestRejectsGlobalMatcher(t *testing.T) {
	p := testParser(`
	@rejected path /foo

	(common) {
		gzip foo
		errors stderr
	}

	http://example.com {
		import common
	}
	`)
	_, err := p.parseAll()
	if err == nil {
		t.Fatal("Expected an error, but got nil")
	}
	// note: "Testfile:2" depends on @rejected sitting on line 2 of the input above
	expected := "request matchers may not be defined globally, they must be in a site block; found @rejected, at Testfile:2"
	if err.Error() != expected {
		t.Errorf("Expected error to be '%s' but got '%v'", expected, err)
	}
}

// TestRejectAnonymousImportBlock asserts that passing a bare { ... } block
// (with no directive name) as an import argument is rejected.
func TestRejectAnonymousImportBlock(t *testing.T) {
	p := testParser(`
	(site) {
		http://{args[0]} https://{args[0]} {
			{block}
		}
	}

	import site test.domain {
		{
			header_up Host {host}
			header_up X-Real-IP {remote_host}
		}
	}
	`)
	_, err := p.parseAll()
	if err == nil {
		t.Fatal("Expected an error, but got nil")
	}
	expected := "anonymous blocks are not supported"
	if !strings.HasPrefix(err.Error(), "anonymous blocks are not supported") {
		t.Errorf("Expected error to start with '%s' but got '%v'", expected, err)
	}
}

// TestAcceptSiteImportWithBraces asserts that a braced block passed to a
// snippet import is accepted when it begins with a directive name (here,
// reverse_proxy), in contrast to the anonymous block rejected above.
func TestAcceptSiteImportWithBraces(t *testing.T) {
	p := testParser(`
	(site) {
		http://{args[0]} https://{args[0]} {
			{block}
		}
	}

	import site test.domain {
		reverse_proxy http://192.168.1.1:8080 {
			header_up Host {host}
		}
	}
	`)
	_, err := p.parseAll()
	if err != nil {
		t.Errorf("Expected error to be nil but got '%v'", err)
	}
}

// testParser builds a parser over an in-memory dispenser seeded with input;
// the dispenser labels the source as "Testfile" for error positions.
func testParser(input string) parser {
	return parser{Dispenser: NewTestDispenser(input)}
}



================================================
FILE: caddyconfig/caddyfile/testdata/empty.txt
================================================



================================================
FILE: caddyconfig/caddyfile/testdata/glob/.dotfile.txt
================================================
host1 {
	dir1
	dir2 arg1
}


================================================
FILE: caddyconfig/caddyfile/testdata/glob/import_test1.txt
================================================
dir2 arg1 arg2
dir3


================================================
FILE: caddyconfig/caddyfile/testdata/import_args0.txt
================================================
{args[0]}


================================================
FILE: caddyconfig/caddyfile/testdata/import_args1.txt
================================================
{args[0]} {args[1]}


================================================
FILE: caddyconfig/caddyfile/testdata/import_glob0.txt
================================================
glob0.host0 {
	dir2 arg1
}

glob0.host1 {
}


================================================
FILE: caddyconfig/caddyfile/testdata/import_glob1.txt
================================================
glob1.host0 {
	dir1
	dir2 arg1
}


================================================
FILE: caddyconfig/caddyfile/testdata/import_glob2.txt
================================================
glob2.host0 {
	dir2 arg1
}


================================================
FILE: caddyconfig/caddyfile/testdata/import_recursive0.txt
================================================
import import_recursive0.txt


================================================
FILE: caddyconfig/caddyfile/testdata/import_recursive1.txt
================================================
import import_recursive2.txt


================================================
FILE:
caddyconfig/caddyfile/testdata/import_recursive2.txt
================================================
import import_recursive3.txt


================================================
FILE: caddyconfig/caddyfile/testdata/import_recursive3.txt
================================================
import import_recursive1.txt


================================================
FILE: caddyconfig/caddyfile/testdata/import_test1.txt
================================================
dir2 arg1 arg2
dir3


================================================
FILE: caddyconfig/caddyfile/testdata/import_test2.txt
================================================
host1 {
	dir1
	dir2 arg1
}


================================================
FILE: caddyconfig/caddyfile/testdata/only_white_space.txt
================================================
  


================================================
FILE: caddyconfig/configadapters.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyconfig

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

// Adapter is a type which can adapt a configuration to Caddy JSON.
// It returns the results and any warnings, or an error.
type Adapter interface {
	Adapt(body []byte, options map[string]any) ([]byte, []Warning, error)
}

// Warning represents a warning or notice related to conversion.
type Warning struct {
	File      string `json:"file,omitempty"`
	Line      int    `json:"line,omitempty"`
	Directive string `json:"directive,omitempty"`
	Message   string `json:"message,omitempty"`
}

// String renders the warning as "file:line (directive): message";
// the directive portion is omitted when not set.
func (w Warning) String() string {
	var directive string
	if w.Directive != "" {
		directive = fmt.Sprintf(" (%s)", w.Directive)
	}
	return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message)
}

// JSON encodes val as JSON, returning it as a json.RawMessage. Any
// marshaling errors (which are highly unlikely with correct code)
// are converted to warnings. This is convenient when filling config
// structs that require a json.RawMessage, without having to worry
// about errors.
func JSON(val any, warnings *[]Warning) json.RawMessage {
	b, err := json.Marshal(val)
	if err != nil {
		if warnings != nil {
			*warnings = append(*warnings, Warning{Message: err.Error()})
		}
		return nil
	}
	return b
}

// JSONModuleObject is like JSON(), except it marshals val into a JSON object
// with an added key named fieldName with the value fieldVal. This is useful
// for encoding module values where the module name has to be described within
// the object by a certain key; for example, `"handler": "file_server"` for a
// file server HTTP handler (fieldName="handler" and fieldVal="file_server").
// The val parameter must encode into a map[string]any (i.e. it must be
// a struct or map). Any errors are converted into warnings.
func JSONModuleObject(val any, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
	// encode to a JSON object first
	enc, err := json.Marshal(val)
	if err != nil {
		if warnings != nil {
			*warnings = append(*warnings, Warning{Message: err.Error()})
		}
		return nil
	}

	// then decode the object
	var tmp map[string]any
	err = json.Unmarshal(enc, &tmp)
	if err != nil {
		if warnings != nil {
			message := err.Error()
			// include the byte offset when available, to aid debugging
			if jsonErr, ok := err.(*json.SyntaxError); ok {
				message = fmt.Sprintf("%v, at offset %d", jsonErr.Error(), jsonErr.Offset)
			}
			*warnings = append(*warnings, Warning{Message: message})
		}
		return nil
	}

	// so we can easily add the module's field with its appointed value
	tmp[fieldName] = fieldVal

	// then re-marshal as JSON
	result, err := json.Marshal(tmp)
	if err != nil {
		if warnings != nil {
			*warnings = append(*warnings, Warning{Message: err.Error()})
		}
		return nil
	}

	return result
}

// RegisterAdapter registers a config adapter with the given name.
// This should usually be done at init-time. It panics if the
// adapter cannot be registered successfully.
func RegisterAdapter(name string, adapter Adapter) {
	if _, ok := configAdapters[name]; ok {
		panic(fmt.Errorf("%s: already registered", name))
	}
	configAdapters[name] = adapter
	// also expose the adapter as a Caddy module so it is discoverable
	caddy.RegisterModule(adapterModule{name, adapter})
}

// GetAdapter returns the adapter with the given name,
// or nil if one with that name is not registered.
func GetAdapter(name string) Adapter {
	return configAdapters[name]
}

// adapterModule is a wrapper type that can turn any config
// adapter into a Caddy module, which has the benefit of being
// counted with other modules, even though they do not
// technically extend the Caddy configuration structure.
// See caddyserver/caddy#3132.
type adapterModule struct {
	name string
	Adapter
}

// CaddyModule returns the Caddy module information.
func (am adapterModule) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  caddy.ModuleID("caddy.adapters." + am.name),
		New: func() caddy.Module { return am },
	}
}

// configAdapters is the registry backing RegisterAdapter/GetAdapter.
var configAdapters = make(map[string]Adapter)



================================================
FILE: caddyconfig/httpcaddyfile/addresses.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httpcaddyfile

import (
	"fmt"
	"net"
	"net/netip"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"unicode"

	"github.com/caddyserver/certmagic"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// mapAddressToProtocolToServerBlocks returns a map of listener address to list of server
// blocks that will be served on that address. To do this, each server block is
// expanded so that each one is considered individually, although keys of a
// server block that share the same address stay grouped together so the config
// isn't repeated unnecessarily. For example, this Caddyfile:
//
//	example.com {
//		bind 127.0.0.1
//	}
//	www.example.com, example.net/path, localhost:9999 {
//		bind 127.0.0.1 1.2.3.4
//	}
//
// has two server blocks to start with. But expressed in this Caddyfile are
// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
// and 127.0.0.1:9999. This is because the bind directive is applied to each
// key of its server block (specifying the host part), and each key may have
// a different port. And we definitely need to be sure that a site which is
// bound to be served on a specific interface is not served on others just
// because that is more convenient: it would be a potential security risk
// if the difference between interfaces means private vs. public.
//
// So what this function does for the example above is iterate each server
// block, and for each server block, iterate its keys. For the first, it
// finds one key (example.com) and determines its listener address
// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
// the listener address to the map value returned by this function, with
// the first server block as one of its associations.
//
// It then iterates each key on the second server block and associates them
// with one or more listener addresses. Indeed, each key in this block has
// two listener addresses because of the 'bind' directive. Once we know
// which addresses serve which keys, we can create a new server block for
// each address containing the contents of the server block and only those
// specific keys of the server block which use that address.
//
// It is possible and even likely that some keys in the returned map have
// the exact same list of server blocks (i.e. they are identical). This
// happens when multiple hosts are declared with a 'bind' directive and
// the resulting listener addresses are not shared by any other server
// block (or the other server blocks are exactly identical in their token
// contents). This happens with our example above because 1.2.3.4:443
// and 1.2.3.4:9999 are used exclusively with the second server block. This
// repetition may be undesirable, so call consolidateAddrMappings() to map
// multiple addresses to the same lists of server blocks (a many:many mapping).
// (Doing this is essentially a map-reduce technique.)
func (st *ServerType) mapAddressToProtocolToServerBlocks(originalServerBlocks []serverBlock,
	options map[string]any,
) (map[string]map[string][]serverBlock, error) {
	addrToProtocolToServerBlocks := map[string]map[string][]serverBlock{}

	// pairs a raw site-address token with its parsed form so both can be
	// carried into the per-address server blocks built below
	type keyWithParsedKey struct {
		key       caddyfile.Token
		parsedKey Address
	}

	for i, sblock := range originalServerBlocks {
		// within a server block, we need to map all the listener addresses
		// implied by the server block to the keys of the server block which
		// will be served by them; this has the effect of treating each
		// key of a server block as its own, but without having to repeat its
		// contents in cases where multiple keys really can be served together
		addrToProtocolToKeyWithParsedKeys := map[string]map[string][]keyWithParsedKey{}
		for j, key := range sblock.block.Keys {
			parsedKey, err := ParseAddress(key.Text)
			if err != nil {
				return nil, fmt.Errorf("parsing key: %v", err)
			}
			parsedKey = parsedKey.Normalize()

			// a key can have multiple listener addresses if there are multiple
			// arguments to the 'bind' directive (although they will all have
			// the same port, since the port is defined by the key or is implicit
			// through automatic HTTPS)
			listeners, err := st.listenersForServerBlockAddress(sblock, parsedKey, options)
			if err != nil {
				return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
			}

			// associate this key with its protocols and each listener address served with them
			kwpk := keyWithParsedKey{key, parsedKey}
			for addr, protocols := range listeners {
				protocolToKeyWithParsedKeys, ok := addrToProtocolToKeyWithParsedKeys[addr]
				if !ok {
					protocolToKeyWithParsedKeys = map[string][]keyWithParsedKey{}
					addrToProtocolToKeyWithParsedKeys[addr] = protocolToKeyWithParsedKeys
				}

				// an empty protocol indicates the default, a nil or empty value in the ListenProtocols array
				if len(protocols) == 0 {
					protocols[""] = struct{}{}
				}
				for prot := range protocols {
					protocolToKeyWithParsedKeys[prot] = append(
						protocolToKeyWithParsedKeys[prot],
						kwpk)
				}
			}
		}

		// make a slice of the map keys so we can iterate in sorted order
		// (map iteration order is random; sorting keeps output deterministic)
		addrs := make([]string, 0, len(addrToProtocolToKeyWithParsedKeys))
		for addr := range addrToProtocolToKeyWithParsedKeys {
			addrs = append(addrs, addr)
		}
		sort.Strings(addrs)

		// now that we know which addresses serve which keys of this
		// server block, we iterate that mapping and create a list of
		// new server blocks for each address where the keys of the
		// server block are only the ones which use the address; but
		// the contents (tokens) are of course the same
		for _, addr := range addrs {
			protocolToKeyWithParsedKeys := addrToProtocolToKeyWithParsedKeys[addr]

			prots := make([]string, 0, len(protocolToKeyWithParsedKeys))
			for prot := range protocolToKeyWithParsedKeys {
				prots = append(prots, prot)
			}
			sort.Strings(prots)

			protocolToServerBlocks, ok := addrToProtocolToServerBlocks[addr]
			if !ok {
				protocolToServerBlocks = map[string][]serverBlock{}
				addrToProtocolToServerBlocks[addr] = protocolToServerBlocks
			}

			for _, prot := range prots {
				keyWithParsedKeys := protocolToKeyWithParsedKeys[prot]

				keys := make([]caddyfile.Token, len(keyWithParsedKeys))
				parsedKeys := make([]Address, len(keyWithParsedKeys))

				for k, keyWithParsedKey := range keyWithParsedKeys {
					keys[k] = keyWithParsedKey.key
					parsedKeys[k] = keyWithParsedKey.parsedKey
				}

				// note: Segments and pile are shared (not copied) across the
				// derived blocks; only the key lists differ per address
				protocolToServerBlocks[prot] = append(protocolToServerBlocks[prot], serverBlock{
					block: caddyfile.ServerBlock{
						Keys:     keys,
						Segments: sblock.block.Segments,
					},
					pile:       sblock.pile,
					parsedKeys: parsedKeys,
				})
			}
		}
	}

	return addrToProtocolToServerBlocks, nil
}

// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
// single listener addresses to protocols to lists of server blocks. Since multiple addresses
// may serve multiple protocols to identical sites (server block contents), this function turns
// a 1:many mapping into a many:many mapping.
Server block contents (tokens) must be // exactly identical so that reflect.DeepEqual returns true in order for the addresses to be combined. // Identical entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each // association from multiple addresses to multiple server blocks; i.e. each element of // the returned slice) becomes a server definition in the output JSON. func (st *ServerType) consolidateAddrMappings(addrToProtocolToServerBlocks map[string]map[string][]serverBlock) []sbAddrAssociation { sbaddrs := make([]sbAddrAssociation, 0, len(addrToProtocolToServerBlocks)) addrs := make([]string, 0, len(addrToProtocolToServerBlocks)) for addr := range addrToProtocolToServerBlocks { addrs = append(addrs, addr) } sort.Strings(addrs) for _, addr := range addrs { protocolToServerBlocks := addrToProtocolToServerBlocks[addr] prots := make([]string, 0, len(protocolToServerBlocks)) for prot := range protocolToServerBlocks { prots = append(prots, prot) } sort.Strings(prots) for _, prot := range prots { serverBlocks := protocolToServerBlocks[prot] // now find other addresses that map to identical // server blocks and add them to our map of listener // addresses and protocols, while removing them from // the original map listeners := map[string]map[string]struct{}{} for otherAddr, otherProtocolToServerBlocks := range addrToProtocolToServerBlocks { for otherProt, otherServerBlocks := range otherProtocolToServerBlocks { if addr == otherAddr && prot == otherProt || reflect.DeepEqual(serverBlocks, otherServerBlocks) { listener, ok := listeners[otherAddr] if !ok { listener = map[string]struct{}{} listeners[otherAddr] = listener } listener[otherProt] = struct{}{} delete(otherProtocolToServerBlocks, otherProt) } } } addresses := make([]string, 0, len(listeners)) for lnAddr := range listeners { addresses = append(addresses, lnAddr) } sort.Strings(addresses) addressesWithProtocols := make([]addressWithProtocols, 0, len(listeners)) for _, lnAddr := range 
addresses { lnProts := listeners[lnAddr] prots := make([]string, 0, len(lnProts)) for prot := range lnProts { prots = append(prots, prot) } sort.Strings(prots) addressesWithProtocols = append(addressesWithProtocols, addressWithProtocols{ address: lnAddr, protocols: prots, }) } sbaddrs = append(sbaddrs, sbAddrAssociation{ addressesWithProtocols: addressesWithProtocols, serverBlocks: serverBlocks, }) } } return sbaddrs } // listenersForServerBlockAddress essentially converts the Caddyfile site addresses to a map from // Caddy listener addresses and the protocols to serve them with to the parsed address for each server block. func (st *ServerType) listenersForServerBlockAddress(sblock serverBlock, addr Address, options map[string]any, ) (map[string]map[string]struct{}, error) { switch addr.Scheme { case "wss": return nil, fmt.Errorf("the scheme wss:// is only supported in browsers; use https:// instead") case "ws": return nil, fmt.Errorf("the scheme ws:// is only supported in browsers; use http:// instead") case "https", "http", "": // Do nothing or handle the valid schemes default: return nil, fmt.Errorf("unsupported URL scheme %s://", addr.Scheme) } // figure out the HTTP and HTTPS ports; either // use defaults, or override with user config httpPort, httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPPort), strconv.Itoa(caddyhttp.DefaultHTTPSPort) if hport, ok := options["http_port"]; ok { httpPort = strconv.Itoa(hport.(int)) } if hsport, ok := options["https_port"]; ok { httpsPort = strconv.Itoa(hsport.(int)) } // default port is the HTTPS port lnPort := httpsPort if addr.Port != "" { // port explicitly defined lnPort = addr.Port } else if addr.Scheme == "http" { // port inferred from scheme lnPort = httpPort } // error if scheme and port combination violate convention if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) { return nil, fmt.Errorf("[%s] scheme and port violate convention", addr.String()) } // the bind 
directive specifies hosts (and potentially network), and the protocols to serve them with, but is optional lnCfgVals := make([]addressesWithProtocols, 0, len(sblock.pile["bind"])) for _, cfgVal := range sblock.pile["bind"] { if val, ok := cfgVal.Value.(addressesWithProtocols); ok { lnCfgVals = append(lnCfgVals, val) } } if len(lnCfgVals) == 0 { if defaultBindValues, ok := options["default_bind"].([]ConfigValue); ok { for _, defaultBindValue := range defaultBindValues { lnCfgVals = append(lnCfgVals, defaultBindValue.Value.(addressesWithProtocols)) } } else { lnCfgVals = []addressesWithProtocols{{ addresses: []string{""}, protocols: nil, }} } } // use a map to prevent duplication listeners := map[string]map[string]struct{}{} for _, lnCfgVal := range lnCfgVals { for _, lnAddr := range lnCfgVal.addresses { lnNetw, lnHost, _, err := caddy.SplitNetworkAddress(lnAddr) if err != nil { return nil, fmt.Errorf("splitting listener address: %v", err) } networkAddr, err := caddy.ParseNetworkAddress(caddy.JoinNetworkAddress(lnNetw, lnHost, lnPort)) if err != nil { return nil, fmt.Errorf("parsing network address: %v", err) } if _, ok := listeners[addr.String()]; !ok { listeners[networkAddr.String()] = map[string]struct{}{} } for _, protocol := range lnCfgVal.protocols { listeners[networkAddr.String()][protocol] = struct{}{} } } } return listeners, nil } // addressesWithProtocols associates a list of listen addresses // with a list of protocols to serve them with type addressesWithProtocols struct { addresses []string protocols []string } // Address represents a site address. It contains // the original input value, and the component // parts of an address. The component parts may be // updated to the correct values as setup proceeds, // but the original value should never be changed. // // The Host field must be in a normalized form. 
type Address struct {
	Original, Scheme, Host, Port, Path string
}

// ParseAddress parses an address string into a structured format with separate
// scheme, host, port, and path portions, as well as the original input string.
func ParseAddress(str string) (Address, error) {
	// cap input length defensively before parsing
	const maxLen = 4096
	if len(str) > maxLen {
		str = str[:maxLen]
	}
	remaining := strings.TrimSpace(str)
	a := Address{Original: remaining}

	// extract scheme
	splitScheme := strings.SplitN(remaining, "://", 2)
	switch len(splitScheme) {
	case 0:
		return a, nil
	case 1:
		remaining = splitScheme[0]
	case 2:
		a.Scheme = splitScheme[0]
		remaining = splitScheme[1]
	}

	// extract host and port
	hostSplit := strings.SplitN(remaining, "/", 2)
	if len(hostSplit) > 0 {
		host, port, err := net.SplitHostPort(hostSplit[0])
		if err != nil {
			// retry with a trailing colon so a bare host (no port) parses;
			// if that also fails, treat the whole segment as the host
			host, port, err = net.SplitHostPort(hostSplit[0] + ":")
			if err != nil {
				host = hostSplit[0]
			}
		}
		a.Host = host
		a.Port = port
	}
	if len(hostSplit) == 2 {
		// all that remains is the path
		a.Path = "/" + hostSplit[1]
	}

	// make sure port is valid
	if a.Port != "" {
		if portNum, err := strconv.Atoi(a.Port); err != nil {
			return Address{}, fmt.Errorf("invalid port '%s': %v", a.Port, err)
		} else if portNum < 0 || portNum > 65535 {
			return Address{}, fmt.Errorf("port %d is out of range", portNum)
		}
	}

	return a, nil
}

// String returns a human-readable form of a. It will
// be a cleaned-up and filled-out URL string.
func (a Address) String() string {
	if a.Host == "" && a.Port == "" {
		return ""
	}
	scheme := a.Scheme
	if scheme == "" {
		// infer the scheme from the port when unset
		if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
			scheme = "https"
		} else {
			scheme = "http"
		}
	}
	s := scheme
	if s != "" {
		s += "://"
	}
	// only include the port when it differs from the scheme's default
	if a.Port != "" &&
		((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
			(scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
		s += net.JoinHostPort(a.Host, a.Port)
	} else {
		s += a.Host
	}
	if a.Path != "" {
		s += a.Path
	}
	return s
}

// Normalize returns a normalized version of a.
func (a Address) Normalize() Address {
	path := a.Path

	// ensure host is normalized if it's an IP address
	host := strings.TrimSpace(a.Host)
	if ip, err := netip.ParseAddr(host); err == nil {
		// canonicalize pure IPv6 addresses only; leave IPv4 and
		// IPv4-in-IPv6 forms as written
		if ip.Is6() && !ip.Is4() && !ip.Is4In6() {
			host = ip.String()
		}
	}

	return Address{
		Original: a.Original,
		Scheme:   lowerExceptPlaceholders(a.Scheme),
		Host:     lowerExceptPlaceholders(host),
		Port:     a.Port,
		Path:     path,
	}
}

// lowerExceptPlaceholders lowercases s except within
// placeholders (substrings in non-escaped '{ }' spans).
// See https://github.com/caddyserver/caddy/issues/3264
func lowerExceptPlaceholders(s string) string {
	var sb strings.Builder
	var escaped, inPlaceholder bool
	for _, ch := range s {
		if ch == '\\' && !escaped {
			escaped = true
			sb.WriteRune(ch)
			continue
		}
		if ch == '{' && !escaped {
			inPlaceholder = true
		}
		if ch == '}' && inPlaceholder && !escaped {
			inPlaceholder = false
		}
		if inPlaceholder {
			sb.WriteRune(ch)
		} else {
			sb.WriteRune(unicode.ToLower(ch))
		}
		escaped = false
	}
	return sb.String()
}



================================================
FILE: caddyconfig/httpcaddyfile/addresses_fuzz.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz

package httpcaddyfile

// FuzzParseAddress exercises ParseAddress with arbitrary input; on error
// the result must be the zero Address (interesting input returns 1).
func FuzzParseAddress(data []byte) int {
	addr, err := ParseAddress(string(data))
	if err != nil {
		if addr == (Address{}) {
			return 1
		}
		return 0
	}
	return 1
}



================================================
FILE: caddyconfig/httpcaddyfile/addresses_test.go
================================================
package httpcaddyfile

import (
	"testing"
)

// TestParseAddress table-tests ParseAddress over valid and invalid site
// addresses, checking each parsed component and error expectation.
func TestParseAddress(t *testing.T) {
	for i, test := range []struct {
		input                    string
		scheme, host, port, path string
		shouldErr                bool
	}{
		{``, "", "", "", "", false},
		{`localhost`, "", "localhost", "", "", false},
		{`localhost:1234`, "", "localhost", "1234", "", false},
		{`localhost:`, "", "localhost", "", "", false},
		{`0.0.0.0`, "", "0.0.0.0", "", "", false},
		{`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false},
		{`:1234`, "", "", "1234", "", false},
		{`[::1]`, "", "::1", "", "", false},
		{`[::1]:1234`, "", "::1", "1234", "", false},
		{`:`, "", "", "", "", false},
		{`:http`, "", "", "", "", true},
		{`:https`, "", "", "", "", true},
		{`localhost:http`, "", "", "", "", true}, // using service name in port is verboten, as of Go 1.12.8
		{`localhost:https`, "", "", "", "", true},
		{`http://localhost:https`, "", "", "", "", true}, // conflict
		{`http://localhost:http`, "", "", "", "", true},  // repeated scheme
		{`host:https/path`, "", "", "", "", true},
		{`http://localhost:443`, "http", "localhost", "443", "", false}, // NOTE: not conventional
		{`https://localhost:80`, "https", "localhost", "80", "", false}, // NOTE: not conventional
		{`http://localhost`, "http", "localhost", "", "", false},
		{`https://localhost`, "https", "localhost", "", "", false},
		{`http://{env.APP_DOMAIN}`, "http", "{env.APP_DOMAIN}", "", "", false},
		{`{env.APP_DOMAIN}:80`, "", "{env.APP_DOMAIN}", "80", "", false},
		{`{env.APP_DOMAIN}/path`, "", "{env.APP_DOMAIN}", "", "/path", false},
		{`example.com/{env.APP_PATH}`, "", "example.com", "", "/{env.APP_PATH}", false},
		{`http://127.0.0.1`, "http", "127.0.0.1", "", "", false},
		{`https://127.0.0.1`, "https", "127.0.0.1", "", "", false},
		{`http://[::1]`, "http", "::1", "", "", false},
		{`http://localhost:1234`, "http", "localhost", "1234", "", false},
		{`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false},
		{`http://[::1]:1234`, "http", "::1", "1234", "", false},
		{``, "", "", "", "", false},
		{`::1`, "", "::1", "", "", false},
		{`localhost::`, "", "localhost::", "", "", false},
		{`#$%@`, "", "#$%@", "", "", false}, // don't want to presume what the hostname could be
		{`host/path`, "", "host", "", "/path", false},
		{`http://host/`, "http", "host", "", "/", false},
		{`//asdf`, "", "", "", "//asdf", false},
		{`:1234/asdf`, "", "", "1234", "/asdf", false},
		{`http://host/path`, "http", "host", "", "/path", false},
		{`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
		{`host:80/path`, "", "host", "80", "/path", false},
		{`/path`, "", "", "", "/path", false},
	} {
		actual, err := ParseAddress(test.input)

		if err != nil && !test.shouldErr {
			t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
		}
		if err == nil && test.shouldErr {
			t.Errorf("Test %d (%s): Expected error, but had none (%#v)", i, test.input, actual)
		}

		if !test.shouldErr && actual.Original != test.input {
			t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original)
		}
		if actual.Scheme != test.scheme {
			t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme)
		}
		if actual.Host != test.host {
			t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host)
		}
		if actual.Port != test.port {
			t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port)
		}
		if actual.Path != test.path {
			t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path)
		}
	}
}

// TestAddressString checks Address.String's scheme inference and
// default-port elision behavior.
func TestAddressString(t *testing.T) {
	for i, test := range []struct {
		addr     Address
		expected string
	}{
		{Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"},
		{Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"},
		{Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"},
		{Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"},
		{Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"},
		{Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"},
		{Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"},
		{Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"},
		{Address{Scheme: "", Host: "", Port: "", Path: ""}, ""},
	} {
		actual := test.addr.String()
		if actual != test.expected {
			t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
		}
	}
}

// TestKeyNormalization checks Address.Normalize: lowercasing outside
// placeholders, IPv6 canonicalization, and preservation of path case.
func TestKeyNormalization(t *testing.T) {
	testCases := []struct {
		input  string
		expect Address
	}{
		{
			input: "example.com",
			expect: Address{
				Host: "example.com",
			},
		},
		{
			input: "http://host:1234/path",
			expect: Address{
				Scheme: "http",
				Host:   "host",
				Port:   "1234",
				Path:   "/path",
			},
		},
		{
			input: "HTTP://A/ABCDEF",
			expect: Address{
				Scheme: "http",
				Host:   "a",
				Path:   "/ABCDEF",
			},
		},
		{
			input: "A/ABCDEF",
			expect: Address{
				Host: "a",
				Path: "/ABCDEF",
			},
		},
		{
			input: "A:2015/Path",
			expect: Address{
				Host: "a",
				Port: "2015",
				Path: "/Path",
			},
		},
		{
			input: "sub.{env.MY_DOMAIN}",
			expect: Address{
				Host: "sub.{env.MY_DOMAIN}",
			},
		},
		{
			input: "sub.ExAmPle",
			expect: Address{
				Host: "sub.example",
			},
		},
		{
			input: "sub.\\{env.MY_DOMAIN\\}",
			expect: Address{
				Host: "sub.\\{env.my_domain\\}",
			},
		},
		{
			input: "sub.{env.MY_DOMAIN}.com",
			expect: Address{
				Host: "sub.{env.MY_DOMAIN}.com",
			},
		},
		{
			input: ":80",
			expect: Address{
				Port: "80",
			},
		},
		{
			input: ":443",
			expect: Address{
				Port: "443",
			},
		},
		{
			input: ":1234",
			expect: Address{
				Port: "1234",
			},
		},
		{
			input:  "",
			expect: Address{},
		},
		{
			input:  ":",
			expect: Address{},
		},
		{
			input: "[::]",
			expect: Address{
				Host: "::",
			},
		},
		{
			input: "127.0.0.1",
			expect: Address{
				Host: "127.0.0.1",
			},
		},
		{
			input: "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234",
			expect: Address{
				Host: "2001:db8:85a3:8d3:1319:8a2e:370:7348",
				Port: "1234",
			},
		},
		{
			// IPv4 address in IPv6 form (#4381)
			input: "[::ffff:cff4:e77d]:1234",
			expect: Address{
				Host: "::ffff:cff4:e77d",
				Port: "1234",
			},
		},
		{
			input: "::ffff:cff4:e77d",
			expect: Address{
				Host: "::ffff:cff4:e77d",
			},
		},
	}
	for i, tc := range testCases {
		addr, err := ParseAddress(tc.input)
		if err != nil {
			t.Errorf("Test %d: Parsing address '%s': %v", i, tc.input, err)
			continue
		}
		actual := addr.Normalize()
		if actual.Scheme != tc.expect.Scheme {
			t.Errorf("Test %d: Input '%s': Expected Scheme='%s' but got Scheme='%s'", i, tc.input, tc.expect.Scheme, actual.Scheme)
		}
		if actual.Host != tc.expect.Host {
			t.Errorf("Test %d: Input '%s': Expected Host='%s' but got Host='%s'", i, tc.input, tc.expect.Host, actual.Host)
		}
		if actual.Port != tc.expect.Port {
			t.Errorf("Test %d: Input '%s': Expected Port='%s' but got Port='%s'", i, tc.input, tc.expect.Port, actual.Port)
		}
		if actual.Path != tc.expect.Path {
			t.Errorf("Test %d: Input '%s': Expected Path='%s' but got Path='%s'", i, tc.input, tc.expect.Path, actual.Path)
		}
	}
}



================================================
FILE: caddyconfig/httpcaddyfile/builtins.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile

import (
	"encoding/json"
	"fmt"
	"html"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/mholt/acmez/v3/acme"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

// init registers the built-in Caddyfile directives with the
// httpcaddyfile adapter. Note that "skip_log" is a deprecated
// alias for "log_skip" (both map to parseLogSkip, which warns
// when the old name is used).
func init() {
	RegisterDirective("bind", parseBind)
	RegisterDirective("tls", parseTLS)
	RegisterHandlerDirective("fs", parseFilesystem)
	RegisterDirective("root", parseRoot)
	RegisterHandlerDirective("vars", parseVars)
	RegisterHandlerDirective("redir", parseRedir)
	RegisterHandlerDirective("respond", parseRespond)
	RegisterHandlerDirective("abort", parseAbort)
	RegisterHandlerDirective("error", parseError)
	RegisterHandlerDirective("route", parseRoute)
	RegisterHandlerDirective("handle", parseHandle)
	RegisterDirective("handle_errors", parseHandleErrors)
	RegisterHandlerDirective("invoke", parseInvoke)
	RegisterDirective("log", parseLog)
	RegisterHandlerDirective("skip_log", parseLogSkip)
	RegisterHandlerDirective("log_skip", parseLogSkip)
	RegisterHandlerDirective("log_name", parseLogName)
}

// parseBind parses the bind directive. Syntax:
//
//	bind <addresses...> [{
//	    protocols <h1|h2|h2c|h3> [...]
//	}]
func parseBind(h Helper) ([]ConfigValue, error) {
	h.Next() // consume directive name
	var addresses, protocols []string
	// all remaining args on the directive line are listener addresses
	addresses = h.RemainingArgs()

	for h.NextBlock(0) {
		switch h.Val() {
		case "protocols":
			protocols = h.RemainingArgs()
			if len(protocols) == 0 {
				return nil, h.Errf("protocols requires one or more arguments")
			}
		default:
			return nil, h.Errf("unknown subdirective: %s", h.Val())
		}
	}

	// emit a single "bind" config value pairing the addresses with
	// the (possibly empty) list of allowed protocols
	return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{
		addresses: addresses,
		protocols: protocols,
	}}}, nil
}

// parseTLS parses the tls directive.
// Syntax:
//
//	tls [<email>|internal|force_automate]|[<cert_file> <key_file>] {
//	    protocols <min> [<max>]
//	    ciphers   <cipher_suites...>
//	    curves    <curves...>
//	    client_auth {
//	        mode                   [request|require|verify_if_given|require_and_verify]
//	        trust_pool             <module> [...]
//	        trusted_leaf_cert      <base64_der>
//	        trusted_leaf_cert_file <filename>
//	    }
//	    alpn                          <values...>
//	    load                          <paths...>
//	    ca                            <acme_ca_endpoint>
//	    ca_root                       <pem_file>
//	    key_type                      [ed25519|p256|p384|rsa2048|rsa4096]
//	    dns                           [<provider_name> [...]] (required, though, if DNS is not configured as global option)
//	    propagation_delay             <duration>
//	    propagation_timeout           <duration>
//	    resolvers                     <dns_servers...>
//	    dns_ttl                       <duration>
//	    dns_challenge_override_domain <domain>
//	    on_demand
//	    reuse_private_keys
//	    force_automate
//	    eab                           <key_id> <mac_key>
//	    issuer                        <module_name> [...]
//	    get_certificate               <module_name> [...]
//	    insecure_secrets_log          <log_file>
//	    renewal_window_ratio          <ratio>
//	}
func parseTLS(h Helper) ([]ConfigValue, error) {
	h.Next() // consume directive name

	cp := new(caddytls.ConnectionPolicy)
	var fileLoader caddytls.FileLoader
	var folderLoader caddytls.FolderLoader
	var certSelector caddytls.CustomCertSelectionPolicy
	var acmeIssuer *caddytls.ACMEIssuer
	var keyType string
	var internalIssuer *caddytls.InternalIssuer
	var issuers []certmagic.Issuer
	var certManagers []certmagic.Manager
	var onDemand bool
	var reusePrivateKeys bool
	var forceAutomate bool
	var renewalWindowRatio float64

	// Track which DNS challenge options are set
	var dnsOptionsSet []string

	// the first line of the directive distinguishes the mutually-exclusive
	// modes: empty, "internal", "force_automate", an ACME account email,
	// or a cert/key file pair
	firstLine := h.RemainingArgs()
	switch len(firstLine) {
	case 0:
	case 1:
		if firstLine[0] == "internal" {
			internalIssuer = new(caddytls.InternalIssuer)
		} else if firstLine[0] == "force_automate" {
			forceAutomate = true
		} else if !strings.Contains(firstLine[0], "@") {
			return nil, h.Err("single argument must either be 'internal', 'force_automate', or an email address")
		} else {
			acmeIssuer = &caddytls.ACMEIssuer{
				Email: firstLine[0],
			}
		}

	case 2: // file certificate loader
		certFilename := firstLine[0]
		keyFilename := firstLine[1]

		// tag this certificate so if multiple certs match, specifically
		// this one that the user has provided will be used, see #2588:
		// https://github.com/caddyserver/caddy/issues/2588 ... but we
		// must be careful about how we do this; being careless will
		// lead to failed handshakes
		//
		// we need to remember which cert files we've seen, since we
		// must load each cert only once; otherwise, they each get a
		// different tag... since a cert loaded twice has the same
		// bytes, it will overwrite the first one in the cache, and
		// only the last cert (and its tag) will survive, so any conn
		// policy that is looking for any tag other than the last one
		// to be loaded won't find it, and TLS handshakes will fail
		// (see end of issue #3004)
		//
		// tlsCertTags maps certificate filenames to their tag.
		// This is used to remember which tag is used for each
		// certificate files, since we need to avoid loading
		// the same certificate files more than once, overwriting
		// previous tags
		tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string)
		if !ok {
			tlsCertTags = make(map[string]string)
			h.State["tlsCertTags"] = tlsCertTags
		}

		tag, ok := tlsCertTags[certFilename]
		if !ok {
			// haven't seen this cert file yet, let's give it a tag
			// and add a loader for it
			tag = fmt.Sprintf("cert%d", len(tlsCertTags))
			fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
				Certificate: certFilename,
				Key:         keyFilename,
				Tags:        []string{tag},
			})
			// remember this for next time we see this cert file
			tlsCertTags[certFilename] = tag
		}
		certSelector.AnyTag = append(certSelector.AnyTag, tag)

	default:
		return nil, h.ArgErr()
	}

	var hasBlock bool
	for h.NextBlock(0) {
		hasBlock = true

		switch h.Val() {
		case "protocols":
			args := h.RemainingArgs()
			if len(args) == 0 {
				return nil, h.Errf("protocols requires one or two arguments")
			}
			if len(args) > 0 {
				if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
					return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[0])
				}
				cp.ProtocolMin = args[0]
			}
			if len(args) > 1 {
				if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
					return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[1])
				}
				cp.ProtocolMax = args[1]
			}

		case "ciphers":
			for h.NextArg() {
				if !caddytls.CipherSuiteNameSupported(h.Val()) {
					return nil, h.Errf("wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
				}
				cp.CipherSuites = append(cp.CipherSuites, h.Val())
			}

		case "curves":
			for h.NextArg() {
				if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
					return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
				}
				cp.Curves = append(cp.Curves, h.Val())
			}

		case "client_auth":
			cp.ClientAuthentication = &caddytls.ClientAuthentication{}
			if err := cp.ClientAuthentication.UnmarshalCaddyfile(h.NewFromNextSegment()); err != nil {
				return nil, err
			}

		case "alpn":
			args := h.RemainingArgs()
			if len(args) == 0 {
				return nil, h.ArgErr()
			}
			cp.ALPN = args

		case "load":
			folderLoader = append(folderLoader, h.RemainingArgs()...)

		case "ca":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			acmeIssuer.CA = arg[0]

		case "key_type":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			keyType = arg[0]

		case "eab":
			arg := h.RemainingArgs()
			if len(arg) != 2 {
				return nil, h.ArgErr()
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			acmeIssuer.ExternalAccount = &acme.EAB{
				KeyID:  arg[0],
				MACKey: arg[1],
			}

		case "issuer":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			modName := h.Val()
			modID := "tls.issuance." + modName
			unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
			if err != nil {
				return nil, err
			}
			issuer, ok := unm.(certmagic.Issuer)
			if !ok {
				return nil, h.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
			}
			issuers = append(issuers, issuer)

		case "get_certificate":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			modName := h.Val()
			modID := "tls.get_certificate." + modName
			unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
			if err != nil {
				return nil, err
			}
			certManager, ok := unm.(certmagic.Manager)
			if !ok {
				return nil, h.Errf("module %s (%T) is not a certmagic.CertificateManager", modID, unm)
			}
			certManagers = append(certManagers, certManager)

		case "dns":
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			// DNS provider configuration optional, since it may be configured globally via the TLS app with global options
			if h.NextArg() {
				provName := h.Val()
				modID := "dns.providers." + provName
				unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
				if err != nil {
					return nil, err
				}
				acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, h.warnings)
			} else if h.Option("dns") == nil {
				// if DNS is omitted locally, it needs to be configured globally
				return nil, h.ArgErr()
			}

		case "resolvers":
			args := h.RemainingArgs()
			if len(args) == 0 {
				return nil, h.ArgErr()
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			dnsOptionsSet = append(dnsOptionsSet, "resolvers")
			acmeIssuer.Challenges.DNS.Resolvers = args

		case "propagation_delay":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			delayStr := arg[0]
			delay, err := caddy.ParseDuration(delayStr)
			if err != nil {
				return nil, h.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			dnsOptionsSet = append(dnsOptionsSet, "propagation_delay")
			acmeIssuer.Challenges.DNS.PropagationDelay = caddy.Duration(delay)

		case "propagation_timeout":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			timeoutStr := arg[0]
			var timeout time.Duration
			// "-1" is a special value meaning no timeout
			if timeoutStr == "-1" {
				timeout = time.Duration(-1)
			} else {
				var err error
				timeout, err = caddy.ParseDuration(timeoutStr)
				if err != nil {
					return nil, h.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
				}
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			dnsOptionsSet = append(dnsOptionsSet, "propagation_timeout")
			acmeIssuer.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)

		case "dns_ttl":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			ttlStr := arg[0]
			ttl, err := caddy.ParseDuration(ttlStr)
			if err != nil {
				return nil, h.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			dnsOptionsSet = append(dnsOptionsSet, "dns_ttl")
			acmeIssuer.Challenges.DNS.TTL = caddy.Duration(ttl)

		case "dns_challenge_override_domain":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			if acmeIssuer.Challenges == nil {
				acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
			}
			if acmeIssuer.Challenges.DNS == nil {
				acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
			}
			dnsOptionsSet = append(dnsOptionsSet, "dns_challenge_override_domain")
			acmeIssuer.Challenges.DNS.OverrideDomain = arg[0]

		case "ca_root":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			if acmeIssuer == nil {
				acmeIssuer = new(caddytls.ACMEIssuer)
			}
			acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0])

		case "on_demand":
			if h.NextArg() {
				return nil, h.ArgErr()
			}
			onDemand = true

		case "reuse_private_keys":
			if h.NextArg() {
				return nil, h.ArgErr()
			}
			reusePrivateKeys = true

		case "insecure_secrets_log":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			cp.InsecureSecretsLog = h.Val()

		case "renewal_window_ratio":
			arg := h.RemainingArgs()
			if len(arg) != 1 {
				return nil, h.ArgErr()
			}
			ratio, err := strconv.ParseFloat(arg[0], 64)
			if err != nil {
				return nil, h.Errf("parsing renewal_window_ratio: %v", err)
			}
			if ratio <= 0 || ratio >= 1 {
				return nil, h.Errf("renewal_window_ratio must be between 0 and 1 (exclusive)")
			}
			renewalWindowRatio = ratio

		default:
			return nil, h.Errf("unknown subdirective: %s", h.Val())
		}
	}

	// Validate DNS challenge config: any DNS challenge option except "dns" requires a DNS provider
	if acmeIssuer != nil && acmeIssuer.Challenges != nil && acmeIssuer.Challenges.DNS != nil {
		dnsCfg := acmeIssuer.Challenges.DNS
		providerSet := dnsCfg.ProviderRaw != nil || h.Option("dns") != nil || h.Option("acme_dns") != nil
		if len(dnsOptionsSet) > 0 && !providerSet {
			return nil, h.Errf(
				"setting DNS challenge options [%s] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option)",
				strings.Join(dnsOptionsSet, ", "),
			)
		}
	}

	// a naked tls directive is not allowed
	if len(firstLine) == 0 && !hasBlock {
		return nil, h.ArgErr()
	}

	// begin building the final config values
	configVals := []ConfigValue{}

	// certificate loaders
	if len(fileLoader) > 0 {
		configVals = append(configVals, ConfigValue{
			Class: "tls.cert_loader",
			Value: fileLoader,
		})
	}
	if len(folderLoader) > 0 {
		configVals = append(configVals, ConfigValue{
			Class: "tls.cert_loader",
			Value: folderLoader,
		})
	}

	// some tls subdirectives are shortcuts that implicitly configure issuers, and the
	// user can also configure issuers explicitly using the issuer subdirective; the
	// logic to support both would likely be complex, or at least unintuitive
	if len(issuers) > 0 && (acmeIssuer != nil || internalIssuer != nil) {
		return nil, h.Err("cannot mix issuer subdirective (explicit issuers) with other issuer-specific subdirectives (implicit issuers)")
	}
	if acmeIssuer != nil && internalIssuer != nil {
		return nil, h.Err("cannot create both ACME and internal certificate issuers")
	}

	// now we should either have: explicitly-created issuers, or an implicitly-created
	// ACME or internal issuer, or no issuers at all
	switch {
	case len(issuers) > 0:
		for _, issuer := range issuers {
			configVals = append(configVals, ConfigValue{
				Class: "tls.cert_issuer",
				Value: issuer,
			})
		}

	case acmeIssuer != nil:
		// implicit ACME issuers (from various subdirectives) - use defaults; there might be more than one
		defaultIssuers := caddytls.DefaultIssuers(acmeIssuer.Email)

		// if an ACME CA endpoint was set, the user expects to use that specific one,
		// not any others that may be defaults, so replace all defaults with that ACME CA
		if acmeIssuer.CA != "" {
			defaultIssuers = []certmagic.Issuer{acmeIssuer}
		}

		for _, issuer := range defaultIssuers {
			// apply settings from the implicitly-configured ACMEIssuer to any
			// default ACMEIssuers, but preserve each default issuer's CA endpoint,
			// because, for example, if you configure the DNS challenge, it should
			// apply to any of the default ACMEIssuers, but you don't want to trample
			// out their unique CA endpoints
			if iss, ok := issuer.(*caddytls.ACMEIssuer); ok && iss != nil {
				acmeCopy := *acmeIssuer
				acmeCopy.CA = iss.CA
				issuer = &acmeCopy
			}
			configVals = append(configVals, ConfigValue{
				Class: "tls.cert_issuer",
				Value: issuer,
			})
		}

	case internalIssuer != nil:
		configVals = append(configVals, ConfigValue{
			Class: "tls.cert_issuer",
			Value: internalIssuer,
		})
	}

	// certificate key type
	if keyType != "" {
		configVals = append(configVals, ConfigValue{
			Class: "tls.key_type",
			Value: keyType,
		})
	}

	// on-demand TLS
	if onDemand {
		configVals = append(configVals, ConfigValue{
			Class: "tls.on_demand",
			Value: true,
		})
	}
	for _, certManager := range certManagers {
		configVals = append(configVals, ConfigValue{
			Class: "tls.cert_manager",
			Value: certManager,
		})
	}

	// reuse private keys TLS
	if reusePrivateKeys {
		configVals = append(configVals, ConfigValue{
			Class: "tls.reuse_private_keys",
			Value: true,
		})
	}

	// renewal window ratio
	if renewalWindowRatio > 0 {
		configVals = append(configVals, ConfigValue{
			Class: "tls.renewal_window_ratio",
			Value: renewalWindowRatio,
		})
	}

	// if enabled, the names in the site addresses will be
	// added to the automation policies
	if forceAutomate {
		configVals = append(configVals, ConfigValue{
			Class: "tls.force_automate",
			Value: true,
		})
	}

	// custom certificate selection
	if len(certSelector.AnyTag) > 0 {
		cp.CertSelection = &certSelector
	}

	// connection policy -- always add one, to ensure that TLS
	// is enabled, because this directive was used (this is
	// needed, for instance, when a site block has a key of
	// just ":5000" - i.e. no hostname, and only on-demand TLS
	// is enabled)
	configVals = append(configVals, ConfigValue{
		Class: "tls.connection_policy",
		Value: cp,
	})

	return configVals, nil
}

// parseRoot parses the root directive.
// Syntax:
//
//	root [<matcher>] <path>
func parseRoot(h Helper) ([]ConfigValue, error) {
	h.Next() // consume directive name

	// count the tokens to determine what to do
	argsCount := h.CountRemainingArgs()
	if argsCount == 0 {
		return nil, h.Errf("too few arguments; must have at least a root path")
	}
	if argsCount > 2 {
		return nil, h.Errf("too many arguments; should only be a matcher and a path")
	}

	// with only one arg, assume it's a root path with no matcher token
	if argsCount == 1 {
		if !h.NextArg() {
			return nil, h.ArgErr()
		}
		return h.NewRoute(nil, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
	}

	// parse the matcher token into a matcher set
	userMatcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}
	h.Next() // consume directive name again, matcher parsing does a reset

	// advance to the root path
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	// make the route with the matcher
	return h.NewRoute(userMatcherSet, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}

// parseFilesystem parses the fs directive. Syntax:
//
//	fs <filesystem>
func parseFilesystem(h Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	// exactly one argument is allowed
	if h.NextArg() {
		return nil, h.ArgErr()
	}
	return caddyhttp.VarsMiddleware{"fs": h.Val()}, nil
}

// parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax.
func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
	v := new(caddyhttp.VarsMiddleware)
	err := v.UnmarshalCaddyfile(h.Dispenser)
	return v, err
}

// parseRedir parses the redir directive. Syntax:
//
//	redir [<matcher>] <to> [<code>]
//
// <code> can be "permanent" for 301, "temporary" for 302 (default),
// a placeholder, or any number in the 3xx range or 401. The special
// code "html" can be used to redirect only browser clients (will
// respond with HTTP 200 and no Location header; redirect is performed
// with JS and a meta tag).
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) { h.Next() // consume directive name if !h.NextArg() { return nil, h.ArgErr() } to := h.Val() var code string if h.NextArg() { code = h.Val() } var body string var hdr http.Header switch code { case "permanent": code = "301" case "temporary", "": code = "302" case "html": // Script tag comes first since that will better imitate a redirect in the browser's // history, but the meta tag is a fallback for most non-JS clients. const metaRedir = ` Redirecting... Redirecting to %s... ` safeTo := html.EscapeString(to) body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo) hdr = http.Header{"Content-Type": []string{"text/html; charset=utf-8"}} code = "200" // don't redirect non-browser clients default: // Allow placeholders for the code if strings.HasPrefix(code, "{") { break } // Try to validate as an integer otherwise codeInt, err := strconv.Atoi(code) if err != nil { return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code) } // Sometimes, a 401 with Location header is desirable because // requests made with XHR will "eat" the 3xx redirect; so if // the intent was to redirect to an auth page, a 3xx won't // work. Responding with 401 allows JS code to read the // Location header and do a window.location redirect manually. // see https://stackoverflow.com/a/2573589/846934 // see https://github.com/oauth2-proxy/oauth2-proxy/issues/1522 if codeInt < 300 || (codeInt > 399 && codeInt != 401) { return nil, h.Errf("Redir code not in the 3xx range or 401: '%v'", codeInt) } } // don't redirect non-browser clients if code != "200" { hdr = http.Header{"Location": []string{to}} } return caddyhttp.StaticResponse{ StatusCode: caddyhttp.WeakString(code), Headers: hdr, Body: body, }, nil } // parseRespond parses the respond directive. 
// parseRespond parses the respond directive; syntax is handled by
// StaticResponse.UnmarshalCaddyfile.
func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
	sr := new(caddyhttp.StaticResponse)
	err := sr.UnmarshalCaddyfile(h.Dispenser)
	return sr, err
}

// parseAbort parses the abort directive.
func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive
	// the directive accepts no arguments and no block
	for h.Next() || h.NextBlock(0) {
		return nil, h.ArgErr()
	}
	return &caddyhttp.StaticResponse{Abort: true}, nil
}

// parseError parses the error directive; syntax is handled by
// StaticError.UnmarshalCaddyfile.
func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
	se := new(caddyhttp.StaticError)
	err := se.UnmarshalCaddyfile(h.Dispenser)
	return se, err
}

// parseRoute parses the route directive.
func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
	allResults, err := parseSegmentAsConfig(h)
	if err != nil {
		return nil, err
	}

	// only handler directives (which produce routes/subroutes) are
	// permitted inside a route block
	for _, result := range allResults {
		switch result.Value.(type) {
		case caddyhttp.Route, caddyhttp.Subroute:
		default:
			return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value)
		}
	}

	return buildSubroute(allResults, h.groupCounter, false)
}

// parseHandle parses the handle directive as a subroute.
func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
	return ParseSegmentAsSubroute(h)
}

// parseHandleErrors parses the handle_errors directive. Optional
// status-code arguments (e.g. "404" or "5xx") are compiled into a CEL
// expression that matches {http.error.status_code}; the block body is
// parsed as a subroute gated on that matcher.
func parseHandleErrors(h Helper) ([]ConfigValue, error) {
	h.Next() // consume directive name

	expression := ""
	args := h.RemainingArgs()
	if len(args) > 0 {
		codes := []string{}
		for _, val := range args {
			// each status arg must be exactly 3 characters ("404" or "4xx")
			if len(val) != 3 {
				return nil, h.Errf("bad status value '%s'", val)
			}
			if strings.HasSuffix(val, "xx") {
				// wildcard class like "5xx" becomes a range check
				val = val[:1]
				_, err := strconv.Atoi(val)
				if err != nil {
					return nil, h.Errf("bad status value '%s': %v", val, err)
				}
				if expression != "" {
					expression += " || "
				}
				expression += fmt.Sprintf("{http.error.status_code} >= %s00 && {http.error.status_code} <= %s99", val, val)
				continue
			}
			_, err := strconv.Atoi(val)
			if err != nil {
				return nil, h.Errf("bad status value '%s': %v", val, err)
			}
			codes = append(codes, val)
		}
		if len(codes) > 0 {
			if expression != "" {
				expression += " || "
			}
			expression += "{http.error.status_code} in [" + strings.Join(codes, ", ") + "]"
		}
		// Reset cursor position to get ready for ParseSegmentAsSubroute
		h.Reset()
		h.Next()
		h.RemainingArgs()
		h.Prev()
	} else {
		// If no arguments present reset the cursor position to get ready for ParseSegmentAsSubroute
		h.Prev()
	}

	handler, err := ParseSegmentAsSubroute(h)
	if err != nil {
		return nil, err
	}
	subroute, ok := handler.(*caddyhttp.Subroute)
	if !ok {
		return nil, h.Errf("segment was not parsed as a subroute")
	}

	// wrap the subroutes
	wrappingRoute := caddyhttp.Route{
		HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(subroute, "handler", "subroute", nil)},
	}
	subroute = &caddyhttp.Subroute{
		Routes: []caddyhttp.Route{wrappingRoute},
	}
	if expression != "" {
		statusMatcher := caddy.ModuleMap{
			"expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
		}
		subroute.Routes[0].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
	}
	return []ConfigValue{
		{
			Class: "error_route",
			Value: subroute,
		},
	}, nil
}

// parseInvoke parses the invoke directive.
func parseInvoke(h Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	// exactly one argument (the route name); no block allowed
	for h.Next() || h.NextBlock(0) {
		return nil, h.ArgErr()
	}

	// remember that we're invoking this name
	// to populate the server with these named routes
	if h.State[namedRouteKey] == nil {
		h.State[namedRouteKey] = map[string]struct{}{}
	}
	h.State[namedRouteKey].(map[string]struct{})[h.Val()] = struct{}{}

	// return the handler
	return &caddyhttp.Invoke{Name: h.Val()}, nil
}

// parseLog parses the log directive. Syntax:
//
//	log {
//	    hostnames <hostnames...>
//	    output <writer_module> ...
//	    core   <core_module> ...
//	    format <encoder_module> ...
//	    level  <level>
//	}
func parseLog(h Helper) ([]ConfigValue, error) {
	return parseLogHelper(h, nil)
}

// parseLogHelper is used both for the parseLog directive within Server Blocks,
// as well as the global "log" option for configuring loggers at the global
// level.
// The parseAsGlobalOption parameter is used to distinguish any differing logic
// between the two.
func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
	h.Next() // consume option name

	// When the globalLogNames parameter is passed in, we make
	// modifications to the parsing behavior.
	parseAsGlobalOption := globalLogNames != nil

	// nolint:prealloc
	var configValues []ConfigValue

	// Logic below expects that a name is always present when a
	// global option is being parsed; or an optional override
	// is supported for access logs.
	var logName string

	if parseAsGlobalOption {
		if h.NextArg() {
			logName = h.Val()

			// Only a single argument is supported.
			if h.NextArg() {
				return nil, h.ArgErr()
			}
		} else {
			// If there is no log name specified, we
			// reference the default logger. See the
			// setupNewDefault function in the logging
			// package for where this is configured.
			logName = caddy.DefaultLoggerName
		}

		// Verify this name is unused.
		_, used := globalLogNames[logName]
		if used {
			return nil, h.Err("duplicate global log option for: " + logName)
		}
		globalLogNames[logName] = struct{}{}
	} else {
		// An optional override of the logger name can be provided;
		// otherwise a default will be used, like "log0", "log1", etc.
		if h.NextArg() {
			logName = h.Val()

			// Only a single argument is supported.
			if h.NextArg() {
				return nil, h.ArgErr()
			}
		}
	}

	cl := new(caddy.CustomLog)

	// allow overriding the current site block's hostnames for this logger;
	// this is useful for setting up loggers per subdomain in a site block
	// with a wildcard domain
	customHostnames := []string{}
	noHostname := false
	for h.NextBlock(0) {
		switch h.Val() {
		case "hostnames":
			if parseAsGlobalOption {
				return nil, h.Err("hostnames is not allowed in the log global options")
			}
			args := h.RemainingArgs()
			if len(args) == 0 {
				return nil, h.ArgErr()
			}
			customHostnames = append(customHostnames, args...)

		case "output":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			moduleName := h.Val()

			// can't use the usual caddyfile.Unmarshaler flow with the
			// standard writers because they are in the caddy package
			// (because they are the default) and implementing that
			// interface there would unfortunately create circular import
			var wo caddy.WriterOpener
			switch moduleName {
			case "stdout":
				wo = caddy.StdoutWriter{}
			case "stderr":
				wo = caddy.StderrWriter{}
			case "discard":
				wo = caddy.DiscardWriter{}
			default:
				modID := "caddy.logging.writers." + moduleName
				unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
				if err != nil {
					return nil, err
				}
				var ok bool
				wo, ok = unm.(caddy.WriterOpener)
				if !ok {
					return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm)
				}
			}
			cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)

		case "sampling":
			d := h.Dispenser.NewFromNextSegment()
			for d.NextArg() {
				// consume any tokens on the same line, if any.
			}

			sampling := &caddy.LogSampling{}
			for nesting := d.Nesting(); d.NextBlock(nesting); {
				subdir := d.Val()
				switch subdir {
				case "interval":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					// NOTE(review): the "ns" suffix is appended here, so a
					// bare integer is interpreted as nanoseconds — confirm
					// this matches the documented behavior for sampling.
					interval, err := time.ParseDuration(d.Val() + "ns")
					if err != nil {
						return nil, d.Errf("failed to parse interval: %v", err)
					}
					sampling.Interval = interval
				case "first":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					first, err := strconv.Atoi(d.Val())
					if err != nil {
						return nil, d.Errf("failed to parse first: %v", err)
					}
					sampling.First = first
				case "thereafter":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					thereafter, err := strconv.Atoi(d.Val())
					if err != nil {
						return nil, d.Errf("failed to parse thereafter: %v", err)
					}
					sampling.Thereafter = thereafter
				default:
					return nil, d.Errf("unrecognized subdirective: %s", subdir)
				}
			}

			cl.Sampling = sampling

		case "core":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			moduleName := h.Val()
			moduleID := "caddy.logging.cores." + moduleName
			unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
			if err != nil {
				return nil, err
			}
			core, ok := unm.(zapcore.Core)
			if !ok {
				return nil, h.Errf("module %s (%T) is not a zapcore.Core", moduleID, unm)
			}
			cl.CoreRaw = caddyconfig.JSONModuleObject(core, "module", moduleName, h.warnings)

		case "format":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			moduleName := h.Val()
			moduleID := "caddy.logging.encoders." + moduleName
			unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
			if err != nil {
				return nil, err
			}
			enc, ok := unm.(zapcore.Encoder)
			if !ok {
				return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
			}
			cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)

		case "level":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			cl.Level = h.Val()
			if h.NextArg() {
				return nil, h.ArgErr()
			}

		case "include":
			if !parseAsGlobalOption {
				return nil, h.Err("include is not allowed in the log directive")
			}
			for h.NextArg() {
				cl.Include = append(cl.Include, h.Val())
			}

		case "exclude":
			if !parseAsGlobalOption {
				return nil, h.Err("exclude is not allowed in the log directive")
			}
			for h.NextArg() {
				cl.Exclude = append(cl.Exclude, h.Val())
			}

		case "no_hostname":
			if h.NextArg() {
				return nil, h.ArgErr()
			}
			noHostname = true

		default:
			return nil, h.Errf("unrecognized subdirective: %s", h.Val())
		}
	}

	var val namedCustomLog
	val.hostnames = customHostnames
	val.noHostname = noHostname

	isEmptyConfig := reflect.DeepEqual(cl, new(caddy.CustomLog))

	// Skip handling of empty logging configs

	if parseAsGlobalOption {
		// Use indicated name for global log options
		val.name = logName
	} else {
		if logName != "" {
			val.name = logName
		} else if !isEmptyConfig {
			// Construct a log name for server log streams
			logCounter, ok := h.State["logCounter"].(int)
			if !ok {
				logCounter = 0
			}
			val.name = fmt.Sprintf("log%d", logCounter)
			logCounter++
			h.State["logCounter"] = logCounter
		}
		if val.name != "" {
			cl.Include = []string{"http.log.access." + val.name}
		}
	}
	if !isEmptyConfig {
		val.log = cl
	}
	configValues = append(configValues, ConfigValue{
		Class: "custom_log",
		Value: val,
	})
	return configValues, nil
}

// parseLogSkip parses the log_skip directive. Syntax:
//
//	log_skip [<matcher>]
func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	// "skip_log" is deprecated, replaced by "log_skip"
	if h.Val() == "skip_log" {
		caddy.Log().Named("config.adapter.caddyfile").Warn("the 'skip_log' directive is deprecated, please use 'log_skip' instead!")
	}

	// no arguments and no block are accepted
	if h.NextArg() {
		return nil, h.ArgErr()
	}
	if h.NextBlock(0) {
		return nil, h.Err("log_skip directive does not accept blocks")
	}
	return caddyhttp.VarsMiddleware{"log_skip": true}, nil
}

// parseLogName parses the log_name directive. Syntax:
//
//	log_name <names...>
func parseLogName(h Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name
	return caddyhttp.VarsMiddleware{
		caddyhttp.AccessLoggerNameVarKey: h.RemainingArgs(),
	}, nil
}

================================================ FILE: caddyconfig/httpcaddyfile/builtins_test.go ================================================ package httpcaddyfile import ( "strings" "testing" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" _ "github.com/caddyserver/caddy/v2/modules/logging" ) func TestLogDirectiveSyntax(t *testing.T) { for i, tc := range []struct { input string output string expectError bool }{ { input: `:8080 { log } `, output: `{"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{}}}}}}`, expectError: false, }, { input: `:8080 { log { core mock output file foo.log } } `, output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`, expectError: false, }, { input: `:8080 { log { format filter { wrap console fields {
request>remote_ip ip_mask { ipv4 24 ipv6 32 } } } } } `, output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"encoder":{"fields":{"request\u003eremote_ip":{"filter":"ip_mask","ipv4_cidr":24,"ipv6_cidr":32}},"format":"filter","wrap":{"format":"console"}},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`, expectError: false, }, { input: `:8080 { log name-override { core mock output file foo.log } } `, output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.name-override"]},"name-override":{"writer":{"filename":"foo.log","output":"file"},"core":{"module":"mock"},"include":["http.log.access.name-override"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"name-override"}}}}}}`, expectError: false, }, { input: `:8080 { log { sampling { interval 2 first 3 thereafter 4 } } } `, output: `{"logging":{"logs":{"default":{"exclude":["http.log.access.log0"]},"log0":{"sampling":{"interval":2,"first":3,"thereafter":4},"include":["http.log.access.log0"]}}},"apps":{"http":{"servers":{"srv0":{"listen":[":8080"],"logs":{"default_logger_name":"log0"}}}}}}`, expectError: false, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } out, _, err := adapter.Adapt([]byte(tc.input), nil) if err != nil != tc.expectError { t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err) continue } if string(out) != tc.output { t.Errorf("Test %d error output mismatch Expected: %s, got %s", i, tc.output, out) } } } func TestRedirDirectiveSyntax(t *testing.T) { for i, tc := range []struct { input string expectError bool }{ { input: `:8080 { redir :8081 }`, expectError: false, }, { input: `:8080 { redir * :8081 }`, expectError: false, }, { input: `:8080 { redir /api/* :8081 300 }`, expectError: false, }, { input: `:8080 { redir :8081 300 }`, expectError: false, }, { input: `:8080 { redir /api/* 
:8081 399 }`, expectError: false, }, { input: `:8080 { redir :8081 399 }`, expectError: false, }, { input: `:8080 { redir /old.html /new.html }`, expectError: false, }, { input: `:8080 { redir /old.html /new.html temporary }`, expectError: false, }, { input: `:8080 { redir https://example.com{uri} permanent }`, expectError: false, }, { input: `:8080 { redir /old.html /new.html permanent }`, expectError: false, }, { input: `:8080 { redir /old.html /new.html html }`, expectError: false, }, { // this is now allowed so a Location header // can be written and consumed by JS // in the case of XHR requests input: `:8080 { redir * :8081 401 }`, expectError: false, }, { input: `:8080 { redir * :8081 402 }`, expectError: true, }, { input: `:8080 { redir * :8081 {http.reverse_proxy.status_code} }`, expectError: false, }, { input: `:8080 { redir /old.html /new.html htlm }`, expectError: true, }, { input: `:8080 { redir * :8081 200 }`, expectError: true, }, { input: `:8080 { redir * :8081 temp }`, expectError: true, }, { input: `:8080 { redir * :8081 perm }`, expectError: true, }, { input: `:8080 { redir * :8081 php }`, expectError: true, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } _, _, err := adapter.Adapt([]byte(tc.input), nil) if err != nil != tc.expectError { t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err) continue } } } func TestImportErrorLine(t *testing.T) { for i, tc := range []struct { input string errorFunc func(err error) bool }{ { input: `(t1) { abort {args[:]} } :8080 { import t1 import t1 true }`, errorFunc: func(err error) bool { return err != nil && strings.Contains(err.Error(), "Caddyfile:6 (import t1)") }, }, { input: `(t1) { abort {args[:]} } :8080 { import t1 true }`, errorFunc: func(err error) bool { return err != nil && strings.Contains(err.Error(), "Caddyfile:5 (import t1)") }, }, { input: ` import testdata/import_variadic_snippet.txt :8080 { import t1 true }`, errorFunc: func(err error) bool 
{ return err == nil }, }, { input: ` import testdata/import_variadic_with_import.txt :8080 { import t1 true import t2 true }`, errorFunc: func(err error) bool { return err == nil }, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } _, _, err := adapter.Adapt([]byte(tc.input), nil) if !tc.errorFunc(err) { t.Errorf("Test %d error expectation failed, got %s", i, err) continue } } } func TestNestedImport(t *testing.T) { for i, tc := range []struct { input string errorFunc func(err error) bool }{ { input: `(t1) { respond {args[0]} {args[1]} } (t2) { import t1 {args[0]} 202 } :8080 { handle { import t2 "foobar" } }`, errorFunc: func(err error) bool { return err == nil }, }, { input: `(t1) { respond {args[:]} } (t2) { import t1 {args[0]} {args[1]} } :8080 { handle { import t2 "foobar" 202 } }`, errorFunc: func(err error) bool { return err == nil }, }, { input: `(t1) { respond {args[0]} {args[1]} } (t2) { import t1 {args[:]} } :8080 { handle { import t2 "foobar" 202 } }`, errorFunc: func(err error) bool { return err == nil }, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } _, _, err := adapter.Adapt([]byte(tc.input), nil) if !tc.errorFunc(err) { t.Errorf("Test %d error expectation failed, got %s", i, err) continue } } } ================================================ FILE: caddyconfig/httpcaddyfile/directives.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package httpcaddyfile

import (
	"encoding/json"
	"maps"
	"net"
	"slices"
	"sort"
	"strconv"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// defaultDirectiveOrder specifies the default order
// to apply directives in HTTP routes. This must only
// consist of directives that are included in Caddy's
// standard distribution.
//
// e.g. The 'root' directive goes near the start in
// case rewrites or redirects depend on existence of
// files, i.e. the file matcher, which must know the
// root first.
//
// e.g. The 'header' directive goes before 'redir' so
// that headers can be manipulated before doing redirects.
//
// e.g. The 'respond' directive is near the end because it
// writes a response and terminates the middleware chain.
var defaultDirectiveOrder = []string{
	"tracing",

	// set variables that may be used by other directives
	"map",
	"vars",
	"fs",
	"root",
	"log_append",
	"skip_log", // TODO: deprecated, renamed to log_skip
	"log_skip",
	"log_name",

	"header",
	"copy_response_headers", // only in reverse_proxy's handle_response
	"request_body",

	"redir",

	// incoming request manipulation
	"method",
	"rewrite",
	"uri",
	"try_files",

	// middleware handlers; some wrap responses
	"basicauth", // TODO: deprecated, renamed to basic_auth
	"basic_auth",
	"forward_auth",
	"request_header",
	"encode",
	"push",
	"intercept",
	"templates",

	// special routing & dispatching directives
	"invoke",
	"handle",
	"handle_path",
	"route",

	// handlers that typically respond to requests
	"abort",
	"error",
	"copy_response", // only in reverse_proxy's handle_response
	"respond",
	"metrics",
	"reverse_proxy",
	"php_fastcgi",
	"file_server",
	"acme_server",
}

// directiveOrder specifies the order to apply directives
// in HTTP routes, after being modified by either the
plugins or by the user via the "order" global option. var directiveOrder = defaultDirectiveOrder // RegisterDirective registers a unique directive dir with an // associated unmarshaling (setup) function. When directive dir // is encountered in a Caddyfile, setupFunc will be called to // unmarshal its tokens. func RegisterDirective(dir string, setupFunc UnmarshalFunc) { if _, ok := registeredDirectives[dir]; ok { panic("directive " + dir + " already registered") } registeredDirectives[dir] = setupFunc } // RegisterHandlerDirective is like RegisterDirective, but for // directives which specifically output only an HTTP handler. // Directives registered with this function will always have // an optional matcher token as the first argument. func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) { RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) { if !h.Next() { return nil, h.ArgErr() } matcherSet, err := h.ExtractMatcherSet() if err != nil { return nil, err } val, err := setupFunc(h) if err != nil { return nil, err } return h.NewRoute(matcherSet, val), nil }) } // RegisterDirectiveOrder registers the default order for a // directive from a plugin. // // This is useful when a plugin has a well-understood place // it should run in the middleware pipeline, and it allows // users to avoid having to define the order themselves. // // The directive dir may be placed in the position relative // to ('before' or 'after') a directive included in Caddy's // standard distribution. It cannot be relative to another // plugin's directive. // // EXPERIMENTAL: This API may change or be removed. 
func RegisterDirectiveOrder(dir string, position Positional, standardDir string) { // check if directive was already ordered if slices.Contains(directiveOrder, dir) { panic("directive '" + dir + "' already ordered") } if position != Before && position != After { panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'") } // check if directive exists in standard distribution, since // we can't allow plugins to depend on one another; we can't // guarantee the order that plugins are loaded in. foundStandardDir := slices.Contains(defaultDirectiveOrder, standardDir) if !foundStandardDir { panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy") } // insert directive into proper position newOrder := directiveOrder for i, d := range newOrder { if d != standardDir { continue } switch position { case Before: newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...) case After: newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...) case First, Last: } break } directiveOrder = newOrder } // RegisterGlobalOption registers a unique global option opt with // an associated unmarshaling (setup) function. When the global // option opt is encountered in a Caddyfile, setupFunc will be // called to unmarshal its tokens. func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) { if _, ok := registeredGlobalOptions[opt]; ok { panic("global option " + opt + " already registered") } registeredGlobalOptions[opt] = setupFunc } // Helper is a type which helps setup a value from // Caddyfile tokens. type Helper struct { *caddyfile.Dispenser // State stores intermediate variables during caddyfile adaptation. State map[string]any options map[string]any warnings *[]caddyconfig.Warning matcherDefs map[string]caddy.ModuleMap parentBlock caddyfile.ServerBlock groupCounter counter } // Option gets the option keyed by name. 
func (h Helper) Option(name string) any {
	return h.options[name]
}

// Caddyfiles returns the list of config files from
// which tokens in the current server block were loaded.
func (h Helper) Caddyfiles() []string {
	// collect the distinct file names of every token
	// in this server block
	seen := make(map[string]struct{})
	for _, seg := range h.parentBlock.Segments {
		for _, tkn := range seg {
			seen[tkn.File] = struct{}{}
		}
	}

	// produce a deterministic (sorted) list from the set
	names := make([]string, 0, len(seen))
	for name := range seen {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}

// JSON converts val into JSON. Any errors are added to warnings.
func (h Helper) JSON(val any) json.RawMessage {
	return caddyconfig.JSON(val, h.warnings)
}

// MatcherToken assumes the next argument token is (possibly) a matcher,
// and if so, returns the matcher set along with a true value. If the next
// token is not a matcher, nil and false is returned. Note that a true
// value may be returned with a nil matcher set if it is a catch-all.
func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
	if h.NextArg() {
		return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
	}
	return nil, false, nil
}

// ExtractMatcherSet is like MatcherToken, except this is a higher-level
// method that returns the matcher set described by the matcher token,
// or nil if there is none, and deletes the matcher token from the
// dispenser and resets it as if this look-ahead never happened. Useful
// when wrapping a route (one or more handlers) in a user-defined matcher.
func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
	matcherSet, hasMatcher, err := h.MatcherToken()
	if err != nil {
		return nil, err
	}
	if hasMatcher {
		// strip matcher token; we don't need to
		// use the return value here because a
		// new dispenser should have been made
		// solely for this directive's tokens,
		// with no other uses of same slice
		h.Dispenser.Delete()
	}
	h.Dispenser.Reset() // pretend this lookahead never happened
	return matcherSet, nil
}

// NewRoute returns config values relevant to creating a new HTTP route.
func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
	handler caddyhttp.MiddlewareHandler,
) []ConfigValue {
	mod, err := caddy.GetModule(caddy.GetModuleID(handler))
	if err != nil {
		// we cannot emit a route without knowing the handler's module
		// name; surface the problem as a warning instead of failing hard
		*h.warnings = append(*h.warnings, caddyconfig.Warning{
			File:    h.File(),
			Line:    h.Line(),
			Message: err.Error(),
		})
		return nil
	}
	// a nil matcher set means "match all"; only emit one if present
	var matcherSetsRaw []caddy.ModuleMap
	if matcherSet != nil {
		matcherSetsRaw = append(matcherSetsRaw, matcherSet)
	}
	return []ConfigValue{
		{
			Class: "route",
			Value: caddyhttp.Route{
				MatcherSetsRaw: matcherSetsRaw,
				HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
			},
		},
	}
}

// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
// same group, if there is more than one route in vals.
func (h Helper) GroupRoutes(vals []ConfigValue) {
	// ensure there's at least two routes; group of one is pointless
	var count int
	for _, v := range vals {
		if _, ok := v.Value.(caddyhttp.Route); ok {
			count++
			if count > 1 {
				break
			}
		}
	}
	if count < 2 {
		return
	}

	// now that we know the group will have some effect, do it
	groupName := h.groupCounter.nextGroup()
	for i := range vals {
		if route, ok := vals[i].Value.(caddyhttp.Route); ok {
			route.Group = groupName
			vals[i].Value = route
		}
	}
}

// WithDispenser returns a new instance based on d. All other Helper
// fields are copied, so typically maps are shared with this new instance.
func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
	// Helper has a value receiver, so assigning to h here only
	// mutates the copy that is returned
	h.Dispenser = d
	return h
}

// ParseSegmentAsSubroute parses the segment such that its subdirectives
// are themselves treated as directives, from which a subroute is built
// and returned.
func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
	allResults, err := parseSegmentAsConfig(h)
	if err != nil {
		return nil, err
	}

	return buildSubroute(allResults, h.groupCounter, true)
}

// parseSegmentAsConfig parses the segment such that its subdirectives
// are themselves treated as directives, including named matcher definitions,
// and the raw Config structs are returned.
func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
	var allResults []ConfigValue

	for h.Next() {
		// don't allow non-matcher args on the first line
		if h.NextArg() {
			return nil, h.ArgErr()
		}

		// slice the linear list of tokens into top-level segments
		var segments []caddyfile.Segment
		for nesting := h.Nesting(); h.NextBlock(nesting); {
			segments = append(segments, h.NextSegment())
		}

		// copy existing matcher definitions so we can augment
		// new ones that are defined only in this scope
		matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
		maps.Copy(matcherDefs, h.matcherDefs)

		// find and extract any embedded matcher definitions in this scope
		for i := 0; i < len(segments); i++ {
			seg := segments[i]
			if strings.HasPrefix(seg.Directive(), matcherPrefix) {
				// parse, then add the matcher to matcherDefs
				err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
				if err != nil {
					return nil, err
				}
				// remove the matcher segment (consumed), then step back the loop
				segments = append(segments[:i], segments[i+1:]...)
				i--
			}
		}

		// with matchers ready to go, evaluate each directive's segment
		for _, seg := range segments {
			dir := seg.Directive()
			dirFunc, ok := registeredDirectives[dir]
			if !ok {
				return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir)
			}

			// give the directive a dispenser over only its own tokens,
			// along with the matcher definitions visible in this scope
			subHelper := h
			subHelper.Dispenser = caddyfile.NewDispenser(seg)
			subHelper.matcherDefs = matcherDefs

			results, err := dirFunc(subHelper)
			if err != nil {
				return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
			}

			dir = normalizeDirectiveName(dir)

			for _, result := range results {
				result.directive = dir
				allResults = append(allResults, result)
			}
		}
	}

	return allResults, nil
}

// ConfigValue represents a value to be added to the final
// configuration, or a value to be consulted when building
// the final configuration.
type ConfigValue struct {
	// The kind of value this is. As the config is
	// being built, the adapter will look in the
	// "pile" for values belonging to a certain
	// class when it is setting up a certain part
	// of the config. The associated value will be
	// type-asserted and placed accordingly.
	Class string

	// The value to be used when building the config.
	// Generally its type is associated with the
	// name of the Class.
	Value any

	// the name of the directive that produced this value;
	// set internally while parsing, and used to sort routes
	directive string
}

// sortRoutes sorts the given route config values by the
// configured directive order, then by matcher specificity.
func sortRoutes(routes []ConfigValue) {
	dirPositions := make(map[string]int)
	for i, dir := range directiveOrder {
		dirPositions[dir] = i
	}

	sort.SliceStable(routes, func(i, j int) bool {
		// if the directives are different, just use the established directive order
		iDir, jDir := routes[i].directive, routes[j].directive
		if iDir != jDir {
			return dirPositions[iDir] < dirPositions[jDir]
		}

		// directives are the same; sub-sort by path matcher length if there's
		// only one matcher set and one path (this is a very common case and
		// usually -- but not always -- helpful/expected, oh well; user can
		// always take manual control of order using handler or route blocks)
		iRoute, ok := routes[i].Value.(caddyhttp.Route)
		if !ok {
			return false
		}
		jRoute, ok := routes[j].Value.(caddyhttp.Route)
		if !ok {
			return false
		}

		// decode the path matchers if there is just one matcher set
		var iPM, jPM caddyhttp.MatchPath
		if len(iRoute.MatcherSetsRaw) == 1 {
			_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
		}
		if len(jRoute.MatcherSetsRaw) == 1 {
			_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
		}

		// if there is only one path in the path matcher, sort by longer path
		// (more specific) first; missing path matchers or multi-matchers are
		// treated as zero-length paths
		var iPathLen, jPathLen int
		if len(iPM) == 1 {
			iPathLen = len(iPM[0])
		}
		if len(jPM) == 1 {
			jPathLen = len(jPM[0])
		}

		sortByPath := func() bool {
			// we can only confidently compare path lengths if both
			// directives have a single path to match (issue #5037)
			if iPathLen > 0 && jPathLen > 0 {
				// trim the trailing wildcard if there is one
				iPathTrimmed := strings.TrimSuffix(iPM[0], "*")
				jPathTrimmed := strings.TrimSuffix(jPM[0], "*")

				// if both paths are the same except for a trailing wildcard,
				// sort by the shorter path first (which is more specific)
				if iPathTrimmed == jPathTrimmed {
					return iPathLen < jPathLen
				}

				// we use the trimmed length to compare the paths
				// https://github.com/caddyserver/caddy/issues/7012#issuecomment-2870142195
				// credit to https://github.com/Hellio404
				// for sorts with many items, mixing matchers w/ and w/o wildcards will confuse the sort and result in incorrect orders
				iPathLen = len(iPathTrimmed)
				jPathLen = len(jPathTrimmed)

				// if both paths have the same length, sort lexically
				// https://github.com/caddyserver/caddy/pull/7015#issuecomment-2871993588
				if iPathLen == jPathLen {
					return iPathTrimmed < jPathTrimmed
				}

				// sort most-specific (longest) path first
				return iPathLen > jPathLen
			}

			// if both directives don't have a single path to compare,
			// sort whichever one has a matcher first; if both have
			// a matcher, sort equally (stable sort preserves order)
			return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
		}()

		// some directives involve setting values which can overwrite
		// each other, so it makes most sense to reverse the order so
		// that the least-specific matcher is first, allowing the last
		// matching one to win
		if iDir == "vars" {
			return !sortByPath
		}

		// everything else is most-specific matcher first
		return sortByPath
	})
}

// serverBlock pairs a Caddyfile server block with
// a "pile" of config values, keyed by class name,
// as well as its parsed keys for convenience.
type serverBlock struct {
	block      caddyfile.ServerBlock
	pile       map[string][]ConfigValue // config values obtained from directives
	parsedKeys []Address
}

// hostsFromKeys returns a list of all the non-empty hostnames found in
// the keys of the server block sb. If logger mode is false, a key with
// an empty hostname portion will return an empty slice, since that
// server block is interpreted to effectively match all hosts. An empty
// string is never added to the slice.
//
// If loggerMode is true, then the non-standard ports of keys will be
// joined to the hostnames. This is to effectively match the Host
// header of requests that come in for that key.
// // The resulting slice is not sorted but will never have duplicates. func (sb serverBlock) hostsFromKeys(loggerMode bool) []string { // ensure each entry in our list is unique hostMap := make(map[string]struct{}) for _, addr := range sb.parsedKeys { if addr.Host == "" { if !loggerMode { // server block contains a key like ":443", i.e. the host portion // is empty / catch-all, which means to match all hosts return []string{} } // never append an empty string continue } if loggerMode && addr.Port != "" && addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) && addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) { hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{} } else { hostMap[addr.Host] = struct{}{} } } // convert map to slice sblockHosts := make([]string, 0, len(hostMap)) for host := range hostMap { sblockHosts = append(sblockHosts, host) } return sblockHosts } func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string { // ensure each entry in our list is unique hostMap := make(map[string]struct{}) for _, addr := range sb.parsedKeys { if addr.Host == "" { continue } if addr.Scheme != "http" && addr.Port != httpPort { hostMap[addr.Host] = struct{}{} } } // convert map to slice sblockHosts := make([]string, 0, len(hostMap)) for host := range hostMap { sblockHosts = append(sblockHosts, host) } return sblockHosts } // hasHostCatchAllKey returns true if sb has a key that // omits a host portion, i.e. it "catches all" hosts. func (sb serverBlock) hasHostCatchAllKey() bool { return slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool { return addr.Host == "" }) } // isAllHTTP returns true if all sb keys explicitly specify // the http:// scheme func (sb serverBlock) isAllHTTP() bool { return !slices.ContainsFunc(sb.parsedKeys, func(addr Address) bool { return addr.Scheme != "http" }) } // Positional are the supported modes for ordering directives. 
type Positional string

// The supported ordering positions; note that values are
// compared as strings (e.g. against the "order" global option).
const (
	Before Positional = "before"
	After  Positional = "after"
	First  Positional = "first"
	Last   Positional = "last"
)

type (
	// UnmarshalFunc is a function which can unmarshal Caddyfile
	// tokens into zero or more config values using a Helper type.
	// These are passed in a call to RegisterDirective.
	UnmarshalFunc func(h Helper) ([]ConfigValue, error)

	// UnmarshalHandlerFunc is like UnmarshalFunc, except the
	// output of the unmarshaling is an HTTP handler. This
	// function does not need to deal with HTTP request matching
	// which is abstracted away. Since writing HTTP handlers
	// with Caddyfile support is very common, this is a more
	// convenient way to add a handler to the chain since a lot
	// of the details common to HTTP handlers are taken care of
	// for you. These are passed to a call to
	// RegisterHandlerDirective.
	UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)

	// UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
	// tokens from a global option. It is passed the tokens to parse and
	// existing value from the previous instance of this global option
	// (if any). It returns the value to associate with this global option.
UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal any) (any, error) ) var registeredDirectives = make(map[string]UnmarshalFunc) var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc) ================================================ FILE: caddyconfig/httpcaddyfile/directives_test.go ================================================ package httpcaddyfile import ( "reflect" "sort" "testing" ) func TestHostsFromKeys(t *testing.T) { for i, tc := range []struct { keys []Address expectNormalMode []string expectLoggerMode []string }{ { []Address{ {Original: "foo", Host: "foo"}, }, []string{"foo"}, []string{"foo"}, }, { []Address{ {Original: "foo", Host: "foo"}, {Original: "bar", Host: "bar"}, }, []string{"bar", "foo"}, []string{"bar", "foo"}, }, { []Address{ {Original: ":2015", Port: "2015"}, }, []string{}, []string{}, }, { []Address{ {Original: ":443", Port: "443"}, }, []string{}, []string{}, }, { []Address{ {Original: "foo", Host: "foo"}, {Original: ":2015", Port: "2015"}, }, []string{}, []string{"foo"}, }, { []Address{ {Original: "example.com:2015", Host: "example.com", Port: "2015"}, }, []string{"example.com"}, []string{"example.com:2015"}, }, { []Address{ {Original: "example.com:80", Host: "example.com", Port: "80"}, }, []string{"example.com"}, []string{"example.com"}, }, { []Address{ {Original: "https://:2015/foo", Scheme: "https", Port: "2015", Path: "/foo"}, }, []string{}, []string{}, }, { []Address{ {Original: "https://example.com:2015/foo", Scheme: "https", Host: "example.com", Port: "2015", Path: "/foo"}, }, []string{"example.com"}, []string{"example.com:2015"}, }, } { sb := serverBlock{parsedKeys: tc.keys} // test in normal mode actual := sb.hostsFromKeys(false) sort.Strings(actual) if !reflect.DeepEqual(tc.expectNormalMode, actual) { t.Errorf("Test %d (loggerMode=false): Expected: %v Actual: %v", i, tc.expectNormalMode, actual) } // test in logger mode actual = sb.hostsFromKeys(true) sort.Strings(actual) if 
!reflect.DeepEqual(tc.expectLoggerMode, actual) { t.Errorf("Test %d (loggerMode=true): Expected: %v Actual: %v", i, tc.expectLoggerMode, actual) } } } ================================================ FILE: caddyconfig/httpcaddyfile/httptype.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httpcaddyfile import ( "cmp" "encoding/json" "fmt" "net" "reflect" "slices" "sort" "strconv" "strings" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddypki" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}}) } // App represents the configuration for a non-standard // Caddy app module (e.g. third-party plugin) which was // parsed from a global options block. type App struct { // The JSON key for the app being configured Name string // The raw app config as JSON Value json.RawMessage } // ServerType can set up a config from an HTTP Caddyfile. type ServerType struct{} // Setup makes a config from the tokens. 
func (st ServerType) Setup(
	inputServerBlocks []caddyfile.ServerBlock,
	options map[string]any,
) (*caddy.Config, []caddyconfig.Warning, error) {
	var warnings []caddyconfig.Warning
	gc := counter{new(int)} // shared group counter used when naming route groups
	state := make(map[string]any)

	// load all the server blocks and associate them with a "pile" of config values
	originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
	for _, sblock := range inputServerBlocks {
		for j, k := range sblock.Keys {
			if j == 0 && strings.HasPrefix(k.Text, "@") {
				return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
			}
			// a site key that is actually a known directive name is almost
			// certainly a user error (directive written outside a site block)
			if _, ok := registeredDirectives[k.Text]; ok {
				return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
			}
		}
		originalServerBlocks = append(originalServerBlocks, serverBlock{
			block: sblock,
			pile:  make(map[string][]ConfigValue),
		})
	}

	// apply any global options
	var err error
	originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	// this will replace both static and user-defined placeholder shorthands
	// with actual identifiers used by Caddy
	replacer := NewShorthandReplacer()

	originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
	if err != nil {
		return nil, warnings, err
	}

	for _, sb := range originalServerBlocks {
		for i := range sb.block.Segments {
			replacer.ApplyToSegment(&sb.block.Segments[i])
		}

		if len(sb.block.Keys) == 0 {
			return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
		}

		// extract matcher definitions
		matcherDefs := make(map[string]caddy.ModuleMap)
		for _, segment := range sb.block.Segments {
			if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
				d := sb.block.DispenseDirective(dir)
				err := parseMatcherDefinitions(d, matcherDefs)
				if err != nil {
					return nil, warnings, err
				}
			}
		}

		// evaluate each directive ("segment") in this block
		for _, segment := range sb.block.Segments {
			dir := segment.Directive()

			if strings.HasPrefix(dir, matcherPrefix) {
				// matcher definitions were pre-processed
				continue
			}

			dirFunc, ok := registeredDirectives[dir]
			if !ok {
				tkn := segment[0]
				message := "%s:%d: unrecognized directive: %s"
				if !sb.block.HasBraces {
					message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations."
				}
				return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir)
			}

			h := Helper{
				Dispenser:    caddyfile.NewDispenser(segment),
				options:      options,
				warnings:     &warnings,
				matcherDefs:  matcherDefs,
				parentBlock:  sb.block,
				groupCounter: gc,
				State:        state,
			}

			results, err := dirFunc(h)
			if err != nil {
				return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
			}

			dir = normalizeDirectiveName(dir)

			for _, result := range results {
				result.directive = dir
				sb.pile[result.Class] = append(sb.pile[result.Class], result)
			}

			// specially handle named routes that were pulled out from
			// the invoke directive, which could be nested anywhere within
			// some subroutes in this directive; we add them to the pile
			// for this server block
			if state[namedRouteKey] != nil {
				for name := range state[namedRouteKey].(map[string]struct{}) {
					result := ConfigValue{Class: namedRouteKey, Value: name}
					sb.pile[result.Class] = append(sb.pile[result.Class], result)
				}
				state[namedRouteKey] = nil
			}
		}
	}

	// map
	sbmap, err := st.mapAddressToProtocolToServerBlocks(originalServerBlocks, options)
	if err != nil {
		return nil, warnings, err
	}

	// reduce
	pairings := st.consolidateAddrMappings(sbmap)

	// each pairing of listener addresses to list of server
	// blocks is basically a server definition
	servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
	if err != nil {
		return nil, warnings, err
	}

	// hoist the metrics config from per-server to global
	metrics, _ := options["metrics"].(*caddyhttp.Metrics)
	for _, s := range servers {
		if s.Metrics != nil {
			// cmp.Or guards against a nil metrics before reading PerHost below
			metrics = cmp.Or(metrics, &caddyhttp.Metrics{})
			metrics = &caddyhttp.Metrics{
				PerHost: metrics.PerHost || s.Metrics.PerHost,
			}
			s.Metrics = nil // we don't need it anymore
		}
	}

	// now that each server is configured, make the HTTP app
	httpApp := caddyhttp.App{
		HTTPPort:      tryInt(options["http_port"], &warnings),
		HTTPSPort:     tryInt(options["https_port"], &warnings),
		GracePeriod:   tryDuration(options["grace_period"], &warnings),
		ShutdownDelay: tryDuration(options["shutdown_delay"], &warnings),
		Metrics:       metrics,
		Servers:       servers,
	}

	// then make the TLS app
	tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings)
	if err != nil {
		return nil, warnings, err
	}

	// then make the PKI app
	pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings)
	if err != nil {
		return nil, warnings, err
	}

	// extract any custom logs, and enforce configured levels
	var customLogs []namedCustomLog
	var hasDefaultLog bool
	addCustomLog := func(ncl namedCustomLog) {
		if ncl.name == "" {
			return
		}
		if ncl.name == caddy.DefaultLoggerName {
			hasDefaultLog = true
		}
		// global "debug" option forces DEBUG level on logs with no explicit level
		if _, ok := options["debug"]; ok && ncl.log != nil && ncl.log.Level == "" {
			ncl.log.Level = zap.DebugLevel.CapitalString()
		}
		customLogs = append(customLogs, ncl)
	}

	// Apply global log options, when set
	if options["log"] != nil {
		for _, logValue := range options["log"].([]ConfigValue) {
			addCustomLog(logValue.Value.(namedCustomLog))
		}
	}

	if !hasDefaultLog {
		// if the default log was not customized, ensure we
		// configure it with any applicable options
		if _, ok := options["debug"]; ok {
			customLogs = append(customLogs, namedCustomLog{
				name: caddy.DefaultLoggerName,
				log: &caddy.CustomLog{
					BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
				},
			})
		}
	}

	// Apply server-specific log options
	for _, p := range pairings {
		for _, sb := range p.serverBlocks {
			for _, clVal := range sb.pile["custom_log"] {
				addCustomLog(clVal.Value.(namedCustomLog))
			}
		}
	}

	// annnd the top-level config, then we're done!
	cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}

	// loop through the configured options, and if any of
	// them are an httpcaddyfile App, then we insert them
	// into the config as raw Caddy apps
	for _, opt := range options {
		if app, ok := opt.(App); ok {
			cfg.AppsRaw[app.Name] = app.Value
		}
	}

	// insert the standard Caddy apps into the config;
	// the tls/pki apps are only emitted when they differ from their empty forms
	if len(httpApp.Servers) > 0 {
		cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
	}
	if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
		cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
	}
	if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
		cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
	}
	if filesystems, ok := options["filesystem"].(caddy.Module); ok {
		cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(filesystems, &warnings)
	}

	if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
		cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
			"module",
			storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
			&warnings)
	}
	if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
		cfg.Admin = adminConfig
	}
	if pc, ok := options["persist_config"].(string); ok && pc == "off" {
		if cfg.Admin == nil {
			cfg.Admin = new(caddy.AdminConfig)
		}
		if cfg.Admin.Config == nil {
			cfg.Admin.Config = new(caddy.ConfigSettings)
		}
		// new(bool) is a pointer to false, i.e. persistence disabled
		cfg.Admin.Config.Persist = new(bool)
	}

	if len(customLogs) > 0 {
		if cfg.Logging == nil {
			cfg.Logging = &caddy.Logging{
				Logs: make(map[string]*caddy.CustomLog),
			}
		}

		// Add the default log first if defined, so that it doesn't
		// accidentally get re-created below due to the Exclude logic
		for _, ncl := range customLogs {
			if ncl.name == caddy.DefaultLoggerName && ncl.log != nil {
				cfg.Logging.Logs[caddy.DefaultLoggerName] = ncl.log
				break
			}
		}

		// Add the rest of the custom logs
		for _, ncl := range customLogs {
			if ncl.log == nil || ncl.name == caddy.DefaultLoggerName {
				continue
			}
			if ncl.name != "" {
				cfg.Logging.Logs[ncl.name] = ncl.log
			}
			// most users seem to prefer not writing access logs
			// to the default log when they are directed to a
			// file or have any other special customization
			if ncl.name != caddy.DefaultLoggerName && len(ncl.log.Include) > 0 {
				defaultLog, ok := cfg.Logging.Logs[caddy.DefaultLoggerName]
				if !ok {
					defaultLog = new(caddy.CustomLog)
					cfg.Logging.Logs[caddy.DefaultLoggerName] = defaultLog
				}
				defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)

				// avoid duplicates by sorting + compacting
				sort.Strings(defaultLog.Exclude)
				defaultLog.Exclude = slices.Compact(defaultLog.Exclude)
			}
		}

		// we may have not actually added anything, so remove if empty
		if len(cfg.Logging.Logs) == 0 {
			cfg.Logging = nil
		}
	}

	return cfg, warnings, nil
}

// evaluateGlobalOptionsBlock evaluates the global options block,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]any) ([]serverBlock, error) {
	// only a key-less first block is a global options block; otherwise no-op
	if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
		return serverBlocks, nil
	}

	for _, segment := range serverBlocks[0].block.Segments {
		opt := segment.Directive()
		var val any
		var err error
		disp := caddyfile.NewDispenser(segment)

		optFunc, ok := registeredGlobalOptions[opt]
		if !ok {
			tkn := segment[0]
			return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt)
		}

		val, err = optFunc(disp, options[opt])
		if err != nil {
			return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err)
		}

		// As a special case, fold multiple "servers" options together
		// in an array instead of overwriting a possible existing value
		if opt == "servers" {
			existingOpts, ok := options[opt].([]serverOptions)
			if !ok {
				existingOpts = []serverOptions{}
			}
			serverOpts, ok := val.(serverOptions)
			if !ok {
				return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val)
			}
			options[opt] = append(existingOpts, serverOpts)
			continue
		}
		// Additionally, fold multiple "log" options together into an
		// array so that multiple loggers can be configured.
		if opt == "log" {
			existingOpts, ok := options[opt].([]ConfigValue)
			if !ok {
				existingOpts = []ConfigValue{}
			}
			logOpts, ok := val.([]ConfigValue)
			if !ok {
				return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val)
			}
			options[opt] = append(existingOpts, logOpts...)
			continue
		}
		// Also fold multiple "default_bind" options together into an
		// array so that server blocks can have multiple binds by default.
		if opt == "default_bind" {
			existingOpts, ok := options[opt].([]ConfigValue)
			if !ok {
				existingOpts = []ConfigValue{}
			}
			defaultBindOpts, ok := val.([]ConfigValue)
			if !ok {
				return nil, fmt.Errorf("unexpected type from 'default_bind' global options: %T", val)
			}
			options[opt] = append(existingOpts, defaultBindOpts...)
			continue
		}

		// all other options simply overwrite any prior value
		options[opt] = val
	}

	// If we got "servers" options, we'll sort them by their listener address
	// (longest address first, per the comparator below)
	if serverOpts, ok := options["servers"].([]serverOptions); ok {
		sort.Slice(serverOpts, func(i, j int) bool {
			return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress)
		})

		// Reject the config if there are duplicate listener address
		seen := make(map[string]bool)
		for _, entry := range serverOpts {
			if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen {
				return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress)
			}
			seen[entry.ListenerAddress] = true
		}
	}

	// drop the consumed global options block from the list
	return serverBlocks[1:], nil
}

// extractNamedRoutes pulls out any named route server blocks
// so they don't get parsed as sites, and stores them in options
// for later.
func (ServerType) extractNamedRoutes(
	serverBlocks []serverBlock,
	options map[string]any,
	warnings *[]caddyconfig.Warning,
	replacer ShorthandReplacer,
) ([]serverBlock, error) {
	namedRoutes := map[string]*caddyhttp.Route{}

	gc := counter{new(int)}
	state := make(map[string]any)

	// copy the server blocks so we can
	// splice out the named route ones
	filtered := append([]serverBlock{}, serverBlocks...)
	index := -1

	for _, sb := range serverBlocks {
		index++

		if !sb.block.IsNamedRoute {
			continue
		}

		// splice out this block, because we know it's not a real server
		filtered = append(filtered[:index], filtered[index+1:]...)
		index--

		if len(sb.block.Segments) == 0 {
			continue
		}

		wholeSegment := caddyfile.Segment{}
		for i := range sb.block.Segments {
			// replace user-defined placeholder shorthands in extracted named routes
			replacer.ApplyToSegment(&sb.block.Segments[i])

			// zip up all the segments since ParseSegmentAsSubroute
			// was designed to take a directive+
			wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
		}

		h := Helper{
			Dispenser:    caddyfile.NewDispenser(wholeSegment),
			options:      options,
			warnings:     warnings,
			matcherDefs:  nil,
			parentBlock:  sb.block,
			groupCounter: gc,
			State:        state,
		}

		handler, err := ParseSegmentAsSubroute(h)
		if err != nil {
			return nil, err
		}
		subroute := handler.(*caddyhttp.Subroute)
		route := caddyhttp.Route{}

		if len(subroute.Routes) == 1 && len(subroute.Routes[0].MatcherSetsRaw) == 0 {
			// if there's only one route with no matcher, then we can simplify
			route.HandlersRaw = append(route.HandlersRaw, subroute.Routes[0].HandlersRaw[0])
		} else {
			// otherwise we need the whole subroute
			route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
		}

		// the named route is keyed by the block's first key text
		namedRoutes[sb.block.GetKeysText()[0]] = &route
	}
	options["named_routes"] = namedRoutes

	return filtered, nil
}

// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
	pairings []sbAddrAssociation,
	options map[string]any,
	warnings *[]caddyconfig.Warning,
	groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
	servers := make(map[string]*caddyhttp.Server)
	defaultSNI := tryString(options["default_sni"], warnings)
	fallbackSNI := tryString(options["fallback_sni"], warnings)

	httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
	if hp, ok := options["http_port"].(int); ok {
		httpPort = strconv.Itoa(hp)
	}
	httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
	if hsp, ok := options["https_port"].(int); ok {
		httpsPort = strconv.Itoa(hsp)
	}
	autoHTTPS := []string{}
	if ah, ok := options["auto_https"].([]string); ok {
		autoHTTPS = ah
	}

	for i, p := range pairings {
		// detect ambiguous site definitions: server blocks which
		// have the same host bound to the same interface (listener
		// address), otherwise their routes will improperly be added
		// to the same server (see issue #4635)
		for j, sblock1 := range p.serverBlocks {
			for _, key := range sblock1.block.GetKeysText() {
				for k, sblock2 := range p.serverBlocks {
					if k == j {
						continue
					}
					if slices.Contains(sblock2.block.GetKeysText(), key) {
						return nil, fmt.Errorf("ambiguous site definition: %s", key)
					}
				}
			}
		}

		var (
			addresses []string
			protocols [][]string
		)

		for _, addressWithProtocols := range p.addressesWithProtocols {
			addresses = append(addresses, addressWithProtocols.address)
			protocols = append(protocols, addressWithProtocols.protocols)
		}

		srv := &caddyhttp.Server{
			Listen:          addresses,
			ListenProtocols: protocols,
		}

		// remove srv.ListenProtocols[j] if it only contains the default protocols
		for j, lnProtocols := range srv.ListenProtocols {
			srv.ListenProtocols[j] = nil
			for _, lnProtocol := range lnProtocols {
				if lnProtocol != "" {
					srv.ListenProtocols[j] = lnProtocols
					break
				}
			}
		}

		// remove srv.ListenProtocols if it only contains the default protocols for all listen addresses
		listenProtocols := srv.ListenProtocols
		srv.ListenProtocols = nil
		for _, lnProtocols := range listenProtocols {
			if lnProtocols != nil {
				srv.ListenProtocols = listenProtocols
				break
			}
		}

		// handle the auto_https global option
		for _, val := range autoHTTPS {
			switch val {
			case "off":
				if srv.AutoHTTPS == nil {
					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
				}
				srv.AutoHTTPS.Disabled = true

			case "disable_redirects":
				if srv.AutoHTTPS == nil {
					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
				}
				srv.AutoHTTPS.DisableRedir = true

			case "disable_certs":
				if srv.AutoHTTPS == nil {
					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
				}
				srv.AutoHTTPS.DisableCerts = true

			case "ignore_loaded_certs":
				if srv.AutoHTTPS == nil {
					srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
				}
				srv.AutoHTTPS.IgnoreLoadedCerts = true
			}
		}

		// Using paths in site addresses is deprecated
		// See ParseAddress() where parsing should later reject paths
		// See https://github.com/caddyserver/caddy/pull/4728 for a full explanation
		for _, sblock := range p.serverBlocks {
			for _, addr := range sblock.parsedKeys {
				if addr.Path != "" {
					caddy.Log().Named("caddyfile").Warn("Using a path in a site address is deprecated; please use the 'handle' directive instead", zap.String("address", addr.String()))
				}
			}
		}

		// sort server blocks by their keys; this is important because
		// only the first matching site should be evaluated, and we should
		// attempt to match most specific site first (host and path), in
		// case their matchers overlap; we do this somewhat naively by
		// descending sort by length of host then path
		sort.SliceStable(p.serverBlocks, func(i, j int) bool {
			// TODO: we could pre-process the specificities for efficiency,
			// but I don't expect many blocks will have THAT many keys...
			var iLongestPath, jLongestPath string
			var iLongestHost, jLongestHost string
			var iWildcardHost, jWildcardHost bool
			for _, addr := range p.serverBlocks[i].parsedKeys {
				if strings.Contains(addr.Host, "*") || addr.Host == "" {
					iWildcardHost = true
				}
				if specificity(addr.Host) > specificity(iLongestHost) {
					iLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(iLongestPath) {
					iLongestPath = addr.Path
				}
			}
			for _, addr := range p.serverBlocks[j].parsedKeys {
				if strings.Contains(addr.Host, "*") || addr.Host == "" {
					jWildcardHost = true
				}
				if specificity(addr.Host) > specificity(jLongestHost) {
					jLongestHost = addr.Host
				}
				if specificity(addr.Path) > specificity(jLongestPath) {
					jLongestPath = addr.Path
				}
			}
			// catch-all blocks (blocks with no hostname) should always go
			// last, even after blocks with wildcard hosts
			if specificity(iLongestHost) == 0 {
				return false
			}
			if specificity(jLongestHost) == 0 {
				return true
			}
			if iWildcardHost != jWildcardHost {
				// site blocks that have a key with a wildcard in the hostname
				// must always be less specific than blocks without one; see
				// https://github.com/caddyserver/caddy/issues/3410
				return jWildcardHost && !iWildcardHost
			}
			if specificity(iLongestHost) == specificity(jLongestHost) {
				return len(iLongestPath) > len(jLongestPath)
			}
			return specificity(iLongestHost) > specificity(jLongestHost)
		})

		var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
		autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled

		// if needed, the ServerLogConfig is initialized beforehand so
		// that all server blocks can populate it with data, even when not
		// coming with a log directive
		for _, sblock := range p.serverBlocks {
			if len(sblock.pile["custom_log"]) != 0 {
				srv.Logs = new(caddyhttp.ServerLogConfig)
				break
			}
		}

		// add named routes to the server if 'invoke' was used inside of it
		configuredNamedRoutes := options["named_routes"].(map[string]*caddyhttp.Route)
		for _, sblock := range p.serverBlocks {
			if len(sblock.pile[namedRouteKey]) == 0 {
				continue
			}
			for _, value := range sblock.pile[namedRouteKey] {
				if srv.NamedRoutes == nil {
					srv.NamedRoutes = map[string]*caddyhttp.Route{}
				}
				name := value.Value.(string)
				if configuredNamedRoutes[name] == nil {
					return nil, fmt.Errorf("cannot invoke named route '%s', which was not defined", name)
				}
				srv.NamedRoutes[name] = configuredNamedRoutes[name]
			}
		}

		// create a subroute for each site in the server block
		for _, sblock := range p.serverBlocks {
			matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
			if err != nil {
				return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
			}

			hosts := sblock.hostsFromKeys(false)

			// emit warnings if user put unspecified IP addresses; they probably want the bind directive
			for _, h := range hosts {
				if h == "0.0.0.0" || h == "::" {
					caddy.Log().Named("caddyfile").Warn("Site block has an unspecified IP address which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", zap.String("address", h))
				}
			}

			// collect hosts that are forced to be automated
			forceAutomatedNames := make(map[string]struct{})
			if _, ok := sblock.pile["tls.force_automate"]; ok {
				for _, host := range hosts {
					forceAutomatedNames[host] = struct{}{}
				}
			}

			// tls: connection policies
			if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
				// tls connection policies
				for _, cpVal := range cpVals {
					cp := cpVal.Value.(*caddytls.ConnectionPolicy)

					// make sure the policy covers all hostnames from the block;
					// an empty-string host acts as a catch-all SNI entry
					for _, h := range hosts {
						if h == defaultSNI {
							hosts = append(hosts, "")
							cp.DefaultSNI = defaultSNI
							break
						}
						if h == fallbackSNI {
							hosts = append(hosts, "")
							cp.FallbackSNI = fallbackSNI
							break
						}
					}

					if len(hosts) > 0 {
						slices.Sort(hosts) // for deterministic JSON output
						cp.MatchersRaw = caddy.ModuleMap{
							"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
						}
					} else {
						cp.DefaultSNI = defaultSNI
						cp.FallbackSNI = fallbackSNI
					}

					// only append this policy if it actually changes something,
					// or if the configuration explicitly automates certs for
					// these names (this is necessary to hoist a connection policy
					// above one that may manually load a wildcard cert that would
					// otherwise clobber the automated one; the code that appends
					// policies that manually load certs comes later, so they're
					// lower in the list)
					if !cp.SettingsEmpty() || mapContains(forceAutomatedNames, hosts) {
						srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
						hasCatchAllTLSConnPolicy = len(hosts) == 0
					}
				}
			}

			for _, addr := range sblock.parsedKeys {
				// if server only uses HTTP port, auto-HTTPS will not apply
				if listenersUseAnyPortOtherThan(srv.Listen, httpPort) {
					// exclude any hosts that were defined explicitly with "http://"
					// in the key from automated cert management (issue #2998)
					if addr.Scheme == "http" && addr.Host != "" {
						if srv.AutoHTTPS == nil {
							srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
						}
						if !slices.Contains(srv.AutoHTTPS.Skip, addr.Host) {
							srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
						}
					}
				}

				// If TLS is specified as directive, it will also result in 1 or more connection policy being created
				// Thus, catch-all address with non-standard port, e.g. :8443, can have TLS enabled without
				// specifying prefix "https://"
				// Second part of the condition is to allow creating TLS conn policy even though `auto_https` has been disabled
				// ensuring compatibility with behavior described in below link
				// https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
				createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
				hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
					(addr.Host != "" && (srv.AutoHTTPS == nil || !slices.Contains(srv.AutoHTTPS.Skip, addr.Host)))

				// we'll need to remember if the address qualifies for auto-HTTPS, so we
				// can add a TLS conn policy if necessary
				if addr.Scheme == "https" ||
					(addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
					addressQualifiesForTLS = true
				}

				// predict whether auto-HTTPS will add the conn policy for us; if so, we
				// may not need to add one for this server
				autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
					(addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
			}

			// Look for any config values that provide listener wrappers on the server block
			for _, listenerConfig := range sblock.pile["listener_wrapper"] {
				listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper)
				if !ok {
					return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper")
				}
				jsonListenerWrapper := caddyconfig.JSONModuleObject(
					listenerWrapper,
					"wrapper",
					listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
					warnings)
				srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper)
			}

			// Look for any config values that provide packet conn wrappers on the server block
			for _, listenerConfig := range sblock.pile["packet_conn_wrapper"] {
				packetConnWrapper, ok := listenerConfig.Value.(caddy.PacketConnWrapper)
				if !ok {
					return nil, fmt.Errorf("config for a packet conn wrapper did not provide a value that implements caddy.PacketConnWrapper")
				}
				jsonPacketConnWrapper := caddyconfig.JSONModuleObject(
					packetConnWrapper,
					"wrapper",
					packetConnWrapper.(caddy.Module).CaddyModule().ID.Name(),
					warnings)
				srv.PacketConnWrappersRaw = append(srv.PacketConnWrappersRaw, jsonPacketConnWrapper)
			}

			// set up each handler directive, making sure to honor directive order
			dirRoutes := sblock.pile["route"]
			siteSubroute, err := buildSubroute(dirRoutes, groupCounter, true)
			if err != nil {
				return nil, err
			}

			// add the site block's route(s) to the server
			srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)

			// if error routes are defined, add those too
			if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
				if srv.Errors == nil {
					srv.Errors = new(caddyhttp.HTTPErrorConfig)
				}
				// sort matcher-less error subroutes after ones with matchers
				// NOTE(review): this less-func returns true for any pair except
				// (matcher-less, has-matcher), which looks like it may violate
				// sort's strict-weak-ordering contract — confirm intent upstream
				sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
					sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
					if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
						return false
					}
					return true
				})
				errorsSubroute := &caddyhttp.Subroute{}
				for _, val := range errorSubrouteVals {
					sr := val.Value.(*caddyhttp.Subroute)
					errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
				}
				srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
			}

			// add log associations
			// see https://github.com/caddyserver/caddy/issues/3310
			sblockLogHosts := sblock.hostsFromKeys(true)
			for _, cval := range sblock.pile["custom_log"] {
				ncl := cval.Value.(namedCustomLog)

				// if `no_hostname` is set, then this logger will not
				// be associated with any of the site block's hostnames,
				// and only be usable via the `log_name` directive
				// or the `access_logger_names` variable
				if ncl.noHostname {
					continue
				}

				// srv.Logs was initialized above whenever any block in this
				// pairing has a custom_log, so it is non-nil here
				if sblock.hasHostCatchAllKey() && len(ncl.hostnames) == 0 {
					// all requests for hosts not able to be listed should use
					// this log because it's a catch-all-hosts server block
					srv.Logs.DefaultLoggerName = ncl.name
				} else if len(ncl.hostnames) > 0 {
					// if the logger overrides the hostnames, map that to the logger name
					for _, h := range ncl.hostnames {
						if srv.Logs.LoggerNames == nil {
							srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
						}
						srv.Logs.LoggerNames[h] = append(srv.Logs.LoggerNames[h], ncl.name)
					}
				} else {
					// otherwise, map each host to the logger name
					for _, h := range sblockLogHosts {
						// strip the port from the host, if any
						host, _, err := net.SplitHostPort(h)
						if err != nil {
							host = h
						}
						if srv.Logs.LoggerNames == nil {
							srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
						}
						srv.Logs.LoggerNames[host] = append(srv.Logs.LoggerNames[host], ncl.name)
					}
				}
			}
			if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 {
				// server has access logs enabled, but this server block does not
				// enable access logs; therefore, all hosts of this server block
				// should not be access-logged
				if len(hosts) == 0 {
					// if the server block has a catch-all-hosts key, then we should
					// not log reqs to any host unless it appears in the map
					srv.Logs.SkipUnmappedHosts = true
				}
				srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...)
			}
		}

		// sort for deterministic JSON output
		if srv.Logs != nil {
			slices.Sort(srv.Logs.SkipHosts)
		}

		// a server cannot (natively) serve both HTTP and HTTPS at the
		// same time, so make sure the configuration isn't in conflict
		err := detectConflictingSchemes(srv, p.serverBlocks, options)
		if err != nil {
			return nil, err
		}

		// a catch-all TLS conn policy is necessary to ensure TLS can
		// be offered to all hostnames of the server; even though only
		// one policy is needed to enable TLS for the server, that
		// policy might apply to only certain TLS handshakes; but when
		// using the Caddyfile, user would expect all handshakes to at
		// least have a matching connection policy, so here we append a
		// catch-all/default policy if there isn't one already (it's
		// important that it goes at the end) - see issue #3004:
		// https://github.com/caddyserver/caddy/issues/3004
		// TODO: maybe a smarter way to handle this might be to just make the
		// auto-HTTPS logic at provision-time detect if there is any connection
		// policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
		if addressQualifiesForTLS &&
			!hasCatchAllTLSConnPolicy &&
			(len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "" || fallbackSNI != "") {
			srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{
				DefaultSNI:  defaultSNI,
				FallbackSNI: fallbackSNI,
			})
		}

		// tidy things up a bit
		srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies)
		if err != nil {
			return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err)
		}
		srv.Routes = consolidateRoutes(srv.Routes)

		servers[fmt.Sprintf("srv%d", i)] = srv
	}

	if err := applyServerOptions(servers, options, warnings); err != nil {
		return nil, fmt.Errorf("applying global server options: %v", err)
	}

	return servers, nil
}

// detectConflictingSchemes returns an error if the server's site blocks mix
// HTTP and HTTPS on the same listener, since a server serves only one scheme.
func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]any) error {
	httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
	if hp, ok := options["http_port"].(int); ok {
		httpPort = strconv.Itoa(hp)
	}
	httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
	if hsp, ok := options["https_port"].(int); ok {
		httpsPort = strconv.Itoa(hsp)
	}

	var httpOrHTTPS string
	checkAndSetHTTP := func(addr Address) error {
		if httpOrHTTPS == "HTTPS" {
			errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s",
				srv.Listen, addr.Original)
			if addr.Scheme == "" && addr.Host == "" {
				errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg)
			}
			return errMsg
		}
		if len(srv.TLSConnPolicies) > 0 {
			// any connection policies created for an HTTP server
			// is a logical conflict, as it would enable HTTPS
			return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen)
		}
		httpOrHTTPS = "HTTP"
		return nil
	}
	checkAndSetHTTPS := func(addr Address) error {
		if httpOrHTTPS == "HTTP" {
			return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: %s",
				srv.Listen,
addr.Original) } httpOrHTTPS = "HTTPS" return nil } for _, sblock := range serverBlocks { for _, addr := range sblock.parsedKeys { if addr.Scheme == "http" || addr.Port == httpPort { if err := checkAndSetHTTP(addr); err != nil { return err } } else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 { if err := checkAndSetHTTPS(addr); err != nil { return err } } else if addr.Host == "" { if err := checkAndSetHTTP(addr); err != nil { return err } } } } return nil } // consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection // policies, and combines equivalent ones for a cleaner overall output. func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) { // catch-all policies (those without any matcher) should be at the // end, otherwise it nullifies any more specific policies sort.SliceStable(cps, func(i, j int) bool { return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil }) for i := 0; i < len(cps); i++ { // compare it to the others for j := 0; j < len(cps); j++ { if j == i { continue } // if they're exactly equal in every way, just keep one of them if reflect.DeepEqual(cps[i], cps[j]) { cps = slices.Delete(cps, j, j+1) i-- break } // as a special case, if there are adjacent TLS conn policies that are identical except // by their matchers, and the matchers are specifically just ServerName ("sni") matchers // (by far the most common), we can combine them into a single policy if i == j-1 && len(cps[i].MatchersRaw) == 1 && len(cps[j].MatchersRaw) == 1 { if iSNIMatcherJSON, ok := cps[i].MatchersRaw["sni"]; ok { if jSNIMatcherJSON, ok := cps[j].MatchersRaw["sni"]; ok { // position of policies and the matcher criteria check out; if settings are // the same, then we can combine the policies; we have to unmarshal and // remarshal the matchers though if cps[i].SettingsEqual(*cps[j]) { var iSNIMatcher caddytls.MatchServerName if err := 
json.Unmarshal(iSNIMatcherJSON, &iSNIMatcher); err == nil { var jSNIMatcher caddytls.MatchServerName if err := json.Unmarshal(jSNIMatcherJSON, &jSNIMatcher); err == nil { iSNIMatcher = append(iSNIMatcher, jSNIMatcher...) cps[i].MatchersRaw["sni"], err = json.Marshal(iSNIMatcher) if err != nil { return nil, fmt.Errorf("recombining SNI matchers: %v", err) } cps = slices.Delete(cps, j, j+1) i-- break } } } } } } // if they have the same matcher, try to reconcile each field: either they must // be identical, or we have to be able to combine them safely if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) { if len(cps[i].ALPN) > 0 && len(cps[j].ALPN) > 0 && !reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) { return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v", cps[i].ALPN, cps[j].ALPN) } if len(cps[i].CipherSuites) > 0 && len(cps[j].CipherSuites) > 0 && !reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) { return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v", cps[i].CipherSuites, cps[j].CipherSuites) } if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil && !reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) { return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v", cps[i].ClientAuthentication, cps[j].ClientAuthentication) } if len(cps[i].Curves) > 0 && len(cps[j].Curves) > 0 && !reflect.DeepEqual(cps[i].Curves, cps[j].Curves) { return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v", cps[i].Curves, cps[j].Curves) } if cps[i].DefaultSNI != "" && cps[j].DefaultSNI != "" && cps[i].DefaultSNI != cps[j].DefaultSNI { return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. 
%s", cps[i].DefaultSNI, cps[j].DefaultSNI) } if cps[i].FallbackSNI != "" && cps[j].FallbackSNI != "" && cps[i].FallbackSNI != cps[j].FallbackSNI { return nil, fmt.Errorf("two policies with same match criteria have conflicting fallback SNI: %s vs. %s", cps[i].FallbackSNI, cps[j].FallbackSNI) } if cps[i].ProtocolMin != "" && cps[j].ProtocolMin != "" && cps[i].ProtocolMin != cps[j].ProtocolMin { return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s", cps[i].ProtocolMin, cps[j].ProtocolMin) } if cps[i].ProtocolMax != "" && cps[j].ProtocolMax != "" && cps[i].ProtocolMax != cps[j].ProtocolMax { return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. %s", cps[i].ProtocolMax, cps[j].ProtocolMax) } if cps[i].CertSelection != nil && cps[j].CertSelection != nil { // merging fields other than AnyTag is not implemented if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) || !reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) || cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm || !reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) { return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. 
%+v", cps[i].CertSelection, cps[j].CertSelection) } } // by now we've decided that we can merge the two -- we'll keep i and drop j if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 { cps[i].ALPN = cps[j].ALPN } if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 { cps[i].CipherSuites = cps[j].CipherSuites } if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil { cps[i].ClientAuthentication = cps[j].ClientAuthentication } if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 { cps[i].Curves = cps[j].Curves } if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" { cps[i].DefaultSNI = cps[j].DefaultSNI } if cps[i].FallbackSNI == "" && cps[j].FallbackSNI != "" { cps[i].FallbackSNI = cps[j].FallbackSNI } if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" { cps[i].ProtocolMin = cps[j].ProtocolMin } if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" { cps[i].ProtocolMax = cps[j].ProtocolMax } if cps[i].CertSelection == nil && cps[j].CertSelection != nil { // if j is the only one with a policy, move it over to i cps[i].CertSelection = cps[j].CertSelection } else if cps[i].CertSelection != nil && cps[j].CertSelection != nil { // if both have one, then combine AnyTag for _, tag := range cps[j].CertSelection.AnyTag { if !slices.Contains(cps[i].CertSelection.AnyTag, tag) { cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag) } } } cps = slices.Delete(cps, j, j+1) i-- break } } } return cps, nil } // appendSubrouteToRouteList appends the routes in subroute // to the routeList, optionally qualified by matchers. func appendSubrouteToRouteList(routeList caddyhttp.RouteList, subroute *caddyhttp.Subroute, matcherSetsEnc []caddy.ModuleMap, p sbAddrAssociation, warnings *[]caddyconfig.Warning, ) caddyhttp.RouteList { // nothing to do if... 
there's nothing to do if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil { return routeList } // No need to wrap the handlers in a subroute if this is the only server block // and there is no matcher for it (doing so would produce unnecessarily nested // JSON), *unless* there is a host matcher within this site block; if so, then // we still need to wrap in a subroute because otherwise the host matcher from // the inside of the site block would be a top-level host matcher, which is // subject to auto-HTTPS (cert management), and using a host matcher within // a site block is a valid, common pattern for excluding domains from cert // management, leading to unexpected behavior; see issue #5124. wrapInSubroute := true if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 { var hasHostMatcher bool outer: for _, route := range subroute.Routes { for _, ms := range route.MatcherSetsRaw { for matcherName := range ms { if matcherName == "host" { hasHostMatcher = true break outer } } } } wrapInSubroute = hasHostMatcher } if wrapInSubroute { route := caddyhttp.Route{ // the semantics of a site block in the Caddyfile dictate // that only the first matching one is evaluated, since // site blocks do not cascade nor inherit Terminal: true, } if len(matcherSetsEnc) > 0 { route.MatcherSetsRaw = matcherSetsEnc } if len(subroute.Routes) > 0 || subroute.Errors != nil { route.HandlersRaw = []json.RawMessage{ caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings), } } if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 { routeList = append(routeList, route) } } else { routeList = append(routeList, subroute.Routes...) } return routeList } // buildSubroute turns the config values, which are expected to be routes // into a clean and orderly subroute that has all the routes within it. 
func buildSubroute(routes []ConfigValue, groupCounter counter, needsSorting bool) (*caddyhttp.Subroute, error) {
	if needsSorting {
		for _, val := range routes {
			if !slices.Contains(directiveOrder, val.directive) {
				return nil, fmt.Errorf("directive '%s' is not an ordered HTTP handler, so it cannot be used here - try placing within a route block or using the order global option", val.directive)
			}
		}
		sortRoutes(routes)
	}

	subroute := new(caddyhttp.Subroute)

	// some directives are mutually exclusive (only first matching
	// instance should be evaluated); this is done by putting their
	// routes in the same group
	mutuallyExclusiveDirs := map[string]*struct {
		count     int
		groupName string
	}{
		// as a special case, group rewrite directives so that they are mutually exclusive;
		// this means that only the first matching rewrite will be evaluated, and that's
		// probably a good thing, since there should never be a need to do more than one
		// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
		//     rewrite /docs/json/* /docs/json/index.html
		//     rewrite /docs/*      /docs/index.html
		// (We use this on the Caddy website, or at least we did once.) The first rewrite's
		// result is also matched by the second rewrite, making the first rewrite pointless.
		// See issue #2959.
		"rewrite": {},

		// handle blocks are also mutually exclusive by definition
		"handle": {},

		// root just sets a variable, so if it was not mutually exclusive, intersecting
		// root directives would overwrite previously-matched ones; they should not cascade
		"root": {},
	}

	// we need to deterministically loop over each of these directives
	// in order to keep the group numbers consistent
	keys := make([]string, 0, len(mutuallyExclusiveDirs))
	for k := range mutuallyExclusiveDirs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, meDir := range keys {
		info := mutuallyExclusiveDirs[meDir]

		// see how many instances of the directive there are
		for _, r := range routes {
			if r.directive == meDir {
				info.count++
				if info.count > 1 {
					break
				}
			}
		}
		// if there is more than one, put them in a group
		// (special case: "rewrite" directive must always be in
		// its own group--even if there is only one--because we
		// do not want a rewrite to be consolidated into other
		// adjacent routes that happen to have the same matcher,
		// see caddyserver/caddy#3108 - because the implied
		// intent of rewrite is to do an internal redirect,
		// we can't assume that the request will continue to
		// match the same matcher; anyway, giving a route a
		// unique group name should keep it from consolidating)
		if info.count > 1 || meDir == "rewrite" {
			info.groupName = groupCounter.nextGroup()
		}
	}

	// add all the routes piled in from directives
	for _, r := range routes {
		// put this route into a group if it is mutually exclusive
		if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
			route := r.Value.(caddyhttp.Route)
			route.Group = info.groupName
			r.Value = route
		}

		switch route := r.Value.(type) {
		case caddyhttp.Subroute:
			// if a route-class config value is actually a Subroute handler
			// with nothing but a list of routes, then it is the intention
			// of the directive to keep these handlers together and in this
			// same order, but not necessarily in a subroute (if it wanted
			// to keep them in a subroute, the directive would have returned
			// a route with a Subroute as its handler); this is useful to
			// keep multiple handlers/routes together and in the same order
			// so that the sorting procedure we did above doesn't reorder them
			if route.Errors != nil {
				// if error handlers are also set, this is confusing; it's
				// probably supposed to be wrapped in a Route and encoded
				// as a regular handler route... programmer error.
				panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
			}
			subroute.Routes = append(subroute.Routes, route.Routes...)
		case caddyhttp.Route:
			subroute.Routes = append(subroute.Routes, route)
		}
	}

	subroute.Routes = consolidateRoutes(subroute.Routes)

	return subroute, nil
}

// normalizeDirectiveName ensures directives that should be sorted
// at the same level are named the same before sorting happens.
func normalizeDirectiveName(directive string) string {
	// As a special case, we want "handle_path" to be sorted
	// at the same level as "handle", so we force them to use
	// the same directive name after their parsing is complete.
	// See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377
	if directive == "handle_path" {
		directive = "handle"
	}
	return directive
}

// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
	for i := 0; i < len(routes)-1; i++ {
		if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
			routes[i].Terminal == routes[i+1].Terminal &&
			routes[i].Group == routes[i+1].Group {
			// keep the handlers in the same order, then splice out repetitive route
			routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
			routes = append(routes[:i+1], routes[i+2:]...)
			i--
		}
	}
	return routes
}

// matcherSetFromMatcherToken interprets a single matcher token. The returned
// bool reports whether the token was recognized as a matcher: true for the
// wildcard "*" (which yields a nil matcher set, i.e. match all), a path
// matcher (token starting with "/"), or a reference to a pre-defined named
// matcher (token starting with the matcher prefix "@"); false otherwise.
func matcherSetFromMatcherToken(
	tkn caddyfile.Token,
	matcherDefs map[string]caddy.ModuleMap,
	warnings *[]caddyconfig.Warning,
) (caddy.ModuleMap, bool, error) {
	// matcher tokens can be wildcards, simple path matchers,
	// or refer to a pre-defined matcher by some name
	if tkn.Text == "*" {
		// match all requests == no matchers, so nothing to do
		return nil, true, nil
	}

	// convenient way to specify a single path match
	if strings.HasPrefix(tkn.Text, "/") {
		return caddy.ModuleMap{
			"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
		}, true, nil
	}

	// pre-defined matcher
	if strings.HasPrefix(tkn.Text, matcherPrefix) {
		m, ok := matcherDefs[tkn.Text]
		if !ok {
			return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
		}
		return m, true, nil
	}

	return nil, false, nil
}

// compileEncodedMatcherSets computes the host and path matcher sets implied
// by the server block's keys (addresses), grouping keys that share a common
// path, and returns the matcher sets JSON-encoded as module maps.
func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) {
	type hostPathPair struct {
		hostm caddyhttp.MatchHost
		pathm caddyhttp.MatchPath
	}

	// keep routes with common host and path matchers together
	var matcherPairs []*hostPathPair

	var catchAllHosts bool
	for _, addr := range sblock.parsedKeys {
		// choose a matcher pair that should be shared by this
		// server block; if none exists yet, create one
		var chosenMatcherPair *hostPathPair
		for _, mp := range matcherPairs {
			if (len(mp.pathm) == 0 && addr.Path == "") ||
				(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
				chosenMatcherPair = mp
				break
			}
		}
		if chosenMatcherPair == nil {
			chosenMatcherPair = new(hostPathPair)
			if addr.Path != "" {
				chosenMatcherPair.pathm = []string{addr.Path}
			}
			matcherPairs = append(matcherPairs, chosenMatcherPair)
		}

		// if one of the keys has no host (i.e. is a catch-all for
		// any hostname), then we need to null out the host matcher
		// entirely so that it matches all hosts
		if addr.Host == "" && !catchAllHosts {
			chosenMatcherPair.hostm = nil
			catchAllHosts = true
		}
		if catchAllHosts {
			continue
		}

		// add this server block's keys to the matcher
		// pair if it doesn't already exist
		if addr.Host != "" && !slices.Contains(chosenMatcherPair.hostm, addr.Host) {
			chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
		}
	}

	// iterate each pairing of host and path matchers and
	// put them into a map for JSON encoding
	var matcherSets []map[string]caddyhttp.RequestMatcherWithError
	for _, mp := range matcherPairs {
		matcherSet := make(map[string]caddyhttp.RequestMatcherWithError)
		if len(mp.hostm) > 0 {
			matcherSet["host"] = mp.hostm
		}
		if len(mp.pathm) > 0 {
			matcherSet["path"] = mp.pathm
		}
		if len(matcherSet) > 0 {
			matcherSets = append(matcherSets, matcherSet)
		}
	}

	// finally, encode each of the matcher sets
	matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets))
	for _, ms := range matcherSets {
		msEncoded, err := encodeMatcherSet(ms)
		if err != nil {
			return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err)
		}
		matcherSetsEnc = append(matcherSetsEnc, msEncoded)
	}

	return matcherSetsEnc, nil
}

// parseMatcherDefinitions parses a named matcher definition (the dispenser is
// positioned before the "@name" token) and records each matcher module's
// JSON-encoded config into matchers under the definition name. Defining the
// same name twice is an error.
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
	d.Next() // advance to the first token

	// this is the "name" for "named matchers"
	definitionName := d.Val()

	if _, ok := matchers[definitionName]; ok {
		return fmt.Errorf("matcher is defined more than once: %s", definitionName)
	}
	matchers[definitionName] = make(caddy.ModuleMap)

	// given a matcher name and the tokens following it, parse
	// the tokens as a matcher module and record it
	makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
		// create a new dispenser from the tokens
		dispenser := caddyfile.NewDispenser(tokens)

		// set the matcher name (without @) in the dispenser context so
		// that matcher modules can access it to use it as their name
		// (e.g. regexp matchers which use the name for capture groups)
		dispenser.SetContext(caddyfile.MatcherNameCtxKey, definitionName[1:])

		mod, err := caddy.GetModule("http.matchers." + matcherName)
		if err != nil {
			return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
		}
		unm, ok := mod.New().(caddyfile.Unmarshaler)
		if !ok {
			return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
		}
		err = unm.UnmarshalCaddyfile(dispenser)
		if err != nil {
			return err
		}

		if rm, ok := unm.(caddyhttp.RequestMatcherWithError); ok {
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
			return nil
		}
		// fall back to the deprecated interface for older matcher modules
		// nolint:staticcheck
		if rm, ok := unm.(caddyhttp.RequestMatcher); ok {
			matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
			return nil
		}
		return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
	}

	// if the next token is quoted, we can assume it's not a matcher name
	// and that it's probably an 'expression' matcher
	if d.NextArg() {
		if d.Token().Quoted() {
			// since it was missing the matcher name, we insert a token
			// in front of the expression token itself; we use Clone() to
			// make the new token to keep the same import location as
			// the next token, if this is within a snippet or imported file.
			// see https://github.com/caddyserver/caddy/issues/6287
			expressionToken := d.Token().Clone()
			expressionToken.Text = "expression"
			err := makeMatcher("expression", []caddyfile.Token{expressionToken, d.Token()})
			if err != nil {
				return err
			}
			return nil
		}

		// if it wasn't quoted, then we need to rewind after calling
		// d.NextArg() so the below properly grabs the matcher name
		d.Prev()
	}

	// in case there are multiple instances of the same matcher, concatenate
	// their tokens (we expect that UnmarshalCaddyfile should be able to
	// handle more than one segment); otherwise, we'd overwrite other
	// instances of the matcher in this set
	tokensByMatcherName := make(map[string][]caddyfile.Token)
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		matcherName := d.Val()
		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
	}
	for matcherName, tokens := range tokensByMatcherName {
		err := makeMatcher(matcherName, tokens)
		if err != nil {
			return err
		}
	}
	return nil
}

// encodeMatcherSet JSON-encodes each matcher in the set into a module map
// keyed by matcher name.
func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcherWithError) (caddy.ModuleMap, error) {
	msEncoded := make(caddy.ModuleMap)
	for matcherName, val := range matchers {
		jsonBytes, err := json.Marshal(val)
		if err != nil {
			return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
		}
		msEncoded[matcherName] = jsonBytes
	}
	return msEncoded, nil
}

// WasReplacedPlaceholderShorthand checks if a token string was
// likely a replaced shorthand of the known Caddyfile placeholder
// replacement outputs. Useful to prevent some user-defined map
// output destinations from overlapping with one of the
// predefined shorthands.
func WasReplacedPlaceholderShorthand(token string) string { prev := "" for i, item := range placeholderShorthands() { // only look at every 2nd item, which is the replacement if i%2 == 0 { prev = item continue } if strings.Trim(token, "{}") == strings.Trim(item, "{}") { // we return the original shorthand so it // can be used for an error message return prev } } return "" } // tryInt tries to convert val to an integer. If it fails, // it downgrades the error to a warning and returns 0. func tryInt(val any, warnings *[]caddyconfig.Warning) int { intVal, ok := val.(int) if val != nil && !ok && warnings != nil { *warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"}) } return intVal } func tryString(val any, warnings *[]caddyconfig.Warning) string { stringVal, ok := val.(string) if val != nil && !ok && warnings != nil { *warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"}) } return stringVal } func tryDuration(val any, warnings *[]caddyconfig.Warning) caddy.Duration { durationVal, ok := val.(caddy.Duration) if val != nil && !ok && warnings != nil { *warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"}) } return durationVal } // listenersUseAnyPortOtherThan returns true if there are any // listeners in addresses that use a port which is not otherPort. // Mostly borrowed from unexported method in caddyhttp package. 
func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool {
	otherPortInt, err := strconv.Atoi(otherPort)
	if err != nil {
		return false
	}
	for _, lnAddr := range addresses {
		laddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			// skip unparseable listener addresses rather than
			// failing the whole check
			continue
		}
		// otherPort lies outside this listener's port range,
		// so some other port is in use
		if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort {
			return true
		}
	}
	return false
}

// mapContains reports whether m contains at least one of the given keys.
// An empty map or an empty key list never matches.
func mapContains[K comparable, V any](m map[K]V, keys []K) bool {
	if len(m) == 0 || len(keys) == 0 {
		return false
	}
	for _, key := range keys {
		if _, ok := m[key]; ok {
			return true
		}
	}
	return false
}

// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
func specificity(s string) int {
	l := len(s) - strings.Count(s, "*")
	for len(s) > 0 {
		start := strings.Index(s, "{")
		if start < 0 {
			return l
		}
		end := strings.Index(s[start:], "}") + start + 1
		if end <= start {
			// no closing brace found; stop discounting
			return l
		}
		// discount the entire {...} placeholder span
		l -= end - start
		s = s[end:]
	}
	return l
}

// counter hands out sequential group numbers; the pointer lets
// multiple copies of the counter share the same sequence.
type counter struct {
	n *int
}

// nextGroup returns the next unique group name ("groupN")
// and advances the shared counter.
func (c counter) nextGroup() string {
	name := fmt.Sprintf("group%d", *c.n)
	*c.n++
	return name
}

// namedCustomLog pairs a custom log with its name and the
// hostnames associated with it.
type namedCustomLog struct {
	name       string
	hostnames  []string
	log        *caddy.CustomLog
	noHostname bool
}

// addressWithProtocols associates a listen address with
// the protocols to serve it with
type addressWithProtocols struct {
	address   string
	protocols []string
}

// sbAddrAssociation is a mapping from a list of
// addresses with protocols, and a list of server
// blocks that are served on those addresses.
type sbAddrAssociation struct { addressesWithProtocols []addressWithProtocols serverBlocks []serverBlock } const ( matcherPrefix = "@" namedRouteKey = "named_route" ) // Interface guard var _ caddyfile.ServerType = (*ServerType)(nil) ================================================ FILE: caddyconfig/httpcaddyfile/httptype_test.go ================================================ package httpcaddyfile import ( "encoding/json" "testing" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func TestMatcherSyntax(t *testing.T) { for i, tc := range []struct { input string expectError bool }{ { input: `http://localhost @debug { query showdebug=1 } `, expectError: false, }, { input: `http://localhost @debug { query bad format } `, expectError: true, }, { input: `http://localhost @debug { not { path /somepath* } } `, expectError: false, }, { input: `http://localhost @debug { not path /somepath* } `, expectError: false, }, { input: `http://localhost @debug not path /somepath* `, expectError: false, }, { input: `@matcher { path /matcher-not-allowed/outside-of-site-block/* } http://localhost `, expectError: true, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } _, _, err := adapter.Adapt([]byte(tc.input), nil) if err != nil != tc.expectError { t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err) continue } } } func TestSpecificity(t *testing.T) { for i, tc := range []struct { input string expect int }{ {"", 0}, {"*", 0}, {"*.*", 1}, {"{placeholder}", 0}, {"/{placeholder}", 1}, {"foo", 3}, {"example.com", 11}, {"a.example.com", 13}, {"*.example.com", 12}, {"/foo", 4}, {"/foo*", 4}, {"{placeholder}.example.com", 12}, {"{placeholder.example.com", 24}, {"}.", 2}, {"}{", 2}, {"{}", 0}, {"{{{}}", 1}, } { actual := specificity(tc.input) if actual != tc.expect { t.Errorf("Test %d (%s): Expected %d but got %d", i, tc.input, tc.expect, actual) } } } func TestGlobalOptions(t 
*testing.T) { for i, tc := range []struct { input string expectError bool }{ { input: ` { email test@example.com } :80 `, expectError: false, }, { input: ` { admin off } :80 `, expectError: false, }, { input: ` { admin 127.0.0.1:2020 } :80 `, expectError: false, }, { input: ` { admin { disabled false } } :80 `, expectError: true, }, { input: ` { admin { enforce_origin origins 192.168.1.1:2020 127.0.0.1:2020 } } :80 `, expectError: false, }, { input: ` { admin 127.0.0.1:2020 { enforce_origin origins 192.168.1.1:2020 127.0.0.1:2020 } } :80 `, expectError: false, }, { input: ` { admin 192.168.1.1:2020 127.0.0.1:2020 { enforce_origin origins 192.168.1.1:2020 127.0.0.1:2020 } } :80 `, expectError: true, }, { input: ` { admin off { enforce_origin origins 192.168.1.1:2020 127.0.0.1:2020 } } :80 `, expectError: true, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } _, _, err := adapter.Adapt([]byte(tc.input), nil) if err != nil != tc.expectError { t.Errorf("Test %d error expectation failed Expected: %v, got %s", i, tc.expectError, err) continue } } } func TestDefaultSNIWithoutHTTPS(t *testing.T) { caddyfileStr := `{ default_sni my-sni.com } example.com { }` adapter := caddyfile.Adapter{ ServerType: ServerType{}, } result, _, err := adapter.Adapt([]byte(caddyfileStr), nil) if err != nil { t.Fatalf("Failed to adapt Caddyfile: %v", err) } var config struct { Apps struct { HTTP struct { Servers map[string]*caddyhttp.Server `json:"servers"` } `json:"http"` } `json:"apps"` } if err := json.Unmarshal(result, &config); err != nil { t.Fatalf("Failed to unmarshal JSON config: %v", err) } server, ok := config.Apps.HTTP.Servers["srv0"] if !ok { t.Fatalf("Expected server 'srv0' to be created") } if len(server.TLSConnPolicies) == 0 { t.Fatalf("Expected TLS connection policies to be generated, got none") } found := false for _, policy := range server.TLSConnPolicies { if policy.DefaultSNI == "my-sni.com" { found = true break } } if !found { t.Errorf("Expected default_sni 
'my-sni.com' in TLS connection policies, but it was missing. Generated JSON: %s", string(result)) } } ================================================ FILE: caddyconfig/httpcaddyfile/options.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httpcaddyfile import ( "slices" "strconv" "github.com/caddyserver/certmagic" "github.com/libdns/libdns" "github.com/mholt/acmez/v3/acme" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { RegisterGlobalOption("debug", parseOptTrue) RegisterGlobalOption("http_port", parseOptHTTPPort) RegisterGlobalOption("https_port", parseOptHTTPSPort) RegisterGlobalOption("default_bind", parseOptDefaultBind) RegisterGlobalOption("grace_period", parseOptDuration) RegisterGlobalOption("shutdown_delay", parseOptDuration) RegisterGlobalOption("default_sni", parseOptSingleString) RegisterGlobalOption("fallback_sni", parseOptSingleString) RegisterGlobalOption("order", parseOptOrder) RegisterGlobalOption("storage", parseOptStorage) RegisterGlobalOption("storage_check", parseStorageCheck) RegisterGlobalOption("storage_clean_interval", parseStorageCleanInterval) RegisterGlobalOption("renew_interval", parseOptDuration) 
RegisterGlobalOption("ocsp_interval", parseOptDuration) RegisterGlobalOption("acme_ca", parseOptSingleString) RegisterGlobalOption("acme_ca_root", parseOptSingleString) RegisterGlobalOption("acme_dns", parseOptDNS) RegisterGlobalOption("acme_eab", parseOptACMEEAB) RegisterGlobalOption("cert_issuer", parseOptCertIssuer) RegisterGlobalOption("skip_install_trust", parseOptTrue) RegisterGlobalOption("email", parseOptSingleString) RegisterGlobalOption("admin", parseOptAdmin) RegisterGlobalOption("on_demand_tls", parseOptOnDemand) RegisterGlobalOption("local_certs", parseOptTrue) RegisterGlobalOption("key_type", parseOptSingleString) RegisterGlobalOption("auto_https", parseOptAutoHTTPS) RegisterGlobalOption("metrics", parseMetricsOptions) RegisterGlobalOption("servers", parseServerOptions) RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions) RegisterGlobalOption("cert_lifetime", parseOptDuration) RegisterGlobalOption("log", parseLogOptions) RegisterGlobalOption("preferred_chains", parseOptPreferredChains) RegisterGlobalOption("persist_config", parseOptPersistConfig) RegisterGlobalOption("dns", parseOptDNS) RegisterGlobalOption("tls_resolvers", parseOptTLSResolvers) RegisterGlobalOption("ech", parseOptECH) RegisterGlobalOption("renewal_window_ratio", parseOptRenewalWindowRatio) } func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil } func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name var httpPort int var httpPortStr string if !d.AllArgs(&httpPortStr) { return 0, d.ArgErr() } var err error httpPort, err = strconv.Atoi(httpPortStr) if err != nil { return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err) } return httpPort, nil } func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name var httpsPort int var httpsPortStr string if !d.AllArgs(&httpsPortStr) { return 0, d.ArgErr() } var err error httpsPort, err = 
strconv.Atoi(httpsPortStr) if err != nil { return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err) } return httpsPort, nil } func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name // get directive name if !d.Next() { return nil, d.ArgErr() } dirName := d.Val() if _, ok := registeredDirectives[dirName]; !ok { return nil, d.Errf("%s is not a registered directive", dirName) } // get positional token if !d.Next() { return nil, d.ArgErr() } pos := Positional(d.Val()) // if directive already had an order, drop it newOrder := slices.DeleteFunc(directiveOrder, func(d string) bool { return d == dirName }) // act on the positional; if it's First or Last, we're done right away switch pos { case First: newOrder = append([]string{dirName}, newOrder...) if d.NextArg() { return nil, d.ArgErr() } directiveOrder = newOrder return newOrder, nil case Last: newOrder = append(newOrder, dirName) if d.NextArg() { return nil, d.ArgErr() } directiveOrder = newOrder return newOrder, nil // if it's Before or After, continue case Before: case After: default: return nil, d.Errf("unknown positional '%s'", pos) } // get name of other directive if !d.NextArg() { return nil, d.ArgErr() } otherDir := d.Val() if d.NextArg() { return nil, d.ArgErr() } // get the position of the target directive targetIndex := slices.Index(newOrder, otherDir) if targetIndex == -1 { return nil, d.Errf("directive '%s' not found", otherDir) } // if we're inserting after, we need to increment the index to go after if pos == After { targetIndex++ } // insert the directive into the new order newOrder = slices.Insert(newOrder, targetIndex, dirName) directiveOrder = newOrder return newOrder, nil } func parseOptStorage(d *caddyfile.Dispenser, _ any) (any, error) { if !d.Next() { // consume option name return nil, d.ArgErr() } if !d.Next() { // get storage module name return nil, d.ArgErr() } modID := "caddy.storage." 
+ d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } storage, ok := unm.(caddy.StorageConverter) if !ok { return nil, d.Errf("module %s is not a caddy.StorageConverter", modID) } return storage, nil } func parseStorageCheck(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if !d.Next() { return "", d.ArgErr() } val := d.Val() if d.Next() { return "", d.ArgErr() } if val != "off" { return "", d.Errf("storage_check must be 'off'") } return val, nil } func parseStorageCleanInterval(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if !d.Next() { return "", d.ArgErr() } val := d.Val() if d.Next() { return "", d.ArgErr() } if val == "off" { return false, nil } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("failed to parse storage_clean_interval, must be a duration or 'off' %w", err) } return caddy.Duration(dur), nil } func parseOptDuration(d *caddyfile.Dispenser, _ any) (any, error) { if !d.Next() { // consume option name return nil, d.ArgErr() } if !d.Next() { // get duration value return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, err } return caddy.Duration(dur), nil } func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) { eab := new(acme.EAB) d.Next() // consume option name if d.NextArg() { return nil, d.ArgErr() } for d.NextBlock(0) { switch d.Val() { case "key_id": if !d.NextArg() { return nil, d.ArgErr() } eab.KeyID = d.Val() case "mac_key": if !d.NextArg() { return nil, d.ArgErr() } eab.MACKey = d.Val() default: return nil, d.Errf("unrecognized parameter '%s'", d.Val()) } } return eab, nil } func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) { d.Next() // consume option name var issuers []certmagic.Issuer if existing != nil { issuers = existing.([]certmagic.Issuer) } // get issuer module name if !d.Next() { return nil, d.ArgErr() } modID := "tls.issuance." 
+ d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } iss, ok := unm.(certmagic.Issuer) if !ok { return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm) } issuers = append(issuers, iss) return issuers, nil } func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if !d.Next() { return "", d.ArgErr() } val := d.Val() if d.Next() { return "", d.ArgErr() } return val, nil } func parseOptTLSResolvers(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name resolvers := d.RemainingArgs() if len(resolvers) == 0 { return nil, d.ArgErr() } return resolvers, nil } func parseOptDefaultBind(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name var addresses, protocols []string addresses = d.RemainingArgs() if len(addresses) == 0 { addresses = append(addresses, "") } for d.NextBlock(0) { switch d.Val() { case "protocols": protocols = d.RemainingArgs() if len(protocols) == 0 { return nil, d.Errf("protocols requires one or more arguments") } default: return nil, d.Errf("unknown subdirective: %s", d.Val()) } } return []ConfigValue{{Class: "bind", Value: addressesWithProtocols{ addresses: addresses, protocols: protocols, }}}, nil } func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name adminCfg := new(caddy.AdminConfig) if d.NextArg() { listenAddress := d.Val() if listenAddress == "off" { adminCfg.Disabled = true if d.Next() { // Do not accept any remaining options including block return nil, d.Err("No more option is allowed after turning off admin config") } } else { adminCfg.Listen = listenAddress if d.NextArg() { // At most 1 arg is allowed return nil, d.ArgErr() } } } for d.NextBlock(0) { switch d.Val() { case "enforce_origin": adminCfg.EnforceOrigin = true case "origins": adminCfg.Origins = d.RemainingArgs() default: return nil, d.Errf("unrecognized parameter '%s'", d.Val()) } } if 
adminCfg.Listen == "" && !adminCfg.Disabled { adminCfg.Listen = caddy.DefaultAdminListen } return adminCfg, nil } func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if d.NextArg() { return nil, d.ArgErr() } var ond *caddytls.OnDemandConfig for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "ask": if !d.NextArg() { return nil, d.ArgErr() } if ond == nil { ond = new(caddytls.OnDemandConfig) } if ond.PermissionRaw != nil { return nil, d.Err("on-demand TLS permission module (or 'ask') already specified") } perm := caddytls.PermissionByHTTP{Endpoint: d.Val()} ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil) case "permission": if !d.NextArg() { return nil, d.ArgErr() } if ond == nil { ond = new(caddytls.OnDemandConfig) } if ond.PermissionRaw != nil { return nil, d.Err("on-demand TLS permission module (or 'ask') already specified") } modName := d.Val() modID := "tls.permission." + modName unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } perm, ok := unm.(caddytls.OnDemandPermission) if !ok { return nil, d.Errf("module %s (%T) is not an on-demand TLS permission module", modID, unm) } ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", modName, nil) case "interval": return nil, d.Errf("the on_demand_tls 'interval' option is no longer supported, remove it from your config") case "burst": return nil, d.Errf("the on_demand_tls 'burst' option is no longer supported, remove it from your config") default: return nil, d.Errf("unrecognized parameter '%s'", d.Val()) } } if ond == nil { return nil, d.Err("expected at least one config parameter for on_demand_tls") } return ond, nil } func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if !d.Next() { return "", d.ArgErr() } val := d.Val() if d.Next() { return "", d.ArgErr() } if val != "off" { return "", d.Errf("persist_config must be 
'off'") } return val, nil } func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name val := d.RemainingArgs() if len(val) == 0 { return "", d.ArgErr() } for _, v := range val { switch v { case "off": case "disable_redirects": case "disable_certs": case "ignore_loaded_certs": default: return "", d.Errf("auto_https must be one of 'off', 'disable_redirects', 'disable_certs', or 'ignore_loaded_certs'") } } return val, nil } func unmarshalCaddyfileMetricsOptions(d *caddyfile.Dispenser) (any, error) { d.Next() // consume option name metrics := new(caddyhttp.Metrics) for d.NextBlock(0) { switch d.Val() { case "per_host": metrics.PerHost = true case "observe_catchall_hosts": metrics.ObserveCatchallHosts = true default: return nil, d.Errf("unrecognized servers option '%s'", d.Val()) } } return metrics, nil } func parseMetricsOptions(d *caddyfile.Dispenser, _ any) (any, error) { return unmarshalCaddyfileMetricsOptions(d) } func parseServerOptions(d *caddyfile.Dispenser, _ any) (any, error) { return unmarshalCaddyfileServerOptions(d) } func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name var val string if !d.AllArgs(&val) { return nil, d.ArgErr() } if val != "off" { return nil, d.Errf("invalid argument '%s'", val) } return certmagic.OCSPConfig{ DisableStapling: val == "off", }, nil } // parseLogOptions parses the global log option. Syntax: // // log [name] { // output ... // format ... // level // include // exclude // } // // When the name argument is unspecified, this directive modifies the default // logger. 
func parseLogOptions(d *caddyfile.Dispenser, existingVal any) (any, error) { currentNames := make(map[string]struct{}) if existingVal != nil { innerVals, ok := existingVal.([]ConfigValue) if !ok { return nil, d.Errf("existing log values of unexpected type: %T", existingVal) } for _, rawVal := range innerVals { val, ok := rawVal.Value.(namedCustomLog) if !ok { return nil, d.Errf("existing log value of unexpected type: %T", existingVal) } currentNames[val.name] = struct{}{} } } var warnings []caddyconfig.Warning // Call out the same parser that handles server-specific log configuration. configValues, err := parseLogHelper( Helper{ Dispenser: d, warnings: &warnings, }, currentNames, ) if err != nil { return nil, err } if len(warnings) > 0 { return nil, d.Errf("warnings found in parsing global log options: %+v", warnings) } return configValues, nil } func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() return caddytls.ParseCaddyfilePreferredChainsOptions(d) } func parseOptDNS(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name optName := d.Val() // get DNS module name if !d.Next() { // this is allowed if this is the "acme_dns" option since it may refer to the globally-configured "dns" option's value if optName == "acme_dns" { return nil, nil } return nil, d.ArgErr() } modID := "dns.providers." 
+ d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } switch unm.(type) { case libdns.RecordGetter, libdns.RecordSetter, libdns.RecordAppender, libdns.RecordDeleter: default: return nil, d.Errf("module %s (%T) is not a libdns provider", modID, unm) } return unm, nil } func parseOptECH(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name ech := new(caddytls.ECH) publicNames := d.RemainingArgs() for _, publicName := range publicNames { ech.Configs = append(ech.Configs, caddytls.ECHConfiguration{ PublicName: publicName, }) } if len(ech.Configs) == 0 { return nil, d.ArgErr() } for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "dns": if !d.Next() { return nil, d.ArgErr() } providerName := d.Val() modID := "dns.providers." + providerName unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } ech.Publication = append(ech.Publication, &caddytls.ECHPublication{ Configs: publicNames, PublishersRaw: caddy.ModuleMap{ "dns": caddyconfig.JSON(caddytls.ECHDNSPublisher{ ProviderRaw: caddyconfig.JSONModuleObject(unm, "name", providerName, nil), }, nil), }, }) default: return nil, d.Errf("ech: unrecognized subdirective '%s'", d.Val()) } } return ech, nil } func parseOptRenewalWindowRatio(d *caddyfile.Dispenser, _ any) (any, error) { d.Next() // consume option name if !d.Next() { return 0, d.ArgErr() } val := d.Val() ratio, err := strconv.ParseFloat(val, 64) if err != nil { return 0, d.Errf("parsing renewal_window_ratio: %v", err) } if ratio <= 0 || ratio >= 1 { return 0, d.Errf("renewal_window_ratio must be between 0 and 1 (exclusive)") } if d.Next() { return 0, d.ArgErr() } return ratio, nil } ================================================ FILE: caddyconfig/httpcaddyfile/options_test.go ================================================ package httpcaddyfile import ( "encoding/json" "testing" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" 
"github.com/caddyserver/caddy/v2/modules/caddytls" _ "github.com/caddyserver/caddy/v2/modules/logging" ) func TestGlobalLogOptionSyntax(t *testing.T) { for i, tc := range []struct { input string output string expectError bool }{ // NOTE: Additional test cases of successful Caddyfile parsing // are present in: caddytest/integration/caddyfile_adapt/ { input: `{ log default } `, output: `{}`, expectError: false, }, { input: `{ log example { output file foo.log } log example { format json } } `, expectError: true, }, { input: `{ log example /foo { output file foo.log } } `, expectError: true, }, } { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } out, _, err := adapter.Adapt([]byte(tc.input), nil) if err != nil != tc.expectError { t.Errorf("Test %d error expectation failed Expected: %v, got %v", i, tc.expectError, err) continue } if string(out) != tc.output { t.Errorf("Test %d error output mismatch Expected: %s, got %s", i, tc.output, out) } } } func TestGlobalResolversOption(t *testing.T) { tests := []struct { name string input string expectResolvers []string expectError bool }{ { name: "single resolver", input: `{ tls_resolvers 1.1.1.1 } example.com { }`, expectResolvers: []string{"1.1.1.1"}, expectError: false, }, { name: "two resolvers", input: `{ tls_resolvers 1.1.1.1 8.8.8.8 } example.com { }`, expectResolvers: []string{"1.1.1.1", "8.8.8.8"}, expectError: false, }, { name: "multiple resolvers", input: `{ tls_resolvers 1.1.1.1 8.8.8.8 9.9.9.9 } example.com { }`, expectResolvers: []string{"1.1.1.1", "8.8.8.8", "9.9.9.9"}, expectError: false, }, { name: "no resolvers specified", input: `{ } example.com { }`, expectResolvers: nil, expectError: false, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { adapter := caddyfile.Adapter{ ServerType: ServerType{}, } out, _, err := adapter.Adapt([]byte(tc.input), nil) if (err != nil) != tc.expectError { t.Errorf("error expectation failed. 
Expected error: %v, got: %v", tc.expectError, err) return } if tc.expectError { return } // Parse the output JSON to check resolvers var config struct { Apps struct { TLS *caddytls.TLS `json:"tls"` } `json:"apps"` } if err := json.Unmarshal(out, &config); err != nil { t.Errorf("failed to unmarshal output: %v", err) return } // Check if resolvers match expected if config.Apps.TLS == nil { if tc.expectResolvers != nil { t.Errorf("Expected TLS config with resolvers %v, but TLS config is nil", tc.expectResolvers) } return } actualResolvers := config.Apps.TLS.Resolvers if len(tc.expectResolvers) == 0 && len(actualResolvers) == 0 { return // Both empty, ok } if len(actualResolvers) != len(tc.expectResolvers) { t.Errorf("Expected %d resolvers, got %d. Expected: %v, got: %v", len(tc.expectResolvers), len(actualResolvers), tc.expectResolvers, actualResolvers) return } for j, expected := range tc.expectResolvers { if actualResolvers[j] != expected { t.Errorf("Resolver %d mismatch. Expected: %s, got: %s", j, expected, actualResolvers[j]) } } }) } } ================================================ FILE: caddyconfig/httpcaddyfile/pkiapp.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package httpcaddyfile

import (
	"slices"
	"strconv"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
)

func init() {
	RegisterGlobalOption("pki", parsePKIApp)
}

// parsePKIApp parses the global pki option. Syntax:
//
//	pki {
//		ca [<id>] {
//			name <name>
//			root_cn <name>
//			intermediate_cn <name>
//			intermediate_lifetime <duration>
//			maintenance_interval <duration>
//			renewal_window_ratio <ratio>
//			root {
//				cert <path>
//				key <path>
//				format <format>
//			}
//			intermediate {
//				cert <path>
//				key <path>
//				format <format>
//			}
//		}
//	}
//
// When the CA ID is unspecified, 'local' is assumed.
func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
	d.Next() // consume app name

	pkiApp := &caddypki.PKI{
		CAs: make(map[string]*caddypki.CA),
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "ca":
			ca := new(caddypki.CA)

			// optional CA ID argument; at most one is allowed
			if d.NextArg() {
				ca.ID = d.Val()
				if d.NextArg() {
					return nil, d.ArgErr()
				}
			}
			if ca.ID == "" {
				ca.ID = caddypki.DefaultCAID
			}

			for nesting := d.Nesting(); d.NextBlock(nesting); {
				switch d.Val() {
				case "name":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					ca.Name = d.Val()

				case "root_cn":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					ca.RootCommonName = d.Val()

				case "intermediate_cn":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					ca.IntermediateCommonName = d.Val()

				case "intermediate_lifetime":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					dur, err := caddy.ParseDuration(d.Val())
					if err != nil {
						return nil, err
					}
					ca.IntermediateLifetime = caddy.Duration(dur)

				case "maintenance_interval":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					dur, err := caddy.ParseDuration(d.Val())
					if err != nil {
						return nil, err
					}
					ca.MaintenanceInterval = caddy.Duration(dur)

				case "renewal_window_ratio":
					if !d.NextArg() {
						return nil, d.ArgErr()
					}
					// must be a number strictly greater than 0 and at most 1
					ratio, err := strconv.ParseFloat(d.Val(), 64)
					if err != nil || ratio <= 0 || ratio > 1 {
						return nil, d.Errf("renewal_window_ratio must be a number in (0, 1], got %s", d.Val())
					}
					ca.RenewalWindowRatio = ratio

				case "root":
					if ca.Root == nil {
						ca.Root = new(caddypki.KeyPair)
					}
					for nesting := d.Nesting(); d.NextBlock(nesting); {
						switch d.Val() {
						case "cert":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Root.Certificate = d.Val()
						case "key":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Root.PrivateKey = d.Val()
						case "format":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Root.Format = d.Val()
						default:
							return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
						}
					}

				case "intermediate":
					if ca.Intermediate == nil {
						ca.Intermediate = new(caddypki.KeyPair)
					}
					for nesting := d.Nesting(); d.NextBlock(nesting); {
						switch d.Val() {
						case "cert":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Intermediate.Certificate = d.Val()
						case "key":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Intermediate.PrivateKey = d.Val()
						case "format":
							if !d.NextArg() {
								return nil, d.ArgErr()
							}
							ca.Intermediate.Format = d.Val()
						default:
							return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
						}
					}

				default:
					return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
				}
			}

			pkiApp.CAs[ca.ID] = ca

		default:
			return nil, d.Errf("unrecognized pki option '%s'", d.Val())
		}
	}
	return pkiApp, nil
}

// buildPKIApp assembles the PKI app config from the global options and
// from any CAs piled up by directives (e.g. acme_server) in the server
// blocks.
func (st ServerType) buildPKIApp(
	pairings []sbAddrAssociation,
	options map[string]any,
	warnings []caddyconfig.Warning,
) (*caddypki.PKI, []caddyconfig.Warning, error) {
	_, skipInstallTrust := options["skip_install_trust"]

	// if auto_https is completely off, we should not create any PKI
	// infrastructure, even with the skip_install_trust directive
	var autoHTTPS []string
	if ah, ok := options["auto_https"].([]string); ok {
		autoHTTPS = ah
	}
	autoHTTPSOff := slices.Contains(autoHTTPS, "off")

	falseBool := false

	// start from the PKI app configured via global options, if any
	pkiApp, ok := options["pki"].(*caddypki.PKI)
	if !ok {
		pkiApp = &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
	}
	for _, ca := range pkiApp.CAs {
		if skipInstallTrust {
			ca.InstallTrust = &falseBool
		}
		pkiApp.CAs[ca.ID] = ca
	}

	// add in the CAs configured via directives
	for _, p := range pairings {
		for _, sblock := range p.serverBlocks {
			// find all the CAs that were defined and add them to the
			// app config, i.e. from any "acme_server" directives
			for _, caCfgValue := range sblock.pile["pki.ca"] {
				ca := caCfgValue.Value.(*caddypki.CA)
				if skipInstallTrust {
					ca.InstallTrust = &falseBool
				}
				// the CA might already exist from global options,
				// so don't overwrite it in that case
				if _, ok := pkiApp.CAs[ca.ID]; !ok {
					pkiApp.CAs[ca.ID] = ca
				}
			}
		}
	}

	// if no CAs were defined in any of the servers, but we were asked
	// not to install trust, add the default/local CA just to carry that
	// setting -- only if auto_https is not completely disabled
	if len(pkiApp.CAs) == 0 && skipInstallTrust && !autoHTTPSOff {
		ca := new(caddypki.CA)
		ca.ID = caddypki.DefaultCAID
		ca.InstallTrust = &falseBool
		pkiApp.CAs[ca.ID] = ca
	}

	return pkiApp, warnings, nil
}

================================================
FILE: caddyconfig/httpcaddyfile/pkiapp_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// TestParsePKIApp_maintenanceIntervalAndRenewalWindowRatio adapts a
// Caddyfile that tunes the local CA and verifies both settings survive
// into the resulting JSON config.
func TestParsePKIApp_maintenanceIntervalAndRenewalWindowRatio(t *testing.T) {
	input := `{
	pki {
		ca local {
			maintenance_interval 5m
			renewal_window_ratio 0.15
		}
	}
}
:8080 {
}
`
	adapter := caddyfile.Adapter{ServerType: ServerType{}}
	out, _, err := adapter.Adapt([]byte(input), nil)
	if err != nil {
		t.Fatalf("Adapt failed: %v", err)
	}

	// decode only the fields under test from the adapted config
	var cfg struct {
		Apps struct {
			PKI struct {
				CertificateAuthorities map[string]struct {
					MaintenanceInterval int64   `json:"maintenance_interval,omitempty"`
					RenewalWindowRatio  float64 `json:"renewal_window_ratio,omitempty"`
				} `json:"certificate_authorities,omitempty"`
			} `json:"pki,omitempty"`
		} `json:"apps"`
	}
	if err := json.Unmarshal(out, &cfg); err != nil {
		t.Fatalf("unmarshal config: %v", err)
	}

	ca, ok := cfg.Apps.PKI.CertificateAuthorities["local"]
	if !ok {
		t.Fatal("expected certificate_authorities.local to exist")
	}
	wantInterval := 5 * time.Minute.Nanoseconds()
	if ca.MaintenanceInterval != wantInterval {
		t.Errorf("maintenance_interval = %d, want %d (5m)", ca.MaintenanceInterval, wantInterval)
	}
	if ca.RenewalWindowRatio != 0.15 {
		t.Errorf("renewal_window_ratio = %v, want 0.15", ca.RenewalWindowRatio)
	}
}

// TestParsePKIApp_renewalWindowRatioInvalid ensures that a ratio outside
// (0, 1] is rejected during adaptation.
func TestParsePKIApp_renewalWindowRatioInvalid(t *testing.T) {
	input := `{
	pki {
		ca local {
			renewal_window_ratio 1.5
		}
	}
}
:8080 {
}
`
	adapter := caddyfile.Adapter{ServerType: ServerType{}}
	if _, _, err := adapter.Adapt([]byte(input), nil); err == nil {
		t.Error("expected error for renewal_window_ratio > 1")
	}
}

================================================
FILE: caddyconfig/httpcaddyfile/serveroptions.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package httpcaddyfile import ( "encoding/json" "fmt" "slices" "strconv" "github.com/dustin/go-humanize" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) // serverOptions collects server config overrides parsed from Caddyfile global options type serverOptions struct { // If set, will only apply these options to servers that contain a // listener address that matches exactly. If empty, will apply to all // servers that were not already matched by another serverOptions. ListenerAddress string // These will all map 1:1 to the caddyhttp.Server struct Name string ListenerWrappersRaw []json.RawMessage PacketConnWrappersRaw []json.RawMessage ReadTimeout caddy.Duration ReadHeaderTimeout caddy.Duration WriteTimeout caddy.Duration IdleTimeout caddy.Duration KeepAliveInterval caddy.Duration KeepAliveIdle caddy.Duration KeepAliveCount int MaxHeaderBytes int EnableFullDuplex bool Protocols []string StrictSNIHost *bool TrustedProxiesRaw json.RawMessage TrustedProxiesStrict int TrustedProxiesUnix bool ClientIPHeaders []string ShouldLogCredentials bool Metrics *caddyhttp.Metrics Trace bool // TODO: EXPERIMENTAL // If set, overrides whether QUIC listeners allow 0-RTT (early data). // If nil, the default behavior is used (currently allowed). 
Allow0RTT *bool } func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) { d.Next() // consume option name serverOpts := serverOptions{} if d.NextArg() { serverOpts.ListenerAddress = d.Val() if d.NextArg() { return nil, d.ArgErr() } } for d.NextBlock(0) { switch d.Val() { case "name": if serverOpts.ListenerAddress == "" { return nil, d.Errf("cannot set a name for a server without a listener address") } if !d.NextArg() { return nil, d.ArgErr() } serverOpts.Name = d.Val() case "listener_wrappers": for nesting := d.Nesting(); d.NextBlock(nesting); { modID := "caddy.listeners." + d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } listenerWrapper, ok := unm.(caddy.ListenerWrapper) if !ok { return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm) } jsonListenerWrapper := caddyconfig.JSONModuleObject( listenerWrapper, "wrapper", listenerWrapper.(caddy.Module).CaddyModule().ID.Name(), nil, ) serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper) } case "packet_conn_wrappers": for nesting := d.Nesting(); d.NextBlock(nesting); { modID := "caddy.packetconns." 
+ d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } packetConnWrapper, ok := unm.(caddy.PacketConnWrapper) if !ok { return nil, fmt.Errorf("module %s (%T) is not a packet conn wrapper", modID, unm) } jsonPacketConnWrapper := caddyconfig.JSONModuleObject( packetConnWrapper, "wrapper", packetConnWrapper.(caddy.Module).CaddyModule().ID.Name(), nil, ) serverOpts.PacketConnWrappersRaw = append(serverOpts.PacketConnWrappersRaw, jsonPacketConnWrapper) } case "timeouts": for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "read_body": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing read_body timeout duration: %v", err) } serverOpts.ReadTimeout = caddy.Duration(dur) case "read_header": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing read_header timeout duration: %v", err) } serverOpts.ReadHeaderTimeout = caddy.Duration(dur) case "write": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing write timeout duration: %v", err) } serverOpts.WriteTimeout = caddy.Duration(dur) case "idle": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing idle timeout duration: %v", err) } serverOpts.IdleTimeout = caddy.Duration(dur) default: return nil, d.Errf("unrecognized timeouts option '%s'", d.Val()) } } case "keepalive_interval": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing keepalive interval duration: %v", err) } serverOpts.KeepAliveInterval = caddy.Duration(dur) case "keepalive_idle": if !d.NextArg() { return nil, d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return nil, d.Errf("parsing keepalive idle duration: %v", 
err) } serverOpts.KeepAliveIdle = caddy.Duration(dur) case "keepalive_count": if !d.NextArg() { return nil, d.ArgErr() } cnt, err := strconv.ParseInt(d.Val(), 10, 32) if err != nil { return nil, d.Errf("parsing keepalive count int: %v", err) } serverOpts.KeepAliveCount = int(cnt) case "max_header_size": var sizeStr string if !d.AllArgs(&sizeStr) { return nil, d.ArgErr() } size, err := humanize.ParseBytes(sizeStr) if err != nil { return nil, d.Errf("parsing max_header_size: %v", err) } serverOpts.MaxHeaderBytes = int(size) case "enable_full_duplex": if d.NextArg() { return nil, d.ArgErr() } serverOpts.EnableFullDuplex = true case "log_credentials": if d.NextArg() { return nil, d.ArgErr() } serverOpts.ShouldLogCredentials = true case "protocols": protos := d.RemainingArgs() for _, proto := range protos { if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" { return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto) } if slices.Contains(serverOpts.Protocols, proto) { return nil, d.Errf("protocol %s specified more than once", proto) } serverOpts.Protocols = append(serverOpts.Protocols, proto) } if nesting := d.Nesting(); d.NextBlock(nesting) { return nil, d.ArgErr() } case "strict_sni_host": if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" { return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val()) } boolVal := true if d.Val() == "insecure_off" { boolVal = false } serverOpts.StrictSNIHost = &boolVal case "trusted_proxies": if !d.NextArg() { return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument") } modID := "http.ip_sources." 
+ d.Val() unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } source, ok := unm.(caddyhttp.IPRangeSource) if !ok { return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm) } jsonSource := caddyconfig.JSONModuleObject( source, "source", source.(caddy.Module).CaddyModule().ID.Name(), nil, ) serverOpts.TrustedProxiesRaw = jsonSource case "trusted_proxies_strict": if d.NextArg() { return nil, d.ArgErr() } serverOpts.TrustedProxiesStrict = 1 case "trusted_proxies_unix": if d.NextArg() { return nil, d.ArgErr() } serverOpts.TrustedProxiesUnix = true case "client_ip_headers": headers := d.RemainingArgs() for _, header := range headers { if slices.Contains(serverOpts.ClientIPHeaders, header) { return nil, d.Errf("client IP header %s specified more than once", header) } serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header) } if nesting := d.Nesting(); d.NextBlock(nesting) { return nil, d.ArgErr() } case "metrics": caddy.Log().Warn("The nested 'metrics' option inside `servers` is deprecated and will be removed in the next major version. 
Use the global 'metrics' option instead.") serverOpts.Metrics = new(caddyhttp.Metrics) for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "per_host": serverOpts.Metrics.PerHost = true default: return nil, d.Errf("unrecognized metrics option '%s'", d.Val()) } } case "trace": if d.NextArg() { return nil, d.ArgErr() } serverOpts.Trace = true case "0rtt": // only supports "off" for now if !d.NextArg() { return nil, d.ArgErr() } if d.Val() != "off" { return nil, d.Errf("unsupported 0rtt argument '%s' (only 'off' is supported)", d.Val()) } boolVal := false serverOpts.Allow0RTT = &boolVal default: return nil, d.Errf("unrecognized servers option '%s'", d.Val()) } } return serverOpts, nil } // applyServerOptions sets the server options on the appropriate servers func applyServerOptions( servers map[string]*caddyhttp.Server, options map[string]any, _ *[]caddyconfig.Warning, ) error { serverOpts, ok := options["servers"].([]serverOptions) if !ok { return nil } // check for duplicate names, which would clobber the config existingNames := map[string]bool{} for _, opts := range serverOpts { if opts.Name == "" { continue } if existingNames[opts.Name] { return fmt.Errorf("cannot use duplicate server name '%s'", opts.Name) } existingNames[opts.Name] = true } // collect the server name overrides nameReplacements := map[string]string{} for key, server := range servers { // find the options that apply to this server optsIndex := slices.IndexFunc(serverOpts, func(s serverOptions) bool { return s.ListenerAddress == "" || slices.Contains(server.Listen, s.ListenerAddress) }) // if none apply, then move to the next server if optsIndex == -1 { continue } opts := serverOpts[optsIndex] // set all the options server.ListenerWrappersRaw = opts.ListenerWrappersRaw server.PacketConnWrappersRaw = opts.PacketConnWrappersRaw server.ReadTimeout = opts.ReadTimeout server.ReadHeaderTimeout = opts.ReadHeaderTimeout server.WriteTimeout = opts.WriteTimeout server.IdleTimeout = 
opts.IdleTimeout server.KeepAliveInterval = opts.KeepAliveInterval server.KeepAliveIdle = opts.KeepAliveIdle server.KeepAliveCount = opts.KeepAliveCount server.MaxHeaderBytes = opts.MaxHeaderBytes server.EnableFullDuplex = opts.EnableFullDuplex server.Protocols = opts.Protocols server.StrictSNIHost = opts.StrictSNIHost server.TrustedProxiesRaw = opts.TrustedProxiesRaw server.ClientIPHeaders = opts.ClientIPHeaders server.TrustedProxiesStrict = opts.TrustedProxiesStrict server.TrustedProxiesUnix = opts.TrustedProxiesUnix server.Metrics = opts.Metrics server.Allow0RTT = opts.Allow0RTT if opts.ShouldLogCredentials { if server.Logs == nil { server.Logs = new(caddyhttp.ServerLogConfig) } server.Logs.ShouldLogCredentials = opts.ShouldLogCredentials } if opts.Trace { // TODO: THIS IS EXPERIMENTAL (MAY 2024) if server.Logs == nil { server.Logs = new(caddyhttp.ServerLogConfig) } server.Logs.Trace = opts.Trace } if opts.Name != "" { nameReplacements[key] = opts.Name } } // rename the servers if marked to do so for old, new := range nameReplacements { servers[new] = servers[old] delete(servers, old) } return nil } ================================================ FILE: caddyconfig/httpcaddyfile/shorthands.go ================================================ package httpcaddyfile import ( "regexp" "strings" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) type ComplexShorthandReplacer struct { search *regexp.Regexp replace string } type ShorthandReplacer struct { complex []ComplexShorthandReplacer simple *strings.Replacer } func NewShorthandReplacer() ShorthandReplacer { // replace shorthand placeholders (which are convenient // when writing a Caddyfile) with their actual placeholder // identifiers or variable names replacer := strings.NewReplacer(placeholderShorthands()...) 
// these are placeholders that allow a user-defined final // parameters, but we still want to provide a shorthand // for those, so we use a regexp to replace regexpReplacements := []ComplexShorthandReplacer{ {regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"}, {regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"}, {regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"}, {regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"}, {regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"}, {regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"}, {regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"}, {regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"}, {regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"}, {regexp.MustCompile(`{resp\.([\w-\.]*)}`), "{http.intercept.$1}"}, {regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"}, {regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"}, } return ShorthandReplacer{ complex: regexpReplacements, simple: replacer, } } // placeholderShorthands returns a slice of old-new string pairs, // where the left of the pair is a placeholder shorthand that may // be used in the Caddyfile, and the right is the replacement. 
func placeholderShorthands() []string {
	// NOTE: these pairs are fed to strings.NewReplacer, which scans the
	// input and applies the earliest-matching pattern at each position;
	// none of these keys is a prefix of another, so relative order does
	// not change behavior, but keep related entries grouped for readability.
	return []string{
		// host/port of the request
		"{host}", "{http.request.host}",
		"{hostport}", "{http.request.hostport}",
		"{port}", "{http.request.port}",
		// original (pre-rewrite) request properties
		"{orig_method}", "{http.request.orig_method}",
		"{orig_uri}", "{http.request.orig_uri}",
		"{orig_path}", "{http.request.orig_uri.path}",
		"{orig_dir}", "{http.request.orig_uri.path.dir}",
		"{orig_file}", "{http.request.orig_uri.path.file}",
		"{orig_query}", "{http.request.orig_uri.query}",
		"{orig_?query}", "{http.request.orig_uri.prefixed_query}",
		// current request method and URI parts
		"{method}", "{http.request.method}",
		"{uri}", "{http.request.uri}",
		"{%uri}", "{http.request.uri_escaped}",
		"{path}", "{http.request.uri.path}",
		"{%path}", "{http.request.uri.path_escaped}",
		"{dir}", "{http.request.uri.path.dir}",
		"{file}", "{http.request.uri.path.file}",
		"{query}", "{http.request.uri.query}",
		"{%query}", "{http.request.uri.query_escaped}",
		"{?query}", "{http.request.uri.prefixed_query}",
		// client connection info
		"{remote}", "{http.request.remote}",
		"{remote_host}", "{http.request.remote.host}",
		"{remote_port}", "{http.request.remote.port}",
		"{scheme}", "{http.request.scheme}",
		"{uuid}", "{http.request.uuid}",
		// TLS handshake and client certificate details
		"{tls_cipher}", "{http.request.tls.cipher_suite}",
		"{tls_version}", "{http.request.tls.version}",
		"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
		"{tls_client_issuer}", "{http.request.tls.client.issuer}",
		"{tls_client_serial}", "{http.request.tls.client.serial}",
		"{tls_client_subject}", "{http.request.tls.client.subject}",
		"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
		"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
		// reverse proxy and vars
		"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
		"{client_ip}", "{http.vars.client_ip}",
	}
}

// ApplyToSegment replaces each shorthand placeholder with its full placeholder, understandable by Caddy.
func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) { if segment != nil { for i := 0; i < len(*segment); i++ { // simple string replacements (*segment)[i].Text = s.simple.Replace((*segment)[i].Text) // complex regexp replacements for _, r := range s.complex { (*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace) } } } } ================================================ FILE: caddyconfig/httpcaddyfile/testdata/import_variadic.txt ================================================ (t2) { respond 200 { body {args[:]} } } :8082 { import t2 false } ================================================ FILE: caddyconfig/httpcaddyfile/testdata/import_variadic_snippet.txt ================================================ (t1) { respond 200 { body {args[:]} } } :8081 { import t1 false } ================================================ FILE: caddyconfig/httpcaddyfile/testdata/import_variadic_with_import.txt ================================================ (t1) { respond 200 { body {args[:]} } } :8081 { import t1 false } import import_variadic.txt :8083 { import t2 true } ================================================ FILE: caddyconfig/httpcaddyfile/tlsapp.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package httpcaddyfile import ( "bytes" "encoding/json" "fmt" "reflect" "slices" "sort" "strconv" "strings" "github.com/caddyserver/certmagic" "github.com/mholt/acmez/v3/acme" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func (st ServerType) buildTLSApp( pairings []sbAddrAssociation, options map[string]any, warnings []caddyconfig.Warning, ) (*caddytls.TLS, []caddyconfig.Warning, error) { tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)} var certLoaders []caddytls.CertificateLoader httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort) if hp, ok := options["http_port"].(int); ok { httpPort = strconv.Itoa(hp) } autoHTTPS := []string{} if ah, ok := options["auto_https"].([]string); ok { autoHTTPS = ah } // find all hosts that share a server block with a hostless // key, so that they don't get forgotten/omitted by auto-HTTPS // (since they won't appear in route matchers) httpsHostsSharedWithHostlessKey := make(map[string]struct{}) if !slices.Contains(autoHTTPS, "off") { for _, pair := range pairings { for _, sb := range pair.serverBlocks { for _, addr := range sb.parsedKeys { if addr.Host != "" { continue } // this server block has a hostless key, now // go through and add all the hosts to the set for _, otherAddr := range sb.parsedKeys { if otherAddr.Original == addr.Original { continue } if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort { httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{} } } break } } } } // a catch-all automation policy is used as a "default" for all subjects that // don't have custom configuration explicitly associated with them; this // is only to add if the global settings or defaults are non-empty catchAllAP, err := newBaseAutomationPolicy(options, warnings, false) if err != nil { return nil, warnings, err } if catchAllAP != nil { if tlsApp.Automation == 
nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP) } forcedAutomatedNames := make(map[string]struct{}) // explicitly configured to be automated, even if covered by a wildcard for _, p := range pairings { // avoid setting up TLS automation policies for a server that is HTTP-only var addresses []string for _, addressWithProtocols := range p.addressesWithProtocols { addresses = append(addresses, addressWithProtocols.address) } if !listenersUseAnyPortOtherThan(addresses, httpPort) { continue } for _, sblock := range p.serverBlocks { // check the scheme of all the site addresses, // skip building AP if they all had http:// if sblock.isAllHTTP() { continue } // get values that populate an automation policy for this block ap, err := newBaseAutomationPolicy(options, warnings, true) if err != nil { return nil, warnings, err } sblockHosts := sblock.hostsFromKeys(false) if len(sblockHosts) == 0 && catchAllAP != nil { ap = catchAllAP } // on-demand tls if _, ok := sblock.pile["tls.on_demand"]; ok { ap.OnDemand = true } // collect hosts that are forced to have certs automated for their specific name if _, ok := sblock.pile["tls.force_automate"]; ok { for _, host := range sblockHosts { forcedAutomatedNames[host] = struct{}{} } } // reuse private keys tls if _, ok := sblock.pile["tls.reuse_private_keys"]; ok { ap.ReusePrivateKeys = true } if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok { ap.KeyType = keyTypeVals[0].Value.(string) } if renewalWindowRatioVals, ok := sblock.pile["tls.renewal_window_ratio"]; ok { ap.RenewalWindowRatio = renewalWindowRatioVals[0].Value.(float64) } else if globalRenewalWindowRatio, ok := options["renewal_window_ratio"]; ok { ap.RenewalWindowRatio = globalRenewalWindowRatio.(float64) } // certificate issuers if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok { var issuers []certmagic.Issuer for _, issuerVal := range issuerVals { issuers = append(issuers, 
issuerVal.Value.(certmagic.Issuer)) } if ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) { // this more correctly implements an error check that was removed // below; try it with this config: // // :443 { // bind 127.0.0.1 // } // // :443 { // bind ::1 // tls { // issuer acme // } // } return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers) } ap.Issuers = issuers } // certificate managers if certManagerVals, ok := sblock.pile["tls.cert_manager"]; ok { for _, certManager := range certManagerVals { certGetterName := certManager.Value.(caddy.Module).CaddyModule().ID.Name() ap.ManagersRaw = append(ap.ManagersRaw, caddyconfig.JSONModuleObject(certManager.Value, "via", certGetterName, &warnings)) } } // custom bind host for _, cfgVal := range sblock.pile["bind"] { for _, iss := range ap.Issuers { // if an issuer was already configured and it is NOT an ACME issuer, // skip, since we intend to adjust only ACME issuers; ensure we // include any issuer that embeds/wraps an underlying ACME issuer var acmeIssuer *caddytls.ACMEIssuer if acmeWrapper, ok := iss.(acmeCapable); ok { acmeIssuer = acmeWrapper.GetACMEIssuer() } if acmeIssuer == nil { continue } // proceed to configure the ACME issuer's bind host, without // overwriting any existing settings if acmeIssuer.Challenges == nil { acmeIssuer.Challenges = new(caddytls.ChallengesConfig) } if acmeIssuer.Challenges.BindHost == "" { // only binding to one host is supported var bindHost string if asserted, ok := cfgVal.Value.(addressesWithProtocols); ok && len(asserted.addresses) > 0 { bindHost = asserted.addresses[0] } acmeIssuer.Challenges.BindHost = bindHost } } } // we used to ensure this block is allowed to create an automation policy; // doing so was forbidden if it has a key with no host (i.e. 
":443") // and if there is a different server block that also has a key with no // host -- since a key with no host matches any host, we need its // associated automation policy to have an empty Subjects list, i.e. no // host filter, which is indistinguishable between the two server blocks // because automation is not done in the context of a particular server... // this is an example of a poor mapping from Caddyfile to JSON but that's // the least-leaky abstraction I could figure out -- however, this check // was preventing certain listeners, like those provided by plugins, from // being used as desired (see the Tailscale listener plugin), so I removed // the check: and I think since I originally wrote the check I added a new // check above which *properly* detects this ambiguity without breaking the // listener plugin; see the check above with a commented example config if len(sblockHosts) == 0 && catchAllAP == nil { // this server block has a key with no hosts, but there is not yet // a catch-all automation policy (probably because no global options // were set), so this one becomes it catchAllAP = ap } hostsNotHTTP := sblock.hostsFromKeysNotHTTP(httpPort) sort.Strings(hostsNotHTTP) // solely for deterministic test results // associate our new automation policy with this server block's hosts ap.SubjectsRaw = hostsNotHTTP // if a combination of public and internal names were given // for this same server block and no issuer was specified, we // need to separate them out in the automation policies so // that the internal names can use the internal issuer and // the other names can use the default/public/ACME issuer var ap2 *caddytls.AutomationPolicy if len(ap.Issuers) == 0 { var internal, external []string for _, s := range ap.SubjectsRaw { // do not create Issuers for Tailscale domains; they will be given a Manager instead if isTailscaleDomain(s) { continue } if !certmagic.SubjectQualifiesForCert(s) { return nil, warnings, fmt.Errorf("subject does not qualify for 
certificate: '%s'", s) } // we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance: // names like *.*.tld that may not qualify for a public certificate are actually // fine when used with OnDemand, since OnDemand (currently) does not obtain // wildcards (if it ever does, there will be a separate config option to enable // it that we would need to check here) since the hostname is known at handshake; // and it is unexpected to switch to internal issuer when the user wants to get // regular certificates on-demand for a class of certs like *.*.tld. if subjectQualifiesForPublicCert(ap, s) { external = append(external, s) } else { internal = append(internal, s) } } if len(external) > 0 && len(internal) > 0 { ap.SubjectsRaw = external apCopy := *ap ap2 = &apCopy ap2.SubjectsRaw = internal ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)} } } if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap) if ap2 != nil { tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2) } // certificate loaders if clVals, ok := sblock.pile["tls.cert_loader"]; ok { for _, clVal := range clVals { certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader)) } } } } // group certificate loaders by module name, then add to config if len(certLoaders) > 0 { loadersByName := make(map[string]caddytls.CertificateLoader) for _, cl := range certLoaders { name := caddy.GetModuleName(cl) // ugh... technically, we may have multiple FileLoader and FolderLoader // modules (because the tls directive returns one per occurrence), but // the config structure expects only one instance of each kind of loader // module, so we have to combine them... 
instead of enumerating each // possible cert loader module in a type switch, we can use reflection, // which works on any cert loaders that are slice types if reflect.TypeOf(cl).Kind() == reflect.Slice { combined := reflect.ValueOf(loadersByName[name]) if !combined.IsValid() { combined = reflect.New(reflect.TypeOf(cl)).Elem() } clVal := reflect.ValueOf(cl) for i := range clVal.Len() { combined = reflect.Append(combined, clVal.Index(i)) } loadersByName[name] = combined.Interface().(caddytls.CertificateLoader) } } for certLoaderName, loaders := range loadersByName { tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings) } } // set any of the on-demand options, for if/when on-demand TLS is enabled if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.OnDemand = onDemand } // set up "global" (to the TLS app) DNS provider config if globalDNS, ok := options["dns"]; ok && globalDNS != nil { tlsApp.DNSRaw = caddyconfig.JSONModuleObject(globalDNS, "name", globalDNS.(caddy.Module).CaddyModule().ID.Name(), nil) } // set up "global" (to the TLS app) DNS resolvers config if globalResolvers, ok := options["tls_resolvers"]; ok && globalResolvers != nil { tlsApp.Resolvers = globalResolvers.([]string) } // set up ECH from Caddyfile options if ech, ok := options["ech"].(*caddytls.ECH); ok { tlsApp.EncryptedClientHello = ech // outer server names will need certificates, so make sure they're included // in an automation policy for them that applies any global options ap, err := newBaseAutomationPolicy(options, warnings, true) if err != nil { return nil, warnings, err } for _, cfg := range ech.Configs { if cfg.PublicName != "" { ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName) } } if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap) } // 
if the storage clean interval is a boolean, then it's "off" to disable cleaning if sc, ok := options["storage_check"].(string); ok && sc == "off" { tlsApp.DisableStorageCheck = true } // if the storage clean interval is a boolean, then it's "off" to disable cleaning if sci, ok := options["storage_clean_interval"].(bool); ok && !sci { tlsApp.DisableStorageClean = true } // set the storage clean interval if configured if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.StorageCleanInterval = storageCleanInterval } // set the expired certificates renew interval if configured if renewCheckInterval, ok := options["renew_interval"].(caddy.Duration); ok { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.RenewCheckInterval = renewCheckInterval } // set the OCSP check interval if configured if ocspCheckInterval, ok := options["ocsp_interval"].(caddy.Duration); ok { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.OCSPCheckInterval = ocspCheckInterval } // set whether OCSP stapling should be disabled for manually-managed certificates if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok { tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling } // if any hostnames appear on the same server block as a key with // no host, they will not be used with route matchers because the // hostless key matches all hosts, therefore, it wouldn't be // considered for auto-HTTPS, so we need to make sure those hosts // are manually considered for managed certificates; we also need // to make sure that any of these names which are internal-only // get internal certificates by default rather than ACME var al caddytls.AutomateLoader internalAP := &caddytls.AutomationPolicy{ IssuersRaw: 
[]json.RawMessage{json.RawMessage(`{"module":"internal"}`)}, } if !slices.Contains(autoHTTPS, "off") && !slices.Contains(autoHTTPS, "disable_certs") { for h := range httpsHostsSharedWithHostlessKey { al = append(al, h) if !certmagic.SubjectQualifiesForPublicCert(h) { internalAP.SubjectsRaw = append(internalAP.SubjectsRaw, h) } } } for name := range forcedAutomatedNames { if slices.Contains(al, name) { continue } al = append(al, name) } slices.Sort(al) // to stabilize the adapt output if len(al) > 0 { tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings) } if len(internalAP.SubjectsRaw) > 0 { if tlsApp.Automation == nil { tlsApp.Automation = new(caddytls.AutomationConfig) } tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP) } // if there are any global options set for issuers (ACME ones in particular), make sure they // take effect in every automation policy that does not have any issuers if tlsApp.Automation != nil { globalEmail := options["email"] globalACMECA := options["acme_ca"] globalACMECARoot := options["acme_ca_root"] _, globalACMEDNS := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set globalACMEEAB := options["acme_eab"] globalPreferredChains := options["preferred_chains"] hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS || globalACMEEAB != nil || globalPreferredChains != nil if hasGlobalACMEDefaults { for i := range tlsApp.Automation.Policies { ap := tlsApp.Automation.Policies[i] if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) { // for public names, create default issuers which will later be filled in with configured global defaults // (internal names will implicitly use the internal issuer at auto-https time) emailStr, _ := globalEmail.(string) ap.Issuers = caddytls.DefaultIssuers(emailStr) // if a specific endpoint is configured, can't use multiple default issuers if 
globalACMECA != nil { ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)} } } } } } // finalize and verify policies; do cleanup if tlsApp.Automation != nil { for i, ap := range tlsApp.Automation.Policies { // ensure all issuers have global defaults filled in for j, issuer := range ap.Issuers { err := fillInGlobalACMEDefaults(issuer, options) if err != nil { return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err) } } // encode all issuer values we created, so they will be rendered in the output if len(ap.Issuers) > 0 && ap.IssuersRaw == nil { for _, iss := range ap.Issuers { issuerName := iss.(caddy.Module).CaddyModule().ID.Name() ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings)) } } } // consolidate automation policies that are the exact same tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies) // ensure automation policies don't overlap subjects (this should be // an error at provision-time as well, but catch it in the adapt phase // for convenience) automationHostSet := make(map[string]struct{}) for _, ap := range tlsApp.Automation.Policies { for _, s := range ap.SubjectsRaw { if _, ok := automationHostSet[s]; ok { return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s) } automationHostSet[s] = struct{}{} } } // if nothing remains, remove any excess values to clean up the resulting config if len(tlsApp.Automation.Policies) == 0 { tlsApp.Automation.Policies = nil } if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) { tlsApp.Automation = nil } } return tlsApp, warnings, nil } type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer } func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) error { acmeWrapper, ok := issuer.(acmeCapable) if !ok { return nil } acmeIssuer := 
acmeWrapper.GetACMEIssuer() if acmeIssuer == nil { return nil } globalEmail := options["email"] globalACMECA := options["acme_ca"] globalACMECARoot := options["acme_ca_root"] globalACMEDNS, globalACMEDNSok := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set globalACMEEAB := options["acme_eab"] globalPreferredChains := options["preferred_chains"] globalCertLifetime := options["cert_lifetime"] globalHTTPPort, globalHTTPSPort := options["http_port"], options["https_port"] globalDefaultBind := options["default_bind"] if globalEmail != nil && acmeIssuer.Email == "" { acmeIssuer.Email = globalEmail.(string) } if globalACMECA != nil && acmeIssuer.CA == "" { acmeIssuer.CA = globalACMECA.(string) } if globalACMECARoot != nil && !slices.Contains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) { acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) } if globalACMEDNSok && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil || acmeIssuer.Challenges.DNS.ProviderRaw == nil) { globalDNS := options["dns"] if globalDNS == nil && globalACMEDNS == nil { return fmt.Errorf("acme_dns specified without DNS provider config, but no provider specified with 'dns' global option") } if acmeIssuer.Challenges == nil { acmeIssuer.Challenges = new(caddytls.ChallengesConfig) } if acmeIssuer.Challenges.DNS == nil { acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig) } if globalACMEDNS != nil && acmeIssuer.Challenges.DNS.ProviderRaw == nil { // Set a global DNS provider if `acme_dns` is set acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil) } } if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil { acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB) } if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil { acmeIssuer.PreferredChains = 
globalPreferredChains.(*caddytls.ChainPreference) } // only configure alt HTTP and TLS-ALPN ports if the DNS challenge is not enabled (wouldn't hurt, but isn't necessary since the DNS challenge is exclusive of others) if globalHTTPPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.HTTP == nil || acmeIssuer.Challenges.HTTP.AlternatePort == 0) { if acmeIssuer.Challenges == nil { acmeIssuer.Challenges = new(caddytls.ChallengesConfig) } if acmeIssuer.Challenges.HTTP == nil { acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig) } acmeIssuer.Challenges.HTTP.AlternatePort = globalHTTPPort.(int) } if globalHTTPSPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.TLSALPN == nil || acmeIssuer.Challenges.TLSALPN.AlternatePort == 0) { if acmeIssuer.Challenges == nil { acmeIssuer.Challenges = new(caddytls.ChallengesConfig) } if acmeIssuer.Challenges.TLSALPN == nil { acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig) } acmeIssuer.Challenges.TLSALPN.AlternatePort = globalHTTPSPort.(int) } // If BindHost is still unset, fall back to the first default_bind address if set // This avoids binding the automation policy to the wildcard socket, which is unexpected behavior when a more selective socket is specified via default_bind // In BSD it is valid to bind to the wildcard socket even though a more selective socket is already open (still unexpected behavior by the caller though) // In Linux the same call will error with EADDRINUSE whenever the listener for the automation policy is opened if acmeIssuer.Challenges == nil || (acmeIssuer.Challenges.DNS == nil && acmeIssuer.Challenges.BindHost == "") { if defBinds, ok := globalDefaultBind.([]ConfigValue); ok && len(defBinds) > 0 { if abp, ok := defBinds[0].Value.(addressesWithProtocols); ok && len(abp.addresses) > 0 { if 
acmeIssuer.Challenges == nil { acmeIssuer.Challenges = new(caddytls.ChallengesConfig) } acmeIssuer.Challenges.BindHost = abp.addresses[0] } } } if globalCertLifetime != nil && acmeIssuer.CertificateLifetime == 0 { acmeIssuer.CertificateLifetime = globalCertLifetime.(caddy.Duration) } // apply global resolvers if DNS challenge is configured and resolvers are not already set globalResolvers := options["tls_resolvers"] if globalResolvers != nil && acmeIssuer.Challenges != nil && acmeIssuer.Challenges.DNS != nil { // Check if DNS challenge is actually configured hasDNSChallenge := globalACMEDNSok || acmeIssuer.Challenges.DNS.ProviderRaw != nil if hasDNSChallenge && len(acmeIssuer.Challenges.DNS.Resolvers) == 0 { acmeIssuer.Challenges.DNS.Resolvers = globalResolvers.([]string) } } return nil } // newBaseAutomationPolicy returns a new TLS automation policy that gets // its values from the global options map. It should be used as the base // for any other automation policies. A nil policy (and no error) will be // returned if there are no default/global options. However, if always is // true, a non-nil value will always be returned (unless there is an error). 
func newBaseAutomationPolicy(
	options map[string]any,
	_ []caddyconfig.Warning,
	always bool,
) (*caddytls.AutomationPolicy, error) {
	// global options that map directly onto automation policy fields
	issuers, hasIssuers := options["cert_issuer"]
	_, hasLocalCerts := options["local_certs"]
	keyType, hasKeyType := options["key_type"]
	ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
	renewalWindowRatio, hasRenewalWindowRatio := options["renewal_window_ratio"]

	hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling || hasRenewalWindowRatio

	// global ACME-issuer defaults; these don't set policy fields directly,
	// but their presence means the policy's issuers need filling in below
	globalACMECA := options["acme_ca"]
	globalACMECARoot := options["acme_ca_root"]
	_, globalACMEDNS := options["acme_dns"] // can be set to nil (to use globally-defined "dns" value instead), but it is still set
	globalACMEEAB := options["acme_eab"]
	globalPreferredChains := options["preferred_chains"]
	hasGlobalACMEDefaults := globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS || globalACMEEAB != nil || globalPreferredChains != nil

	// if there are no global options related to automation policies
	// set, then we can just return right away
	if !hasGlobalAutomationOpts && !hasGlobalACMEDefaults {
		if always {
			return new(caddytls.AutomationPolicy), nil
		}
		return nil, nil
	}

	ap := new(caddytls.AutomationPolicy)
	if hasKeyType {
		ap.KeyType = keyType.(string)
	}

	// local_certs is itself shorthand for the internal issuer, so
	// combining it with an explicit issuer list is contradictory
	if hasIssuers && hasLocalCerts {
		return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer")
	}

	if hasIssuers {
		ap.Issuers = issuers.([]certmagic.Issuer)
	} else if hasLocalCerts {
		ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
	}

	// apply global ACME defaults to any issuers we just set
	if hasGlobalACMEDefaults {
		for i := range ap.Issuers {
			if err := fillInGlobalACMEDefaults(ap.Issuers[i], options); err != nil {
				return nil, fmt.Errorf("filling in global issuer defaults for issuer %d: %v", i, err)
			}
		}
	}

	if hasOCSPStapling {
		ocspConfig := ocspStapling.(certmagic.OCSPConfig)
		ap.DisableOCSPStapling = ocspConfig.DisableStapling
		ap.OCSPOverrides = ocspConfig.ResponderOverrides
	}

	if hasRenewalWindowRatio {
		ap.RenewalWindowRatio = renewalWindowRatio.(float64)
	}

	return ap, nil
}

// consolidateAutomationPolicies combines automation policies that are the same,
// for a cleaner overall output.
func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
	// sort from most specific to least specific; we depend on this ordering
	sort.SliceStable(aps, func(i, j int) bool {
		if automationPolicyIsSubset(aps[i], aps[j]) {
			return true
		}
		if automationPolicyIsSubset(aps[j], aps[i]) {
			return false
		}
		return len(aps[i].SubjectsRaw) > len(aps[j].SubjectsRaw)
	})

	emptyAPCount := 0
	origLenAPs := len(aps)
	// compute the number of empty policies (disregarding subjects) - see #4128
	// (a policy equal to the zero value except for its subjects is "empty")
	emptyAP := new(caddytls.AutomationPolicy)
	for i := 0; i < len(aps); i++ {
		emptyAP.SubjectsRaw = aps[i].SubjectsRaw
		if reflect.DeepEqual(aps[i], emptyAP) {
			emptyAPCount++
			if !automationPolicyHasAllPublicNames(aps[i]) {
				// if this automation policy has internal names, we might as well remove it
				// so auto-https can implicitly use the internal issuer
				aps = slices.Delete(aps, i, i+1)
				i-- // compensate for the deletion so the next element isn't skipped
			}
		}
	}
	// If all policies are empty, we can return nil, as there is no need to set any policy
	if emptyAPCount == origLenAPs {
		return nil
	}

	// remove or combine duplicate policies
outer:
	for i := 0; i < len(aps); i++ {
		// compare only with next policies; we sorted by specificity so we must not delete earlier policies
		for j := i + 1; j < len(aps); j++ {
			// if they're exactly equal in every way, just keep one of them
			if reflect.DeepEqual(aps[i], aps[j]) {
				aps = slices.Delete(aps, j, j+1)
				// must re-evaluate current i against next j; can't skip it!
				// even if i decrements to -1, will be incremented to 0 immediately
				i--
				continue outer
			}

			// if the policy is the same, we can keep just one, but we have
			// to be careful which one we keep; if only one has any hostnames
			// defined, then we need to keep the one without any hostnames,
			// otherwise the one without any subjects (a catch-all) would be
			// eaten up by the one with subjects; and if both have subjects, we
			// need to combine their lists
			if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
				reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
				bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
				aps[i].MustStaple == aps[j].MustStaple &&
				aps[i].KeyType == aps[j].KeyType &&
				aps[i].OnDemand == aps[j].OnDemand &&
				aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
				aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
				if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
					// later policy (at j) has no subjects ("catch-all"), so we can
					// remove the identical-but-more-specific policy that comes first
					// AS LONG AS it is not shadowed by another policy before it; e.g.
					// if policy i is for example.com, policy i+1 is '*.com', and policy
					// j is catch-all, we cannot remove policy i because that would
					// cause example.com to be served by the less specific policy for
					// '*.com', which might be different (yes we've seen this happen)
					if automationPolicyShadows(i, aps) >= j {
						aps = slices.Delete(aps, i, i+1)
						i--
						continue outer
					}
				} else {
					// avoid repeated subjects
					for _, subj := range aps[j].SubjectsRaw {
						if !slices.Contains(aps[i].SubjectsRaw, subj) {
							aps[i].SubjectsRaw = append(aps[i].SubjectsRaw, subj)
						}
					}
					aps = slices.Delete(aps, j, j+1)
					j-- // compensate for the deletion so the next candidate isn't skipped
				}
			}
		}
	}

	return aps
}

// automationPolicyIsSubset returns true if a's subjects are a subset
// of b's subjects.
func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool {
	// an empty subject list is a catch-all, so it is a superset of everything
	if len(b.SubjectsRaw) == 0 {
		return true
	}
	if len(a.SubjectsRaw) == 0 {
		return false
	}
	// every subject of a must be matched by at least one subject of b
	for _, aSubj := range a.SubjectsRaw {
		inSuperset := slices.ContainsFunc(b.SubjectsRaw, func(bSubj string) bool {
			return certmagic.MatchWildcard(aSubj, bSubj)
		})
		if !inSuperset {
			return false
		}
	}
	return true
}

// automationPolicyShadows returns the index of a policy that aps[i] shadows;
// in other words, for all policies after position i, if that policy covers
// the same subjects but is less specific, that policy's position is returned,
// or -1 if no shadowing is found. For example, if policy i is for
// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be
// returned, since that policy is shadowed by i, which is in front.
func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int {
	for j := i + 1; j < len(aps); j++ {
		if automationPolicyIsSubset(aps[i], aps[j]) {
			return j
		}
	}
	return -1
}

// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except
// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify
// if the automation policy has OnDemand enabled (i.e. this function is more lenient).
//
// IP subjects are considered as non-qualifying for public certs. Technically, there are
// now public ACME CAs as well as non-ACME CAs that issue IP certificates. But this function
// is used solely for implicit automation (defaults), where it gets really complicated to
// keep track of which issuers support IP certificates in which circumstances. Currently,
// issuers that support IP certificates are very few, and all require some sort of config
// from the user anyway (such as an account credential). Since we cannot implicitly and
// automatically get public IP certs without configuration from the user, we treat IPs as
// not qualifying for public certificates. Users should expressly configure an issuer
// that supports IP certs for that purpose.
func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool {
	return !certmagic.SubjectIsIP(subj) &&
		!certmagic.SubjectIsInternal(subj) &&
		(strings.Count(subj, "*.") < 2 || ap.OnDemand)
}

// automationPolicyHasAllPublicNames returns true if all the names on the policy
// do NOT qualify for public certs OR are tailscale domains.
func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool {
	return !slices.ContainsFunc(ap.SubjectsRaw, func(i string) bool {
		return !subjectQualifiesForPublicCert(ap, i) || isTailscaleDomain(i)
	})
}

// isTailscaleDomain reports whether name is in Tailscale's ".ts.net" zone
// (case-insensitive), which gets its certificates from the Tailscale daemon
// rather than a public CA.
func isTailscaleDomain(name string) bool {
	return strings.HasSuffix(strings.ToLower(name), ".ts.net")
}



================================================
FILE: caddyconfig/httpcaddyfile/tlsapp_test.go
================================================
package httpcaddyfile

import (
	"testing"

	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

// TestAutomationPolicyIsSubset exercises subset detection between
// policies' subject lists, including wildcard and catch-all cases.
func TestAutomationPolicyIsSubset(t *testing.T) {
	for i, test := range []struct {
		a, b   []string
		expect bool
	}{
		{
			a:      []string{"example.com"},
			b:      []string{},
			expect: true,
		},
		{
			a:      []string{},
			b:      []string{"example.com"},
			expect: false,
		},
		{
			a:      []string{"foo.example.com"},
			b:      []string{"*.example.com"},
			expect: true,
		},
		{
			a:      []string{"foo.example.com"},
			b:      []string{"foo.example.com"},
			expect: true,
		},
		{
			a:      []string{"foo.example.com"},
			b:      []string{"example.com"},
			expect: false,
		},
		{
			a:      []string{"example.com", "foo.example.com"},
			b:      []string{"*.com", "*.*.com"},
			expect: true,
		},
		{
			a:      []string{"example.com", "foo.example.com"},
			b:      []string{"*.com"},
			expect: false,
		},
	} {
		apA := &caddytls.AutomationPolicy{SubjectsRaw: test.a}
		apB := &caddytls.AutomationPolicy{SubjectsRaw: test.b}
		if actual := automationPolicyIsSubset(apA, apB); actual != test.expect {
			t.Errorf("Test %d: Expected %t but got %t (A: %v  B: %v)", i, test.expect, actual, test.a, test.b)
		}
	}
}
================================================
FILE: caddyconfig/httploader.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyconfig

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(HTTPLoader{})
}

// HTTPLoader can load Caddy configs over HTTP(S).
//
// If the response is not a JSON config, a config adapter must be specified
// either in the loader config (`adapter`), or in the Content-Type HTTP header
// returned in the HTTP response from the server. The Content-Type header is
// read just like the admin API's `/load` endpoint. If you don't have control
// over the HTTP server (but can still trust its response), you can override
// the Content-Type header by setting the `adapter` property in this config.
type HTTPLoader struct {
	// The method for the request. Default: GET
	Method string `json:"method,omitempty"`

	// The URL of the request.
	URL string `json:"url,omitempty"`

	// HTTP headers to add to the request.
	Headers http.Header `json:"header,omitempty"`

	// Maximum time allowed for a complete connection and request.
	Timeout caddy.Duration `json:"timeout,omitempty"`

	// The name of the config adapter to use, if any. Only needed
	// if the HTTP response is not a JSON config and if the server's
	// Content-Type header is missing or incorrect.
	Adapter string `json:"adapter,omitempty"`

	// TLS connection settings for the request; nil disables custom TLS.
	TLS *struct {
		// Present this instance's managed remote identity credentials to the server.
		UseServerIdentity bool `json:"use_server_identity,omitempty"`

		// PEM-encoded client certificate filename to present to the server.
		ClientCertificateFile string `json:"client_certificate_file,omitempty"`

		// PEM-encoded key to use with the client certificate.
		ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`

		// List of PEM-encoded CA certificate files to add to the same trust
		// store as RootCAPool (or root_ca_pool in the JSON).
		RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
	} `json:"tls,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (HTTPLoader) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.config_loaders.http",
		New: func() caddy.Module { return new(HTTPLoader) },
	}
}

// LoadConfig loads a Caddy config.
func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) {
	repl := caddy.NewReplacer()

	client, err := hl.makeClient(ctx)
	if err != nil {
		return nil, err
	}

	method := repl.ReplaceAll(hl.Method, "")
	if method == "" {
		method = http.MethodGet
	}

	url := repl.ReplaceAll(hl.URL, "")
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	for key, vals := range hl.Headers {
		for _, val := range vals {
			req.Header.Add(repl.ReplaceAll(key, ""), repl.ReplaceKnown(val, ""))
		}
	}

	resp, err := doHttpCallWithRetries(ctx, client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// adapt the config based on either manually-configured adapter or server's response header
	ct := resp.Header.Get("Content-Type")
	if hl.Adapter != "" {
		ct = "text/" + hl.Adapter
	}
	result, warnings, err := adaptByContentType(ct, body)
	if err != nil {
		return nil, err
	}
	for _, warn := range warnings {
		ctx.Logger().Warn(warn.String())
	}

	return result, nil
}

// attemptHttpCall performs one request and treats any status outside 200-499
// as a failure (5xx responses are retried by the caller; 4xx are not).
func attemptHttpCall(client *http.Client, request *http.Request) (*http.Response, error) {
	resp, err := client.Do(request) //nolint:gosec // no SSRF; comes from trusted config
	if err != nil {
		return nil, fmt.Errorf("problem calling http loader url: %v", err)
	} else if resp.StatusCode < 200 || resp.StatusCode > 499 {
		resp.Body.Close()
		return nil, fmt.Errorf("bad response status code from http loader url: %v", resp.StatusCode)
	}
	return resp, nil
}

// doHttpCallWithRetries tries the request up to 10 times, waiting 500ms
// between attempts, and honors context cancellation while waiting.
func doHttpCallWithRetries(ctx caddy.Context, client *http.Client, request *http.Request) (*http.Response, error) {
	var resp *http.Response
	var err error
	const maxAttempts = 10

	for i := range maxAttempts {
		resp, err = attemptHttpCall(client, request)
		if err != nil && i < maxAttempts-1 {
			select {
			case <-time.After(time.Millisecond * 500):
			case <-ctx.Done():
				return resp, ctx.Err()
			}
		} else {
			break
		}
	}

	return resp, err
}

// makeClient builds the HTTP client used to fetch the config, applying the
// configured timeout and any TLS client-auth / trust-store settings.
func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) {
	client := &http.Client{
		Timeout: time.Duration(hl.Timeout),
	}

	if hl.TLS != nil {
		var tlsConfig *tls.Config

		// client authentication
		if hl.TLS.UseServerIdentity {
			certs, err := ctx.IdentityCredentials(ctx.Logger())
			if err != nil {
				return nil, fmt.Errorf("getting server identity credentials: %v", err)
			}
			// See https://github.com/securego/gosec/issues/1054#issuecomment-2072235199
			//nolint:gosec
			tlsConfig = &tls.Config{Certificates: certs}
		} else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" {
			cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile)
			if err != nil {
				return nil, err
			}
			//nolint:gosec
			tlsConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
		}

		// trusted server certs
		if len(hl.TLS.RootCAPEMFiles) > 0 {
			rootPool := x509.NewCertPool()
			for _, pemFile := range hl.TLS.RootCAPEMFiles {
				pemData, err := os.ReadFile(pemFile)
				if err != nil {
					return nil, fmt.Errorf("failed reading ca cert: %v", err)
				}
				rootPool.AppendCertsFromPEM(pemData)
			}
			if tlsConfig == nil {
				tlsConfig = new(tls.Config)
			}
			tlsConfig.RootCAs = rootPool
		}

		client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
	}

	return client, nil
}

var _ caddy.ConfigLoader = (*HTTPLoader)(nil)



================================================
FILE: caddyconfig/load.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyconfig

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"mime"
	"net/http"
	"strings"
	"sync"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(adminLoad{})
}

// adminLoad is a module that provides the /load endpoint
// for the Caddy admin API. The only reason it's not baked
// into the caddy package directly is because of the import
// of the caddyconfig package for its GetAdapter function.
// If the caddy package depends on the caddyconfig package,
// then the caddyconfig package will not be able to import
// the caddy package, and it can more easily cause backward
// edges in the dependency tree (i.e. import cycle).
// Fortunately, the admin API has first-class support for
// adding endpoints from modules.
type adminLoad struct{}

// CaddyModule returns the Caddy module information.
func (adminLoad) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "admin.api.load",
		New: func() caddy.Module { return new(adminLoad) },
	}
}

// Routes returns a route for the /load endpoint.
func (al adminLoad) Routes() []caddy.AdminRoute {
	return []caddy.AdminRoute{
		{
			Pattern: "/load",
			Handler: caddy.AdminHandlerFunc(al.handleLoad),
		},
		{
			Pattern: "/adapt",
			Handler: caddy.AdminHandlerFunc(al.handleAdapt),
		},
	}
}

// handleLoad replaces the entire current configuration with
// a new one provided in the response body. It supports config
// adapters through the use of the Content-Type header. A
// config that is identical to the currently-running config
// will be a no-op unless Cache-Control: must-revalidate is set.
func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodPost {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed"),
		}
	}

	// use a pooled buffer to avoid allocating for every request body read
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	_, err := io.Copy(buf, r.Body)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("reading request body: %v", err),
		}
	}
	body := buf.Bytes()

	// if the config is formatted other than Caddy's native
	// JSON, we need to adapt it before loading it
	if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" {
		result, warnings, err := adaptByContentType(ctHeader, body)
		if err != nil {
			return caddy.APIError{
				HTTPStatus: http.StatusBadRequest,
				Err:        err,
			}
		}
		if len(warnings) > 0 {
			respBody, err := json.Marshal(warnings)
			if err != nil {
				caddy.Log().Named("admin.api.load").Error(err.Error())
			}
			_, _ = w.Write(respBody) //nolint:gosec // false positive: no XSS here
		}
		body = result
	}

	forceReload := r.Header.Get("Cache-Control") == "must-revalidate"

	err = caddy.Load(body, forceReload)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("loading config: %v", err),
		}
	}

	// If this request changed the config, clear the last
	// config info we have stored, if it is different from
	// the original source.
	caddy.ClearLastConfigIfDifferent(
		r.Header.Get("Caddy-Config-Source-File"),
		r.Header.Get("Caddy-Config-Source-Adapter"))

	caddy.Log().Named("admin.api").Info("load complete")

	return nil
}

// handleAdapt adapts the given Caddy config to JSON and responds with the result.
func (adminLoad) handleAdapt(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodPost {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed"),
		}
	}

	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	_, err := io.Copy(buf, r.Body)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("reading request body: %v", err),
		}
	}

	result, warnings, err := adaptByContentType(r.Header.Get("Content-Type"), buf.Bytes())
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        err,
		}
	}

	out := struct {
		Warnings []Warning       `json:"warnings,omitempty"`
		Result   json.RawMessage `json:"result"`
	}{
		Warnings: warnings,
		Result:   result,
	}

	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(out)
}

// adaptByContentType adapts body to Caddy JSON using the adapter specified by contentType.
// If contentType is empty or ends with "/json", the input will be returned, as a no-op.
func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) { // assume JSON as the default if contentType == "" { return body, nil, nil } ct, _, err := mime.ParseMediaType(contentType) if err != nil { return nil, nil, caddy.APIError{ HTTPStatus: http.StatusBadRequest, Err: fmt.Errorf("invalid Content-Type: %v", err), } } // if already JSON, no need to adapt if strings.HasSuffix(ct, "/json") { return body, nil, nil } // adapter name should be suffix of MIME type _, adapterName, slashFound := strings.Cut(ct, "/") if !slashFound { return nil, nil, fmt.Errorf("malformed Content-Type") } cfgAdapter := GetAdapter(adapterName) if cfgAdapter == nil { return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName) } result, warnings, err := cfgAdapter.Adapt(body, nil) if err != nil { return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err) } return result, warnings, nil } var bufPool = sync.Pool{ New: func() any { return new(bytes.Buffer) }, } ================================================ FILE: caddytest/a.caddy.localhost.crt ================================================ -----BEGIN CERTIFICATE----- MIID5zCCAs8CFG4+w/pqR5AZQ+aVB330uRRRKMF0MA0GCSqGSIb3DQEBCwUAMIGv MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxGzAZBgNVBAoMEkxvY2FsIERldmVs b3BlbWVudDEbMBkGA1UEBwwSTG9jYWwgRGV2ZWxvcGVtZW50MRowGAYDVQQDDBFh LmNhZGR5LmxvY2FsaG9zdDEbMBkGA1UECwwSTG9jYWwgRGV2ZWxvcGVtZW50MSAw HgYJKoZIhvcNAQkBFhFhZG1pbkBjYWRkeS5sb2NhbDAeFw0yMDAzMTMxODUwMTda Fw0zMDAzMTExODUwMTdaMIGvMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxGzAZ BgNVBAoMEkxvY2FsIERldmVsb3BlbWVudDEbMBkGA1UEBwwSTG9jYWwgRGV2ZWxv cGVtZW50MRowGAYDVQQDDBFhLmNhZGR5LmxvY2FsaG9zdDEbMBkGA1UECwwSTG9j YWwgRGV2ZWxvcGVtZW50MSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBjYWRkeS5sb2Nh bDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMd9pC9wF7j0459FndPs Deud/rq41jEZFsVOVtjQgjS1A5ct6NfeMmSlq8i1F7uaTMPZjbOHzY6y6hzLc9+y /VWNgyUC543HjXnNTnp9Xug6tBBxOxvRMw5mv2nAyzjBGDePPgN84xKhOXG2Wj3u 
fOZ+VPVISefRNvjKfN87WLJ0B0HI9wplG5ASVdPQsWDY1cndrZgt2sxQ/3fjIno4 VvrgRWC9Penizgps/a0ZcFZMD/6HJoX/mSZVa1LjopwbMTXvyHCpXkth21E+rBt6 I9DMHerdioVQcX25CqPmAwePxPZSNGEQo/Qu32kzcmscmYxTtYBhDa+yLuHgGggI j7ECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAP/94KPtkpYtkWADnhtzDmgQ6Q1pH SubTUZdCwQtm6/LrvpT+uFNsOj4L3Mv3TVUnIQDmKd5VvR42W2MRBiTN2LQptgEn C7g9BB+UA9kjL3DPk1pJMjzxLHohh0uNLi7eh4mAj8eNvjz9Z4qMWPQoVS0y7/ZK cCBRKh2GkIqKm34ih6pX7xmMpPEQsFoTVPRHYJfYD1SZ8Iui+EN+7WqLuJWPsPXw JM1HuZKn7pZmJU2MZZBsrupHGUvNMbBg2mFJcxt4D1VvU+p+a67PSjpFQ6dJG2re pZoF+N1vMGAFkxe6UqhcC/bXDX+ILVQHJ+RNhzDO6DcWf8dRrC2LaJk3WA== -----END CERTIFICATE----- ================================================ FILE: caddytest/a.caddy.localhost.key ================================================ -----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAx32kL3AXuPTjn0Wd0+wN653+urjWMRkWxU5W2NCCNLUDly3o 194yZKWryLUXu5pMw9mNs4fNjrLqHMtz37L9VY2DJQLnjceNec1Oen1e6Dq0EHE7 G9EzDma/acDLOMEYN48+A3zjEqE5cbZaPe585n5U9UhJ59E2+Mp83ztYsnQHQcj3 CmUbkBJV09CxYNjVyd2tmC3azFD/d+MiejhW+uBFYL096eLOCmz9rRlwVkwP/ocm hf+ZJlVrUuOinBsxNe/IcKleS2HbUT6sG3oj0Mwd6t2KhVBxfbkKo+YDB4/E9lI0 YRCj9C7faTNyaxyZjFO1gGENr7Iu4eAaCAiPsQIDAQABAoIBAQDD/YFIBeWYlifn e9risQDAIrp3sk7lb9O6Rwv1+Wxi4hBEABvJsYhq74VFK/3EF4UhyWR5JIvkjYyK e6w887oGyoA05ZSe65XoO7fFidSrbbkoikZbPv3dQT7/ZCWEfdkQBNAVVyY0UGeC e3hPbjYRsb5AOSQ694X9idqC6uhqcOrBDjITFrctUoP4S6l9A6a+mLSUIwiICcuh mrNl+j0lzy7DMXRp/Z5Hyo5kuUlrC0dCLa1UHqtrrK7MR55AVEOihSNp1w+OC+vw f0VjE4JUtO7RQEQUmD1tDfLXwNfMFeWaobB2W0WMvRg0IqoitiqPxsPHRm56OxfM SRo/Q7QBAoGBAP8DapzBMuaIcJ7cE8Yl07ZGndWWf8buIKIItGF8rkEO3BXhrIke EmpOi+ELtpbMOG0APhORZyQ58f4ZOVrqZfneNKtDiEZV4mJZaYUESm1pU+2Y6+y5 g4bpQSVKN0ow0xR+MH7qDYtSlsmBU7qAOz775L7BmMA1Bnu72aN/H1JBAoGBAMhD OzqCSakHOjUbEd22rPwqWmcIyVyo04gaSmcVVT2dHbqR4/t0gX5a9D9U2qwyO6xi /R+PXyMd32xIeVR2D/7SQ0x6dK68HXICLV8ofHZ5UQcHbxy5og4v/YxSZVTkN374 cEsUeyB0s/UPOHLktFU5hpIlON72/Rp7b+pNIwFxAoGAczpq+Qu/YTWzlcSh1r4O 7OT5uqI3eH7vFehTAV3iKxl4zxZa7NY+wfRd9kFhrr/2myIp6pOgBFl+hC+HoBIc 
JAyIxf5M3GNAWOpH6MfojYmzV7/qktu8l8BcJGplk0t+hVsDtMUze4nFAqZCXBpH Kw2M7bjyuZ78H/rgu6TcVUECgYEAo1M5ldE2U/VCApeuLX1TfWDpU8i1uK0zv3d5 oLKkT1i5KzTak3SEO9HgC1qf8PoS8tfUio26UICHe99rnHehOfivzEq+qNdgyF+A M3BoeZMdgzcL5oh640k+Zte4LtDlddcWdhUhCepD7iPYrNNbQ3pkBwL2a9lRuOxc 7OC2IPECgYBH8f3OrwXjDltIG1dDvuDPNljxLZbFEFbQyVzMePYNftgZknAyGEdh NW/LuWeTzstnmz/s6RE3jN5ZrrMa4sW77VA9+yU9QW2dkHqFyukQ4sfuNg6kDDNZ +lqZYMCLw0M5P9fIbmnIYwey7tXkHfmzoCpnYHGQDN6hL0Bh0zGwmg== -----END RSA PRIVATE KEY----- ================================================ FILE: caddytest/caddy.ca.cer ================================================ -----BEGIN CERTIFICATE----- MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQEL BQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkw ODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcN AQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU 7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl0 3WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45t wOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNx tdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTU ApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAd BgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS 2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5u NY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkq hkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfK D66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEO fG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnk oNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZ ks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdle Ih6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== -----END CERTIFICATE----- ================================================ FILE: caddytest/caddy.localhost.crt ================================================ -----BEGIN CERTIFICATE----- 
MIID5zCCAs8CFFmAAFKV79uhzxc5qXbUw3oBNsYXMA0GCSqGSIb3DQEBCwUAMIGv MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxGzAZBgNVBAoMEkxvY2FsIERldmVs b3BlbWVudDEbMBkGA1UEBwwSTG9jYWwgRGV2ZWxvcGVtZW50MRowGAYDVQQDDBEq LmNhZGR5LmxvY2FsaG9zdDEbMBkGA1UECwwSTG9jYWwgRGV2ZWxvcGVtZW50MSAw HgYJKoZIhvcNAQkBFhFhZG1pbkBjYWRkeS5sb2NhbDAeFw0yMDAzMDIwODAxMTZa Fw0zMDAyMjgwODAxMTZaMIGvMQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxGzAZ BgNVBAoMEkxvY2FsIERldmVsb3BlbWVudDEbMBkGA1UEBwwSTG9jYWwgRGV2ZWxv cGVtZW50MRowGAYDVQQDDBEqLmNhZGR5LmxvY2FsaG9zdDEbMBkGA1UECwwSTG9j YWwgRGV2ZWxvcGVtZW50MSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBjYWRkeS5sb2Nh bDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJngfeirQkWaU8ihgIC5 SKpRQX/3koRjljDK/oCbhLs+wg592kIwVv06l7+mn7NSaNBloabjuA1GqyLRsNLL ptrv0HvXa5qLx28+icsb2Ny3dJnQaj9w9PwjxQ1qZqEJfWRH1D8Vz9AmB+QSV/Gu 8e8alGFewlYZVfH1kbxoTT6QorF37TeA3bh1fgKFtzsGYKswcaZNdDBBHzLunCKZ HU6U6L45hm+yLADj3mmDLafUeiVOt6MRLLoSD1eLRVSXGrNo+brJ87zkZntI9+W1 JxOBoXtZCwka7k2DlAtLihsrmBZA2ZC9yVeu/SQy3qb3iCNnTFTCyAnWeTCr6Tcq 6w8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAOWfXqpAmD4C3wGiMeZAeaaS4hDAR +JmN+avPDA6F6Bq7DB4NJuIwVUlaDL2s07w5VJJtW52aZVKoBlgHR5yG/XUli6J7 YUJRmdQJvHUSu26cmKvyoOaTrEYbmvtGICWtZc8uTlMf9wQZbJA4KyxTgEQJDXsZ B2XFe+wVdhAgEpobYDROi+l/p8TL5z3U24LpwVTcJy5sEZVv7Wfs886IyxU8ORt8 VZNcDiH6V53OIGeiufIhia/mPe6jbLntfGZfIFxtCcow4IA/lTy1ned7K5fmvNNb ZilxOQUk+wVK8genjdrZVAnAxsYLHJIb5yf9O7rr6fWciVMF3a0k5uNK1w== -----END CERTIFICATE----- ================================================ FILE: caddytest/caddy.localhost.key ================================================ -----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAmeB96KtCRZpTyKGAgLlIqlFBf/eShGOWMMr+gJuEuz7CDn3a QjBW/TqXv6afs1Jo0GWhpuO4DUarItGw0sum2u/Qe9drmovHbz6JyxvY3Ld0mdBq P3D0/CPFDWpmoQl9ZEfUPxXP0CYH5BJX8a7x7xqUYV7CVhlV8fWRvGhNPpCisXft N4DduHV+AoW3OwZgqzBxpk10MEEfMu6cIpkdTpTovjmGb7IsAOPeaYMtp9R6JU63 oxEsuhIPV4tFVJcas2j5usnzvORme0j35bUnE4Ghe1kLCRruTYOUC0uKGyuYFkDZ kL3JV679JDLepveII2dMVMLICdZ5MKvpNyrrDwIDAQABAoIBAFcPK01zb6hfm12c 
+k5aBiHOnUdgc/YRPg1XHEz5MEycQkDetZjTLrRQ7UBSbnKPgpu9lIsOtbhVLkgh
6XAqJroiCou2oruqr+hhsqZGmBiwdvj7cNF6ADGTr05az7v22YneFdinZ481pStF
sZocx+bm2+KHMV5zMSwXKyA0xtdJLxs2yklniDBxSZRppgppq1pDPprP5DkgKPfe
3ekUmbQd5bHmivhW8ItbJLuf82XSsMBZ9ZhKiKIlWlbKAgiSV3SqnUQb5fi7l8hG
yYZxbuCUIGFwKmEpUBBt/nyxrOlMiNtDh9JhrPmijTV3slq70pCLwLL/Ai2aeear
EVA5VhkCgYEAyAmxfPqc2P7BsDAp67/sA7OEPso9qM4WyuWiVdlX2gb9TLNLYbPX
Kk/UmpAIVzpoTAGY5Zp3wkvdD/ou8uUQsE8ioNn4S1a4G9XURH1wVhcEbUiAKI1S
QVBH9B/Pj3eIp5OTKwob0Wj7DNdxoH7ed/Eok0EaTWzOA8pCWADKv/MCgYEAxOzY
YsX7Nl+eyZr2+9unKyeAK/D1DCT/o99UUAHx72/xaBVP/06cfzpvKBNcF9iYc+fq
R1yIUIrDRoSmYKBq+Kb3+nOg1nrqih/NBTokbTiI4Q+/30OQt0Al1e7y9iNKqV8H
jYZItzluGNrWKedZbATwBwbVCY2jnNl6RMDnS3UCgYBxj3cwQUHLuoyQjjcuO80r
qLzZvIxWiXDNDKIk5HcIMlGYOmz/8U2kGp/SgxQJGQJeq8V2C0QTjGfaCyieAcaA
oNxCvptDgd6RBsoze5bLeNOtiqwe2WOp6n5+q5R0mOJ+Z7vzghCayGNFPgWmnH+F
TeW/+wSIkc0+v5L8TK7NWwKBgBrlWlyLO9deUfqpHqihhICBYaEexOlGuF+yZfqT
eW7BdFBJ8OYm33sFCR+JHV/oZlIWT8o1Wizd9vPPtEWoQ1P4wg/D8Si6GwSIeWEI
YudD/HX4x7T/rmlI6qIAg9CYW18sqoRq3c2gm2fro6qPfYgiWIItLbWjUcBfd7Ki
QjTtAoGARKdRv3jMWL84rlEx1nBRgL3pe9Dt+Uxzde2xT3ZeF+5Hp9NfU01qE6M6
1I6H64smqpetlsXmCEVKwBemP3pJa6avLKgIYiQvHAD/v4rs9mqgy1RTqtYyGNhR
1A/6dKkbiZ6wzePLLPasXVZxSKEviXf5gJooqumQVSVhCswyCZ0=
-----END RSA PRIVATE KEY-----



================================================
FILE: caddytest/caddytest.go
================================================
package caddytest

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"net"
	"net/http"
	"net/http/cookiejar"
	"os"
	"path"
	"reflect"
	"regexp"
	"runtime"
	"strings"
	"testing"
	"time"

	"github.com/aryann/difflib"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	// plug in Caddy modules here
	_ "github.com/caddyserver/caddy/v2/modules/standard"
)

// Config store any configuration required to make the tests run
type Config struct {
	// Port we expect caddy to be listening on
	AdminPort int
	// Certificates we expect to be loaded before attempting to run the tests
	Certificates []string
	// TestRequestTimeout is the time to wait for a http request to complete
	TestRequestTimeout time.Duration
	// LoadRequestTimeout is the time to wait for the config to be loaded against the caddy server
	LoadRequestTimeout time.Duration
}

// Default testing values
var Default = Config{
	AdminPort:          2999, // different from what a real server also running on a developer's machine might be
	Certificates:       []string{"/caddy.localhost.crt", "/caddy.localhost.key"},
	TestRequestTimeout: 5 * time.Second,
	LoadRequestTimeout: 5 * time.Second,
}

// matchKey and matchCert recognize certificate/key file references in test
// configs so they can be rewritten to absolute paths (see prependCaddyFilePath).
var (
	matchKey  = regexp.MustCompile(`(/[\w\d\.]+\.key)`)
	matchCert = regexp.MustCompile(`(/[\w\d\.]+\.crt)`)
)

// Tester represents an instance of a test client.
type Tester struct {
	Client       *http.Client
	configLoaded bool
	t            testing.TB
	config       Config
}

// NewTester will create a new testing client with an attached cookie jar
func NewTester(t testing.TB) *Tester {
	jar, err := cookiejar.New(nil)
	if err != nil {
		t.Fatalf("failed to create cookiejar: %s", err)
	}

	return &Tester{
		Client: &http.Client{
			Transport: CreateTestingTransport(),
			Jar:       jar,
			Timeout:   Default.TestRequestTimeout,
		},
		configLoaded: false,
		t:            t,
		config:       Default,
	}
}

// WithDefaultOverrides this will override the default test configuration with the provided values.
func (tc *Tester) WithDefaultOverrides(overrides Config) *Tester {
	// only non-zero fields override the defaults
	if overrides.AdminPort != 0 {
		tc.config.AdminPort = overrides.AdminPort
	}
	if len(overrides.Certificates) > 0 {
		tc.config.Certificates = overrides.Certificates
	}
	if overrides.TestRequestTimeout != 0 {
		tc.config.TestRequestTimeout = overrides.TestRequestTimeout
		tc.Client.Timeout = overrides.TestRequestTimeout
	}
	if overrides.LoadRequestTimeout != 0 {
		tc.config.LoadRequestTimeout = overrides.LoadRequestTimeout
	}

	return tc
}

// configLoadError wraps the admin API's error response body so callers can
// assert on its contents.
type configLoadError struct {
	Response string
}

func (e configLoadError) Error() string { return e.Response }

// timeElapsed logs how long the named operation took since start.
func timeElapsed(start time.Time, name string) {
	elapsed := time.Since(start)
	log.Printf("%s took %s", name, elapsed)
}

// InitServer this will configure the server with a configurion of a specific
// type. The configType must be either "json" or the adapter type.
func (tc *Tester) InitServer(rawConfig string, configType string) {
	if err := tc.initServer(rawConfig, configType); err != nil {
		tc.t.Logf("failed to load config: %s", err)
		tc.t.Fail()
	}
	if err := tc.ensureConfigRunning(rawConfig, configType); err != nil {
		tc.t.Logf("failed ensuring config is running: %s", err)
		tc.t.Fail()
	}
}

// InitServer this will configure the server with a configurion of a specific
// type. The configType must be either "json" or the adapter type.
func (tc *Tester) initServer(rawConfig string, configType string) error {
	if testing.Short() {
		tc.t.SkipNow()
		return nil
	}

	err := validateTestPrerequisites(tc)
	if err != nil {
		tc.t.Skipf("skipping tests as failed integration prerequisites. %s", err)
		return nil
	}

	// dump the active config on failure to aid debugging
	tc.t.Cleanup(func() {
		if tc.t.Failed() && tc.configLoaded {
			res, err := http.Get(fmt.Sprintf("http://localhost:%d/config/", tc.config.AdminPort))
			if err != nil {
				tc.t.Log("unable to read the current config")
				return
			}
			defer res.Body.Close()
			body, _ := io.ReadAll(res.Body)

			var out bytes.Buffer
			_ = json.Indent(&out, body, "", "  ")
			tc.t.Logf("----------- failed with config -----------\n%s", out.String())
		}
	})

	rawConfig = prependCaddyFilePath(rawConfig)
	// normalize JSON config
	if configType == "json" {
		tc.t.Logf("Before: %s", rawConfig)
		var conf any
		if err := json.Unmarshal([]byte(rawConfig), &conf); err != nil {
			return err
		}
		c, err := json.Marshal(conf)
		if err != nil {
			return err
		}
		rawConfig = string(c)
		tc.t.Logf("After: %s", rawConfig)
	}
	client := &http.Client{
		Timeout: tc.config.LoadRequestTimeout,
	}
	start := time.Now()
	req, err := http.NewRequest("POST", fmt.Sprintf("http://localhost:%d/load", tc.config.AdminPort), strings.NewReader(rawConfig))
	if err != nil {
		tc.t.Errorf("failed to create request. %s", err)
		return err
	}

	if configType == "json" {
		req.Header.Add("Content-Type", "application/json")
	} else {
		req.Header.Add("Content-Type", "text/"+configType)
	}

	res, err := client.Do(req) //nolint:gosec // no SSRF because URL is hard-coded to localhost, and port comes from config
	if err != nil {
		tc.t.Errorf("unable to contact caddy server. %s", err)
		return err
	}
	timeElapsed(start, "caddytest: config load time")

	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	if err != nil {
		tc.t.Errorf("unable to read response. %s", err)
		return err
	}

	if res.StatusCode != 200 {
		return configLoadError{Response: string(body)}
	}

	tc.configLoaded = true
	return nil
}

// ensureConfigRunning polls the admin API until the active config matches the
// config that was POSTed (after adapting non-JSON configs), or gives up after
// 10 attempts.
func (tc *Tester) ensureConfigRunning(rawConfig string, configType string) error {
	expectedBytes := []byte(prependCaddyFilePath(rawConfig))
	if configType != "json" {
		adapter := caddyconfig.GetAdapter(configType)
		if adapter == nil {
			return fmt.Errorf("adapter of config type is missing: %s", configType)
		}
		expectedBytes, _, _ = adapter.Adapt([]byte(rawConfig), nil)
	}

	var expected any
	err := json.Unmarshal(expectedBytes, &expected)
	if err != nil {
		return err
	}

	client := &http.Client{
		Timeout: tc.config.LoadRequestTimeout,
	}

	fetchConfig := func(client *http.Client) any {
		resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", tc.config.AdminPort))
		if err != nil {
			return nil
		}
		defer resp.Body.Close()
		actualBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return nil
		}
		var actual any
		err = json.Unmarshal(actualBytes, &actual)
		if err != nil {
			return nil
		}
		return actual
	}

	for retries := 10; retries > 0; retries-- {
		if reflect.DeepEqual(expected, fetchConfig(client)) {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	tc.t.Errorf("POSTed configuration isn't active")
	return errors.New("EnsureConfigRunning: POSTed configuration isn't active")
}

// initConfig is the minimal Caddyfile used to start the in-process server;
// %d is filled with the admin port.
const initConfig = `{
	admin localhost:%d
}
`

// validateTestPrerequisites ensures the certificates are available in the
// designated path and Caddy sub-process is running.
func validateTestPrerequisites(tc *Tester) error { // check certificates are found for _, certName := range tc.config.Certificates { if _, err := os.Stat(getIntegrationDir() + certName); errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("caddy integration test certificates (%s) not found", certName) } } if isCaddyAdminRunning(tc) != nil { // setup the init config file, and set the cleanup afterwards f, err := os.CreateTemp("", "") if err != nil { return err } tc.t.Cleanup(func() { os.Remove(f.Name()) //nolint:gosec // false positive, filename comes from std lib, no path traversal }) if _, err := fmt.Fprintf(f, initConfig, tc.config.AdminPort); err != nil { return err } // start inprocess caddy server os.Args = []string{"caddy", "run", "--config", f.Name(), "--adapter", "caddyfile"} go func() { caddycmd.Main() }() // wait for caddy to start serving the initial config for retries := 10; retries > 0 && isCaddyAdminRunning(tc) != nil; retries-- { time.Sleep(1 * time.Second) } } // one more time to return the error return isCaddyAdminRunning(tc) } func isCaddyAdminRunning(tc *Tester) error { // assert that caddy is running client := &http.Client{ Timeout: tc.config.LoadRequestTimeout, } resp, err := client.Get(fmt.Sprintf("http://localhost:%d/config/", tc.config.AdminPort)) if err != nil { return fmt.Errorf("caddy integration test caddy server not running. 
Expected to be listening on localhost:%d", tc.config.AdminPort) } resp.Body.Close() return nil } func getIntegrationDir() string { _, filename, _, ok := runtime.Caller(1) if !ok { panic("unable to determine the current file path") } return path.Dir(filename) } // use the convention to replace /[certificatename].[crt|key] with the full path // this helps reduce the noise in test configurations and also allow this // to run in any path func prependCaddyFilePath(rawConfig string) string { r := matchKey.ReplaceAllString(rawConfig, getIntegrationDir()+"$1") r = matchCert.ReplaceAllString(r, getIntegrationDir()+"$1") return r } // CreateTestingTransport creates a testing transport that forces call dialing connections to happen locally func CreateTestingTransport() *http.Transport { dialer := net.Dialer{ Timeout: 5 * time.Second, KeepAlive: 5 * time.Second, DualStack: true, } dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) { parts := strings.Split(addr, ":") destAddr := fmt.Sprintf("127.0.0.1:%s", parts[1]) log.Printf("caddytest: redirecting the dialer from %s to %s", addr, destAddr) return dialer.DialContext(ctx, network, destAddr) } return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: dialContext, ForceAttemptHTTP2: true, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 5 * time.Second, ExpectContinueTimeout: 1 * time.Second, TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec } } // AssertLoadError will load a config and expect an error func AssertLoadError(t *testing.T, rawConfig string, configType string, expectedError string) { t.Helper() tc := NewTester(t) err := tc.initServer(rawConfig, configType) if !strings.Contains(err.Error(), expectedError) { t.Errorf("expected error \"%s\" but got \"%s\"", expectedError, err.Error()) } } // AssertRedirect makes a request and asserts the redirection happens func (tc *Tester) AssertRedirect(requestURI string, expectedToLocation 
string, expectedStatusCode int) *http.Response { tc.t.Helper() redirectPolicyFunc := func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } // using the existing client, we override the check redirect policy for this test old := tc.Client.CheckRedirect tc.Client.CheckRedirect = redirectPolicyFunc defer func() { tc.Client.CheckRedirect = old }() resp, err := tc.Client.Get(requestURI) if err != nil { tc.t.Errorf("failed to call server %s", err) return nil } if expectedStatusCode != resp.StatusCode { tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", requestURI, expectedStatusCode, resp.StatusCode) } loc, err := resp.Location() if err != nil { tc.t.Errorf("requesting \"%s\" expected location: \"%s\" but got error: %s", requestURI, expectedToLocation, err) } if loc == nil && expectedToLocation != "" { tc.t.Errorf("requesting \"%s\" expected a Location header, but didn't get one", requestURI) } if loc != nil { if expectedToLocation != loc.String() { tc.t.Errorf("requesting \"%s\" expected location: \"%s\" but got \"%s\"", requestURI, expectedToLocation, loc.String()) } } return resp } // CompareAdapt adapts a config and then compares it against an expected result func CompareAdapt(t testing.TB, filename, rawConfig string, adapterName string, expectedResponse string) bool { t.Helper() cfgAdapter := caddyconfig.GetAdapter(adapterName) if cfgAdapter == nil { t.Logf("unrecognized config adapter '%s'", adapterName) return false } options := make(map[string]any) result, warnings, err := cfgAdapter.Adapt([]byte(rawConfig), options) if err != nil { t.Logf("adapting config using %s adapter: %v", adapterName, err) return false } // prettify results to keep tests human-manageable var prettyBuf bytes.Buffer err = json.Indent(&prettyBuf, result, "", "\t") if err != nil { return false } result = prettyBuf.Bytes() if len(warnings) > 0 { for _, w := range warnings { t.Logf("warning: %s:%d: %s: %s", filename, w.Line, w.Directive, 
w.Message) } } diff := difflib.Diff( strings.Split(expectedResponse, "\n"), strings.Split(string(result), "\n")) // scan for failure failed := false for _, d := range diff { if d.Delta != difflib.Common { failed = true break } } if failed { for _, d := range diff { switch d.Delta { case difflib.Common: fmt.Printf(" %s\n", d.Payload) case difflib.LeftOnly: fmt.Printf(" - %s\n", d.Payload) case difflib.RightOnly: fmt.Printf(" + %s\n", d.Payload) } } return false } return true } // AssertAdapt adapts a config and then tests it against an expected result func AssertAdapt(t testing.TB, rawConfig string, adapterName string, expectedResponse string) { t.Helper() ok := CompareAdapt(t, "Caddyfile", rawConfig, adapterName, expectedResponse) if !ok { t.Fail() } } // Generic request functions func applyHeaders(t testing.TB, req *http.Request, requestHeaders []string) { requestContentType := "" for _, requestHeader := range requestHeaders { arr := strings.SplitAfterN(requestHeader, ":", 2) k := strings.TrimRight(arr[0], ":") v := strings.TrimSpace(arr[1]) if k == "Content-Type" { requestContentType = v } t.Logf("Request header: %s => %s", k, v) req.Header.Set(k, v) } if requestContentType == "" { t.Logf("Content-Type header not provided") } } // AssertResponseCode will execute the request and verify the status code, returns a response for additional assertions func (tc *Tester) AssertResponseCode(req *http.Request, expectedStatusCode int) *http.Response { tc.t.Helper() resp, err := tc.Client.Do(req) //nolint:gosec // no SSRFs demonstrated if err != nil { tc.t.Fatalf("failed to call server %s", err) } if expectedStatusCode != resp.StatusCode { tc.t.Errorf("requesting \"%s\" expected status code: %d but got %d", req.URL.RequestURI(), expectedStatusCode, resp.StatusCode) } return resp } // AssertResponse request a URI and assert the status code and the body contains a string func (tc *Tester) AssertResponse(req *http.Request, expectedStatusCode int, expectedBody string) 
(*http.Response, string) { tc.t.Helper() resp := tc.AssertResponseCode(req, expectedStatusCode) defer resp.Body.Close() bytes, err := io.ReadAll(resp.Body) if err != nil { tc.t.Fatalf("unable to read the response body %s", err) } body := string(bytes) if body != expectedBody { tc.t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body) } return resp, body } // Verb specific test functions // AssertGetResponse GET a URI and expect a statusCode and body text func (tc *Tester) AssertGetResponse(requestURI string, expectedStatusCode int, expectedBody string) (*http.Response, string) { tc.t.Helper() req, err := http.NewRequest("GET", requestURI, nil) if err != nil { tc.t.Fatalf("unable to create request %s", err) } return tc.AssertResponse(req, expectedStatusCode, expectedBody) } // AssertDeleteResponse request a URI and expect a statusCode and body text func (tc *Tester) AssertDeleteResponse(requestURI string, expectedStatusCode int, expectedBody string) (*http.Response, string) { tc.t.Helper() req, err := http.NewRequest("DELETE", requestURI, nil) if err != nil { tc.t.Fatalf("unable to create request %s", err) } return tc.AssertResponse(req, expectedStatusCode, expectedBody) } // AssertPostResponseBody POST to a URI and assert the response code and body func (tc *Tester) AssertPostResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody string) (*http.Response, string) { tc.t.Helper() req, err := http.NewRequest("POST", requestURI, requestBody) if err != nil { tc.t.Errorf("failed to create request %s", err) return nil, "" } applyHeaders(tc.t, req, requestHeaders) return tc.AssertResponse(req, expectedStatusCode, expectedBody) } // AssertPutResponseBody PUT to a URI and assert the response code and body func (tc *Tester) AssertPutResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody 
string) (*http.Response, string) { tc.t.Helper() req, err := http.NewRequest("PUT", requestURI, requestBody) if err != nil { tc.t.Errorf("failed to create request %s", err) return nil, "" } applyHeaders(tc.t, req, requestHeaders) return tc.AssertResponse(req, expectedStatusCode, expectedBody) } // AssertPatchResponseBody PATCH to a URI and assert the response code and body func (tc *Tester) AssertPatchResponseBody(requestURI string, requestHeaders []string, requestBody *bytes.Buffer, expectedStatusCode int, expectedBody string) (*http.Response, string) { tc.t.Helper() req, err := http.NewRequest("PATCH", requestURI, requestBody) if err != nil { tc.t.Errorf("failed to create request %s", err) return nil, "" } applyHeaders(tc.t, req, requestHeaders) return tc.AssertResponse(req, expectedStatusCode, expectedBody) } ================================================ FILE: caddytest/caddytest_test.go ================================================ package caddytest import ( "bytes" "net/http" "strings" "testing" ) func TestReplaceCertificatePaths(t *testing.T) { rawConfig := `a.caddy.localhost:9443 { tls /caddy.localhost.crt /caddy.localhost.key { } redir / https://b.caddy.localhost:9443/version 301 respond /version 200 { body "hello from a.caddy.localhost" } }` r := prependCaddyFilePath(rawConfig) if !strings.Contains(r, getIntegrationDir()+"/caddy.localhost.crt") { t.Error("expected the /caddy.localhost.crt to be expanded to include the full path") } if !strings.Contains(r, getIntegrationDir()+"/caddy.localhost.key") { t.Error("expected the /caddy.localhost.crt to be expanded to include the full path") } if !strings.Contains(r, "https://b.caddy.localhost:9443/version") { t.Error("expected redirect uri to be unchanged") } } func TestLoadUnorderedJSON(t *testing.T) { tester := NewTester(t) tester.InitServer(` { "logging": { "logs": { "default": { "level": "DEBUG", "writer": { "output": "stdout" } }, "sStdOutLogs": { "level": "DEBUG", "writer": { "output": "stdout" }, 
"include": [ "http.*", "admin.*" ] }, "sFileLogs": { "level": "DEBUG", "writer": { "output": "stdout" }, "include": [ "http.*", "admin.*" ] } } }, "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities" : { "local" : { "install_trust": false } } }, "http": { "http_port": 9080, "https_port": 9443, "servers": { "s_server": { "listen": [ ":9080" ], "routes": [ { "handle": [ { "handler": "static_response", "body": "Hello" } ] }, { "match": [ { "host": [ "localhost", "127.0.0.1" ] } ] } ], "logs": { "default_logger_name": "sStdOutLogs", "logger_names": { "localhost": "sStdOutLogs", "127.0.0.1": "sFileLogs" } } } } } } } `, "json") req, err := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) if err != nil { t.Fail() return } tester.AssertResponseCode(req, 200) } func TestCheckID(t *testing.T) { tester := NewTester(t) tester.InitServer(`{ "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "servers": { "s_server": { "@id": "s_server", "listen": [ ":9080" ], "routes": [ { "handle": [ { "handler": "static_response", "body": "Hello" } ] } ] } } } } } `, "json") headers := []string{"Content-Type:application/json"} sServer1 := []byte(`{"@id":"s_server","listen":[":9080"],"routes":[{"@id":"route1","handle":[{"handler":"static_response","body":"Hello 2"}]}]}`) // PUT to an existing ID should fail with a 409 conflict tester.AssertPutResponseBody( "http://localhost:2999/id/s_server", headers, bytes.NewBuffer(sServer1), 409, `{"error":"[/config/apps/http/servers/s_server] key already exists: s_server"}`+"\n") // POST replaces the object fully tester.AssertPostResponseBody( "http://localhost:2999/id/s_server", headers, bytes.NewBuffer(sServer1), 200, "") // Verify the server is running the new route tester.AssertGetResponse( "http://localhost:9080/", 200, "Hello 2") // Update the existing route to ensure IDs are handled correctly when replaced tester.AssertPostResponseBody( "http://localhost:2999/id/s_server", 
headers, bytes.NewBuffer([]byte(`{"@id":"s_server","listen":[":9080"],"routes":[{"@id":"route1","handle":[{"handler":"static_response","body":"Hello2"}],"match":[{"path":["/route_1/*"]}]}]}`)), 200, "") sServer2 := []byte(`{"@id":"s_server","listen":[":9080"],"routes":[{"@id":"route1","handle":[{"handler":"static_response","body":"Hello2"}],"match":[{"path":["/route_1/*"]}]}]}`) // Identical patch should succeed and return 200 (config is unchanged branch) tester.AssertPatchResponseBody( "http://localhost:2999/id/s_server", headers, bytes.NewBuffer(sServer2), 200, "") route2 := []byte(`{"@id":"route2","handle": [{"handler": "static_response","body": "route2"}],"match":[{"path":["/route_2/*"]}]}`) // Put a new route2 object before the route1 object due to the path of /id/route1 // Being translated to: /config/apps/http/servers/s_server/routes/0 tester.AssertPutResponseBody( "http://localhost:2999/id/route1", headers, bytes.NewBuffer(route2), 200, "") // Verify that the whole config looks correct, now containing both route1 and route2 tester.AssertGetResponse( "http://localhost:2999/config/", 200, `{"admin":{"listen":"localhost:2999"},"apps":{"http":{"http_port":9080,"servers":{"s_server":{"@id":"s_server","listen":[":9080"],"routes":[{"@id":"route2","handle":[{"body":"route2","handler":"static_response"}],"match":[{"path":["/route_2/*"]}]},{"@id":"route1","handle":[{"body":"Hello2","handler":"static_response"}],"match":[{"path":["/route_1/*"]}]}]}}}}}`+"\n") // Try to add another copy of route2 using POST to test duplicate ID handling // Since the first route2 ended up at array index 0, and we are appending to the array, the index for the new element would be 2 tester.AssertPostResponseBody( "http://localhost:2999/id/route2", headers, bytes.NewBuffer(route2), 400, `{"error":"indexing config: duplicate ID 'route2' found at /config/apps/http/servers/s_server/routes/0 and /config/apps/http/servers/s_server/routes/2"}`+"\n") // Use PATCH to modify an existing object 
successfully tester.AssertPatchResponseBody( "http://localhost:2999/id/route1", headers, bytes.NewBuffer([]byte(`{"@id":"route1","handle":[{"handler":"static_response","body":"route1"}],"match":[{"path":["/route_1/*"]}]}`)), 200, "") // Verify the PATCH updated the server state tester.AssertGetResponse( "http://localhost:9080/route_1/", 200, "route1") } ================================================ FILE: caddytest/integration/acme_test.go ================================================ package integration import ( "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "fmt" "log/slog" "net" "net/http" "strings" "testing" "github.com/mholt/acmez/v3" "github.com/mholt/acmez/v3/acme" smallstepacme "github.com/smallstep/certificates/acme" "go.uber.org/zap" "go.uber.org/zap/exp/zapslog" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddytest" ) const acmeChallengePort = 9081 // Test the basic functionality of Caddy's ACME server func TestACMEServerWithDefaults(t *testing.T) { ctx := context.Background() logger, err := zap.NewDevelopment() if err != nil { t.Error(err) return } tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 local_certs } acme.localhost { acme_server } `, "caddyfile") client := acmez.Client{ Client: &acme.Client{ Directory: "https://acme.localhost:9443/acme/local/directory", HTTPClient: tester.Client, Logger: slog.New(zapslog.NewHandler(logger.Core(), zapslog.WithName("acmez"))), }, ChallengeSolvers: map[string]acmez.Solver{ acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger}, }, } accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating account key: %v", err) } account := acme.Account{ Contact: []string{"mailto:you@example.com"}, TermsOfServiceAgreed: true, PrivateKey: accountPrivateKey, } account, err = client.NewAccount(ctx, account) if err != nil { t.Errorf("new account: %v", err) return 
} // Every certificate needs a key. certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating certificate key: %v", err) return } certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"}) if err != nil { t.Errorf("obtaining certificate: %v", err) return } // ACME servers should usually give you the entire certificate chain // in PEM format, and sometimes even alternate chains! It's up to you // which one(s) to store and use, but whatever you do, be sure to // store the certificate and key somewhere safe and secure, i.e. don't // lose them! for _, cert := range certs { t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM) } } func TestACMEServerWithMismatchedChallenges(t *testing.T) { ctx := context.Background() logger := caddy.Log().Named("acmez") tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 local_certs } acme.localhost { acme_server { challenges tls-alpn-01 } } `, "caddyfile") client := acmez.Client{ Client: &acme.Client{ Directory: "https://acme.localhost:9443/acme/local/directory", HTTPClient: tester.Client, Logger: slog.New(zapslog.NewHandler(logger.Core(), zapslog.WithName("acmez"))), }, ChallengeSolvers: map[string]acmez.Solver{ acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger}, }, } accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating account key: %v", err) } account := acme.Account{ Contact: []string{"mailto:you@example.com"}, TermsOfServiceAgreed: true, PrivateKey: accountPrivateKey, } account, err = client.NewAccount(ctx, account) if err != nil { t.Errorf("new account: %v", err) return } // Every certificate needs a key. 
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating certificate key: %v", err) return } certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"}) if len(certs) > 0 { t.Errorf("expected '0' certificates, but received '%d'", len(certs)) } if err == nil { t.Error("expected errors, but received none") } const expectedErrMsg = "no solvers available for remaining challenges (configured=[http-01] offered=[tls-alpn-01] remaining=[tls-alpn-01])" if !strings.Contains(err.Error(), expectedErrMsg) { t.Errorf(`received error message does not match expectation: expected="%s" received="%s"`, expectedErrMsg, err.Error()) } } // naiveHTTPSolver is a no-op acmez.Solver for example purposes only. type naiveHTTPSolver struct { srv *http.Server logger *zap.Logger } func (s *naiveHTTPSolver) Present(ctx context.Context, challenge acme.Challenge) error { smallstepacme.InsecurePortHTTP01 = acmeChallengePort s.srv = &http.Server{ Addr: fmt.Sprintf(":%d", acmeChallengePort), Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { host, _, err := net.SplitHostPort(r.Host) if err != nil { host = r.Host } s.logger.Info("received request on challenge server", zap.String("path", r.URL.Path)) if r.Method == "GET" && r.URL.Path == challenge.HTTP01ResourcePath() && strings.EqualFold(host, challenge.Identifier.Value) { w.Header().Add("Content-Type", "text/plain") w.Write([]byte(challenge.KeyAuthorization)) r.Close = true s.logger.Info("served key authentication", zap.String("identifier", challenge.Identifier.Value), zap.String("challenge", "http-01"), zap.String("remote", r.RemoteAddr), ) } }), } l, err := net.Listen("tcp", fmt.Sprintf(":%d", acmeChallengePort)) if err != nil { return err } s.logger.Info("present challenge", zap.Any("challenge", challenge)) go s.srv.Serve(l) return nil } func (s naiveHTTPSolver) CleanUp(ctx context.Context, challenge acme.Challenge) error { 
smallstepacme.InsecurePortHTTP01 = 0 s.logger.Info("cleanup", zap.Any("challenge", challenge)) if s.srv != nil { s.srv.Close() } return nil } ================================================ FILE: caddytest/integration/acmeserver_test.go ================================================ package integration import ( "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "log/slog" "strings" "testing" "github.com/mholt/acmez/v3" "github.com/mholt/acmez/v3/acme" "go.uber.org/zap" "go.uber.org/zap/exp/zapslog" "github.com/caddyserver/caddy/v2/caddytest" ) func TestACMEServerDirectory(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust local_certs admin localhost:2999 http_port 9080 https_port 9443 pki { ca local { name "Caddy Local Authority" } } } acme.localhost:9443 { acme_server } `, "caddyfile") tester.AssertGetResponse( "https://acme.localhost:9443/acme/local/directory", 200, `{"newNonce":"https://acme.localhost:9443/acme/local/new-nonce","newAccount":"https://acme.localhost:9443/acme/local/new-account","newOrder":"https://acme.localhost:9443/acme/local/new-order","revokeCert":"https://acme.localhost:9443/acme/local/revoke-cert","keyChange":"https://acme.localhost:9443/acme/local/key-change"} `) } func TestACMEServerAllowPolicy(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust local_certs admin localhost:2999 http_port 9080 https_port 9443 pki { ca local { name "Caddy Local Authority" } } } acme.localhost { acme_server { challenges http-01 allow { domains localhost } } } `, "caddyfile") ctx := context.Background() logger, err := zap.NewDevelopment() if err != nil { t.Error(err) return } client := acmez.Client{ Client: &acme.Client{ Directory: "https://acme.localhost:9443/acme/local/directory", HTTPClient: tester.Client, Logger: slog.New(zapslog.NewHandler(logger.Core())), }, ChallengeSolvers: map[string]acmez.Solver{ acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger}, }, } 
accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating account key: %v", err) } account := acme.Account{ Contact: []string{"mailto:you@example.com"}, TermsOfServiceAgreed: true, PrivateKey: accountPrivateKey, } account, err = client.NewAccount(ctx, account) if err != nil { t.Errorf("new account: %v", err) return } // Every certificate needs a key. certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating certificate key: %v", err) return } { certs, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"localhost"}) if err != nil { t.Errorf("obtaining certificate for allowed domain: %v", err) return } // ACME servers should usually give you the entire certificate chain // in PEM format, and sometimes even alternate chains! It's up to you // which one(s) to store and use, but whatever you do, be sure to // store the certificate and key somewhere safe and secure, i.e. don't // lose them! 
for _, cert := range certs { t.Logf("Certificate %q:\n%s\n\n", cert.URL, cert.ChainPEM) } } { _, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"not-matching.localhost"}) if err == nil { t.Errorf("obtaining certificate for 'not-matching.localhost' domain") } else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") { t.Logf("unexpected error: %v", err) } } } func TestACMEServerDenyPolicy(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust local_certs admin localhost:2999 http_port 9080 https_port 9443 pki { ca local { name "Caddy Local Authority" } } } acme.localhost { acme_server { deny { domains deny.localhost } } } `, "caddyfile") ctx := context.Background() logger, err := zap.NewDevelopment() if err != nil { t.Error(err) return } client := acmez.Client{ Client: &acme.Client{ Directory: "https://acme.localhost:9443/acme/local/directory", HTTPClient: tester.Client, Logger: slog.New(zapslog.NewHandler(logger.Core())), }, ChallengeSolvers: map[string]acmez.Solver{ acme.ChallengeTypeHTTP01: &naiveHTTPSolver{logger: logger}, }, } accountPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating account key: %v", err) } account := acme.Account{ Contact: []string{"mailto:you@example.com"}, TermsOfServiceAgreed: true, PrivateKey: accountPrivateKey, } account, err = client.NewAccount(ctx, account) if err != nil { t.Errorf("new account: %v", err) return } // Every certificate needs a key. 
certPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Errorf("generating certificate key: %v", err) return } { _, err := client.ObtainCertificateForSANs(ctx, account, certPrivateKey, []string{"deny.localhost"}) if err == nil { t.Errorf("obtaining certificate for 'deny.localhost' domain") } else if !strings.Contains(err.Error(), "urn:ietf:params:acme:error:rejectedIdentifier") { t.Logf("unexpected error: %v", err) } } } ================================================ FILE: caddytest/integration/autohttps_test.go ================================================ package integration import ( "net/http" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestAutoHTTPtoHTTPSRedirectsImplicitPort(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 skip_install_trust http_port 9080 https_port 9443 } localhost respond "Yahaha! You found me!" `, "caddyfile") tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect) } func TestAutoHTTPtoHTTPSRedirectsExplicitPortSameAsHTTPSPort(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 } localhost:9443 respond "Yahaha! You found me!" `, "caddyfile") tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect) } func TestAutoHTTPtoHTTPSRedirectsExplicitPortDifferentFromHTTPSPort(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 } localhost:1234 respond "Yahaha! You found me!" 
`, "caddyfile") tester.AssertRedirect("http://localhost:9080/", "https://localhost:1234/", http.StatusPermanentRedirect) } func TestAutoHTTPRedirectsWithHTTPListenerFirstInAddresses(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "servers": { "ingress_server": { "listen": [ ":9080", ":9443" ], "routes": [ { "match": [ { "host": ["localhost"] } ] } ] } } }, "pki": { "certificate_authorities": { "local": { "install_trust": false } } } } } `, "json") tester.AssertRedirect("http://localhost:9080/", "https://localhost/", http.StatusPermanentRedirect) } func TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAll(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 local_certs } http://:9080 { respond "Foo" } http://baz.localhost:9080 { respond "Baz" } bar.localhost { respond "Bar" } `, "caddyfile") tester.AssertRedirect("http://bar.localhost:9080/", "https://bar.localhost/", http.StatusPermanentRedirect) tester.AssertGetResponse("http://foo.localhost:9080/", 200, "Foo") tester.AssertGetResponse("http://baz.localhost:9080/", 200, "Baz") } func TestAutoHTTPRedirectsInsertedBeforeUserDefinedCatchAllWithNoExplicitHTTPSite(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 local_certs } http://:9080 { respond "Foo" } bar.localhost { respond "Bar" } `, "caddyfile") tester.AssertRedirect("http://bar.localhost:9080/", "https://bar.localhost/", http.StatusPermanentRedirect) tester.AssertGetResponse("http://foo.localhost:9080/", 200, "Foo") tester.AssertGetResponse("http://baz.localhost:9080/", 200, "Foo") } func TestAutoHTTPSRedirectSortingExactMatchOverWildcard(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 
9080 https_port 9443 local_certs } *.localhost:10443 { respond "Wildcard" } dev.localhost { respond "Exact" } `, "caddyfile") tester.AssertRedirect("http://dev.localhost:9080/", "https://dev.localhost/", http.StatusPermanentRedirect) tester.AssertRedirect("http://foo.localhost:9080/", "https://foo.localhost:10443/", http.StatusPermanentRedirect) } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_dns_configured.caddyfiletest ================================================ { acme_dns mock foo } example.com { respond "Hello World" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Hello World", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "challenges": { "dns": { "provider": { "argument": "foo", "name": "mock" } } }, "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_dns_naked_use_dns_defaults.caddyfiletest ================================================ { dns mock acme_dns } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "challenges": { "dns": {} }, "module": "acme" } ] } ] }, "dns": { "name": "mock" } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_dns_naked_without_dns.caddyfiletest ================================================ { acme_dns } example.com { respond "Hello World" } ---------- acme_dns specified without DNS provider config, but no provider specified with 'dns' global option ================================================ FILE: 
caddytest/integration/caddyfile_adapt/acme_server_custom_challenges.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca challenges dns-01 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "challenges": [ "dns-01" ], "handler": "acme_server" } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_default_challenges.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca challenges } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "handler": "acme_server" } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_lifetime.caddyfiletest ================================================ { pki { ca internal { name "Internal" root_cn "Internal Root Cert" intermediate_cn "Internal Intermediate Cert" } ca internal-long-lived { name "Long-lived" root_cn "Internal Root Cert 2" intermediate_cn "Internal Intermediate Cert 2" } } } acme-internal.example.com { acme_server { ca internal } } acme-long-lived.example.com { acme_server { ca internal-long-lived lifetime 7d } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ 
"acme-long-lived.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "internal-long-lived", "handler": "acme_server", "lifetime": 604800000000000 } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "acme-internal.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "internal", "handler": "acme_server" } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "internal": { "name": "Internal", "root_common_name": "Internal Root Cert", "intermediate_common_name": "Internal Intermediate Cert" }, "internal-long-lived": { "name": "Long-lived", "root_common_name": "Internal Root Cert 2", "intermediate_common_name": "Internal Intermediate Cert 2" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_multi_custom_challenges.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca challenges dns-01 http-01 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "challenges": [ "dns-01", "http-01" ], "handler": "acme_server" } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_policy-allow.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca allow { domains host-1.internal.example.com host-2.internal.example.com } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { 
"handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "handler": "acme_server", "policy": { "allow": { "domains": [ "host-1.internal.example.com", "host-2.internal.example.com" ] } } } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_policy-both.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca allow { domains host-1.internal.example.com host-2.internal.example.com } deny { domains dc.internal.example.com } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "handler": "acme_server", "policy": { "allow": { "domains": [ "host-1.internal.example.com", "host-2.internal.example.com" ] }, "deny": { "domains": [ "dc.internal.example.com" ] } } } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_policy-deny.caddyfiletest ================================================ { pki { ca custom-ca { name "Custom CA" } } } acme.example.com { acme_server { ca custom-ca deny { domains dc.internal.example.com } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "custom-ca", "handler": "acme_server", "policy": { "deny": { "domains": [ "dc.internal.example.com" ] } } } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "custom-ca": { "name": "Custom CA" } } 
} } } ================================================ FILE: caddytest/integration/caddyfile_adapt/acme_server_sign_with_root.caddyfiletest ================================================ { pki { ca internal { name "Internal" root_cn "Internal Root Cert" intermediate_cn "Internal Intermediate Cert" } } } acme.example.com { acme_server { ca internal sign_with_root } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "internal", "handler": "acme_server", "sign_with_root": true } ] } ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "internal": { "name": "Internal", "root_common_name": "Internal Root Cert", "intermediate_common_name": "Internal Intermediate Cert" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/ambiguous_site_definition.caddyfiletest ================================================ example.com handle { respond "one" } example.com handle { respond "two" } ---------- Caddyfile:6: unrecognized directive: example.com Did you mean to define a second site? If so, you must use curly braces around each site to separate their configurations. 
================================================ FILE: caddytest/integration/caddyfile_adapt/ambiguous_site_definition_duplicate_key.caddyfiletest ================================================ :8080 { respond "one" } :8080 { respond "two" } ---------- ambiguous site definition: :8080 ================================================ FILE: caddytest/integration/caddyfile_adapt/auto_https_disable_redirects.caddyfiletest ================================================ { auto_https disable_redirects } localhost ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "automatic_https": { "disable_redirects": true } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/auto_https_ignore_loaded_certs.caddyfiletest ================================================ { auto_https ignore_loaded_certs } localhost ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "automatic_https": { "ignore_loaded_certificates": true } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/auto_https_off.caddyfiletest ================================================ { auto_https off } localhost ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "tls_connection_policies": [ {} ], "automatic_https": { "disable": true } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/bind_fd_fdgram_h123.caddyfiletest ================================================ { auto_https disable_redirects admin off } http://localhost { bind fd/{env.CADDY_HTTP_FD} { protocols h1 } log respond "Hello, HTTP!" 
} https://localhost { bind fd/{env.CADDY_HTTPS_FD} { protocols h1 h2 } bind fdgram/{env.CADDY_HTTP3_FD} { protocols h3 } log respond "Hello, HTTPS!" } ---------- { "admin": { "disabled": true }, "apps": { "http": { "servers": { "srv0": { "listen": [ "fd/{env.CADDY_HTTPS_FD}", "fdgram/{env.CADDY_HTTP3_FD}" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Hello, HTTPS!", "handler": "static_response" } ] } ] } ], "terminal": true } ], "automatic_https": { "disable_redirects": true }, "logs": { "logger_names": { "localhost": [ "" ] } }, "listen_protocols": [ [ "h1", "h2" ], [ "h3" ] ] }, "srv1": { "automatic_https": { "disable_redirects": true } }, "srv2": { "listen": [ "fd/{env.CADDY_HTTP_FD}" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Hello, HTTP!", "handler": "static_response" } ] } ] } ], "terminal": true } ], "automatic_https": { "disable_redirects": true, "skip": [ "localhost" ] }, "logs": { "logger_names": { "localhost": [ "" ] } }, "listen_protocols": [ [ "h1" ] ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/bind_ipv6.caddyfiletest ================================================ example.com { bind tcp6/[::] } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ "tcp6/[::]:443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/directive_as_site_address.caddyfiletest ================================================ handle respond "should not work" ---------- Caddyfile:1: parsed 'handle' as a site address, but it is a known directive; directives must appear in a site block ================================================ FILE: 
caddytest/integration/caddyfile_adapt/duplicate_listener_address_global.caddyfiletest ================================================ { servers { srv0 { listen :8080 } srv1 { listen :8080 } } } ---------- parsing caddyfile tokens for 'servers': unrecognized servers option 'srv0', at Caddyfile:3 ================================================ FILE: caddytest/integration/caddyfile_adapt/enable_tls_for_catch_all_site.caddyfiletest ================================================ :8443 { tls internal { on_demand } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8443" ], "tls_connection_policies": [ {} ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "module": "internal" } ], "on_demand": true } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/encode_options.caddyfiletest ================================================ :80 # All the options encode gzip zstd { minimum_length 256 match { status 2xx 4xx 500 header Content-Type text/* header Content-Type application/json* header Content-Type application/javascript* header Content-Type application/xhtml+xml* header Content-Type application/atom+xml* header Content-Type application/rss+xml* header Content-Type application/wasm* header Content-Type image/svg+xml* } } # Long way with a block for each encoding encode { zstd gzip 5 } encode ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "encodings": { "gzip": {}, "zstd": {} }, "handler": "encode", "match": { "headers": { "Content-Type": [ "text/*", "application/json*", "application/javascript*", "application/xhtml+xml*", "application/atom+xml*", "application/rss+xml*", "application/wasm*", "image/svg+xml*" ] }, "status_code": [ 2, 4, 500 ] }, "minimum_length": 256, "prefer": [ "gzip", "zstd" ] }, { "encodings": { "gzip": { "level": 5 }, "zstd": {} }, "handler": "encode", "prefer": [ "zstd", "gzip" ] }, { "encodings": { "gzip": 
{}, "zstd": {} }, "handler": "encode", "prefer": [ "zstd", "gzip" ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_example.caddyfiletest ================================================ example.com { root * /srv # Trigger errors for certain paths error /private* "Unauthorized" 403 error /hidden* "Not found" 404 # Handle the error by serving an HTML page handle_errors { rewrite * /{http.error.status_code}.html file_server } file_server } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 403 } ], "match": [ { "path": [ "/private*" ] } ] }, { "handle": [ { "error": "Not found", "handler": "error", "status_code": 404 } ], "match": [ { "path": [ "/hidden*" ] } ] }, { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "group": "group0", "handle": [ { "handler": "rewrite", "uri": "/{http.error.status_code}.html" } ] }, { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_multi_site_blocks.caddyfiletest ================================================ foo.localhost { root * /srv error /private* "Unauthorized" 410 error /fivehundred* "Internal Server Error" 500 handle_errors 5xx { respond "Error In range [500 .. 
599]" } handle_errors 410 { respond "404 or 410 error" } } bar.localhost { root * /srv error /private* "Unauthorized" 410 error /fivehundred* "Internal Server Error" 500 handle_errors 5xx { respond "Error In range [500 .. 599] from second site" } handle_errors 410 { respond "404 or 410 error from second site" } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "foo.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Internal Server Error", "handler": "error", "status_code": 500 } ], "match": [ { "path": [ "/fivehundred*" ] } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "bar.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Internal Server Error", "handler": "error", "status_code": 500 } ], "match": [ { "path": [ "/fivehundred*" ] } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "foo.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "404 or 410 error", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} in [410]" } ] }, { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error In range [500 .. 
599]", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "bar.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "404 or 410 error from second site", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} in [410]" } ] }, { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error In range [500 .. 599] from second site", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 500 \u0026\u0026 {http.error.status_code} \u003c= 599" } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_range_codes.caddyfiletest ================================================ { http_port 3010 } localhost:3010 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 handle_errors 4xx { respond "Error in the [400 .. 499] range" } } ---------- { "apps": { "http": { "http_port": 3010, "servers": { "srv0": { "listen": [ ":3010" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] }, { "handle": [ { "error": "Not found", "handler": "error", "status_code": 404 } ], "match": [ { "path": [ "/hidden*" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error in the [400 .. 
499] range", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499" } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_range_simple_codes.caddyfiletest ================================================ { http_port 2099 } localhost:2099 { root * /srv error /private* "Unauthorized" 410 error /threehundred* "Moved Permanently" 301 error /internalerr* "Internal Server Error" 500 handle_errors 500 3xx { respond "Error code is equal to 500 or in the [300..399] range" } handle_errors 4xx { respond "Error in the [400 .. 499] range" } } ---------- { "apps": { "http": { "http_port": 2099, "servers": { "srv0": { "listen": [ ":2099" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Moved Permanently", "handler": "error", "status_code": 301 } ], "match": [ { "path": [ "/threehundred*" ] } ] }, { "handle": [ { "error": "Internal Server Error", "handler": "error", "status_code": 500 } ], "match": [ { "path": [ "/internalerr*" ] } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error in the [400 .. 
499] range", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499" } ] }, { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error code is equal to 500 or in the [300..399] range", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 300 \u0026\u0026 {http.error.status_code} \u003c= 399 || {http.error.status_code} in [500]" } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_simple_codes.caddyfiletest ================================================ { http_port 3010 } localhost:3010 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 handle_errors 404 410 { respond "404 or 410 error" } } ---------- { "apps": { "http": { "http_port": 3010, "servers": { "srv0": { "listen": [ ":3010" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] }, { "handle": [ { "error": "Not found", "handler": "error", "status_code": 404 } ], "match": [ { "path": [ "/hidden*" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "404 or 410 error", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} in [404, 410]" } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_sort.caddyfiletest ================================================ { http_port 2099 } 
localhost:2099 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 error /internalerr* "Internal Server Error" 500 handle_errors { respond "Fallback route: code outside the [400..499] range" } handle_errors 4xx { respond "Error in the [400 .. 499] range" } } ---------- { "apps": { "http": { "http_port": 2099, "servers": { "srv0": { "listen": [ ":2099" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "handle": [ { "error": "Internal Server Error", "handler": "error", "status_code": 500 } ], "match": [ { "path": [ "/internalerr*" ] } ] }, { "handle": [ { "error": "Unauthorized", "handler": "error", "status_code": 410 } ], "match": [ { "path": [ "/private*" ] } ] }, { "handle": [ { "error": "Not found", "handler": "error", "status_code": 404 } ], "match": [ { "path": [ "/hidden*" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Error in the [400 .. 
499] range", "handler": "static_response" } ] } ] } ], "match": [ { "expression": "{http.error.status_code} \u003e= 400 \u0026\u0026 {http.error.status_code} \u003c= 499" } ] }, { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Fallback route: code outside the [400..499] range", "handler": "static_response" } ] } ] } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/error_subhandlers.caddyfiletest ================================================ { http_port 2099 } localhost:2099 { root * /var/www/ file_server handle_errors 404 { handle /en/* { respond "not found" 404 } handle /es/* { respond "no encontrado" } handle { respond "default not found" } } handle_errors { handle /en/* { respond "English error" } handle /es/* { respond "Spanish error" } handle { respond "Default error" } } } ---------- { "apps": { "http": { "http_port": 2099, "servers": { "srv0": { "listen": [ ":2099" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/var/www/" }, { "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } ], "terminal": true } ], "errors": { "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "not found", "handler": "static_response", "status_code": 404 } ] } ] } ], "match": [ { "path": [ "/en/*" ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "no encontrado", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/es/*" ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "default not found", "handler": "static_response" } ] } ] } ] } ] } ], 
"match": [ { "expression": "{http.error.status_code} in [404]" } ] }, { "handle": [ { "handler": "subroute", "routes": [ { "group": "group8", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "English error", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/en/*" ] } ] }, { "group": "group8", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Spanish error", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/es/*" ] } ] }, { "group": "group8", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Default error", "handler": "static_response" } ] } ] } ] } ] } ] } ] } ], "terminal": true } ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/expression_quotes.caddyfiletest ================================================ (snippet) { @g `{http.error.status_code} == 404` } example.com @a expression {http.error.status_code} == 400 abort @a @b expression {http.error.status_code} == "401" abort @b @c expression {http.error.status_code} == `402` abort @c @d expression "{http.error.status_code} == 403" abort @d @e expression `{http.error.status_code} == 404` abort @e @f `{http.error.status_code} == 404` abort @f import snippet abort @g ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": "{http.error.status_code} == 400" } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": "{http.error.status_code} == \"401\"" } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": "{http.error.status_code} == `402`" } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": { "expr": 
"{http.error.status_code} == 403", "name": "d" } } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": { "expr": "{http.error.status_code} == 404", "name": "e" } } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": { "expr": "{http.error.status_code} == 404", "name": "f" } } ] }, { "handle": [ { "abort": true, "handler": "static_response" } ], "match": [ { "expression": { "expr": "{http.error.status_code} == 404", "name": "g" } } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_disable_canonical_uris.caddyfiletest ================================================ :80 file_server { disable_canonical_uris } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "canonical_uris": false, "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_etag_file_extensions.caddyfiletest ================================================ :8080 { root * ./ file_server { etag_file_extensions .b3sum .sha256 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8080" ], "routes": [ { "handle": [ { "handler": "vars", "root": "./" }, { "etag_file_extensions": [ ".b3sum", ".sha256" ], "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_file_limit.caddyfiletest ================================================ :80 file_server { browse { file_limit 4000 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "browse": { "file_limit": 4000 }, "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } } } } } 
================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_pass_thru.caddyfiletest ================================================ :80 file_server { pass_thru } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ], "pass_thru": true } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_precompressed.caddyfiletest ================================================ :80 file_server { precompressed zstd br gzip } file_server { precompressed } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ], "precompressed": { "br": {}, "gzip": {}, "zstd": {} }, "precompressed_order": [ "zstd", "br", "gzip" ] }, { "handler": "file_server", "hide": [ "./Caddyfile" ], "precompressed": { "br": {}, "gzip": {}, "zstd": {} }, "precompressed_order": [ "br", "zstd", "gzip" ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_sort.caddyfiletest ================================================ :80 file_server { browse { sort size desc } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "browse": { "sort": [ "size", "desc" ] }, "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/file_server_status.caddyfiletest ================================================ localhost root * /srv handle /nope* { file_server { status 403 } } handle /custom-status* { file_server { status {env.CUSTOM_STATUS} } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { 
"handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/srv" } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ], "status_code": "{env.CUSTOM_STATUS}" } ] } ] } ], "match": [ { "path": [ "/custom-status*" ] } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ], "status_code": 403 } ] } ] } ], "match": [ { "path": [ "/nope*" ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/forward_auth_authelia.caddyfiletest ================================================ app.example.com { forward_auth authelia:9091 { uri /api/authz/forward-auth copy_headers Remote-User Remote-Groups Remote-Name Remote-Email } reverse_proxy backend:8080 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "app.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handle_response": [ { "match": { "status_code": [ 2 ] }, "routes": [ { "handle": [ { "handler": "vars" } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "Remote-Email" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "Remote-Email": [ "{http.reverse_proxy.header.Remote-Email}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.Remote-Email}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "Remote-Groups" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "Remote-Groups": [ "{http.reverse_proxy.header.Remote-Groups}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.Remote-Groups}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "Remote-Name" ] } } ] }, { "handle": [ { 
"handler": "headers", "request": { "set": { "Remote-Name": [ "{http.reverse_proxy.header.Remote-Name}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.Remote-Name}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "Remote-User" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "Remote-User": [ "{http.reverse_proxy.header.Remote-User}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.Remote-User}": [ "" ] } } ] } ] } ] } ], "handler": "reverse_proxy", "headers": { "request": { "set": { "X-Forwarded-Method": [ "{http.request.method}" ], "X-Forwarded-Uri": [ "{http.request.uri}" ] } } }, "rewrite": { "method": "GET", "uri": "/api/authz/forward-auth" }, "upstreams": [ { "dial": "authelia:9091" } ] }, { "handler": "reverse_proxy", "upstreams": [ { "dial": "backend:8080" } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/forward_auth_copy_headers_strip.caddyfiletest ================================================ :8080 forward_auth 127.0.0.1:9091 { uri / copy_headers X-User-Id X-User-Role } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8080" ], "routes": [ { "handle": [ { "handle_response": [ { "match": { "status_code": [ 2 ] }, "routes": [ { "handle": [ { "handler": "vars" } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "X-User-Id" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "X-User-Id": [ "{http.reverse_proxy.header.X-User-Id}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.X-User-Id}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "X-User-Role" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "X-User-Role": [ "{http.reverse_proxy.header.X-User-Role}" ] } } } ], "match": [ { "not": [ { "vars": { 
"{http.reverse_proxy.header.X-User-Role}": [ "" ] } } ] } ] } ] } ], "handler": "reverse_proxy", "headers": { "request": { "set": { "X-Forwarded-Method": [ "{http.request.method}" ], "X-Forwarded-Uri": [ "{http.request.uri}" ] } } }, "rewrite": { "method": "GET", "uri": "/" }, "upstreams": [ { "dial": "127.0.0.1:9091" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/forward_auth_rename_headers.caddyfiletest ================================================ :8881 forward_auth localhost:9000 { uri /auth copy_headers A>1 B C>3 { D E>5 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "routes": [ { "handle": [ { "handle_response": [ { "match": { "status_code": [ 2 ] }, "routes": [ { "handle": [ { "handler": "vars" } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "1" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "1": [ "{http.reverse_proxy.header.A}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.A}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "B" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "B": [ "{http.reverse_proxy.header.B}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.B}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "3" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "3": [ "{http.reverse_proxy.header.C}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.C}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "D" ] } } ] }, { "handle": [ { "handler": "headers", "request": { "set": { "D": [ "{http.reverse_proxy.header.D}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.D}": [ "" ] } } ] } ] }, { "handle": [ { "handler": "headers", "request": { "delete": [ "5" ] } } ] 
}, { "handle": [ { "handler": "headers", "request": { "set": { "5": [ "{http.reverse_proxy.header.E}" ] } } } ], "match": [ { "not": [ { "vars": { "{http.reverse_proxy.header.E}": [ "" ] } } ] } ] } ] } ], "handler": "reverse_proxy", "headers": { "request": { "set": { "X-Forwarded-Method": [ "{http.request.method}" ], "X-Forwarded-Uri": [ "{http.request.uri}" ] } } }, "rewrite": { "method": "GET", "uri": "/auth" }, "upstreams": [ { "dial": "localhost:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options.caddyfiletest ================================================ { debug http_port 8080 https_port 8443 grace_period 5s shutdown_delay 10s default_sni localhost order root first storage file_system { root /data } storage_check off storage_clean_interval off acme_ca https://example.com acme_ca_root /path/to/ca.crt ocsp_stapling off email test@example.com admin off on_demand_tls { ask https://example.com } local_certs key_type ed25519 } :80 ---------- { "admin": { "disabled": true }, "logging": { "logs": { "default": { "level": "DEBUG" } } }, "storage": { "module": "file_system", "root": "/data" }, "apps": { "http": { "http_port": 8080, "https_port": 8443, "grace_period": 5000000000, "shutdown_delay": 10000000000, "servers": { "srv0": { "listen": [ ":80" ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "module": "internal" } ], "key_type": "ed25519", "disable_ocsp_stapling": true } ], "on_demand": { "permission": { "endpoint": "https://example.com", "module": "http" } } }, "disable_ocsp_stapling": true, "disable_storage_check": true, "disable_storage_clean": true } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_acme.caddyfiletest ================================================ { debug http_port 8080 https_port 8443 default_sni localhost order root first storage file_system { root /data } acme_ca 
https://example.com acme_eab { key_id 4K2scIVbBpNd-78scadB2g mac_key abcdefghijklmnopqrstuvwx-abcdefghijklnopqrstuvwxyz12ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefgh } acme_ca_root /path/to/ca.crt email test@example.com admin off on_demand_tls { ask https://example.com } storage_clean_interval 7d renew_interval 1d ocsp_interval 2d key_type ed25519 } :80 ---------- { "admin": { "disabled": true }, "logging": { "logs": { "default": { "level": "DEBUG" } } }, "storage": { "module": "file_system", "root": "/data" }, "apps": { "http": { "http_port": 8080, "https_port": 8443, "servers": { "srv0": { "listen": [ ":80" ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "ca": "https://example.com", "challenges": { "http": { "alternate_port": 8080 }, "tls-alpn": { "alternate_port": 8443 } }, "email": "test@example.com", "external_account": { "key_id": "4K2scIVbBpNd-78scadB2g", "mac_key": "abcdefghijklmnopqrstuvwx-abcdefghijklnopqrstuvwxyz12ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefgh" }, "module": "acme", "trusted_roots_pem_files": [ "/path/to/ca.crt" ] } ], "key_type": "ed25519" } ], "on_demand": { "permission": { "endpoint": "https://example.com", "module": "http" } }, "ocsp_interval": 172800000000000, "renew_interval": 86400000000000, "storage_clean_interval": 604800000000000 } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_admin.caddyfiletest ================================================ { debug http_port 8080 https_port 8443 default_sni localhost order root first storage file_system { root /data } acme_ca https://example.com acme_ca_root /path/to/ca.crt email test@example.com admin { origins localhost:2019 [::1]:2019 127.0.0.1:2019 192.168.10.128 } on_demand_tls { ask https://example.com } local_certs key_type ed25519 } :80 ---------- { "admin": { "listen": "localhost:2019", "origins": [ "localhost:2019", "[::1]:2019", "127.0.0.1:2019", "192.168.10.128" ] }, "logging": { "logs": { "default": { "level": 
"DEBUG" } } }, "storage": { "module": "file_system", "root": "/data" }, "apps": { "http": { "http_port": 8080, "https_port": 8443, "servers": { "srv0": { "listen": [ ":80" ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "module": "internal" } ], "key_type": "ed25519" } ], "on_demand": { "permission": { "endpoint": "https://example.com", "module": "http" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_admin_with_persist_config_off.caddyfiletest ================================================ { http_port 8080 persist_config off admin { origins localhost:2019 [::1]:2019 127.0.0.1:2019 192.168.10.128 } } :80 ---------- { "admin": { "listen": "localhost:2019", "origins": [ "localhost:2019", "[::1]:2019", "127.0.0.1:2019", "192.168.10.128" ], "config": { "persist": false } }, "apps": { "http": { "http_port": 8080, "servers": { "srv0": { "listen": [ ":80" ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_debug_with_access_log.caddyfiletest ================================================ { debug } :8881 { log { format console } } ---------- { "logging": { "logs": { "default": { "level": "DEBUG", "exclude": [ "http.log.access.log0" ] }, "log0": { "encoder": { "format": "console" }, "level": "DEBUG", "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_default_bind.caddyfiletest ================================================ { default_bind tcp4/0.0.0.0 tcp6/[::] } example.com { } example.org:12345 { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ "tcp4/0.0.0.0:12345", "tcp6/[::]:12345" ], "routes": [ { "match": [ { "host": [ "example.org" ] } ], "terminal": true } ] }, "srv1": { 
"listen": [ "tcp4/0.0.0.0:443", "tcp6/[::]:443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_log_and_site.caddyfiletest ================================================ { log { output file caddy.log include some-log-source exclude admin.api admin2.api } log custom-logger { output file caddy.log level WARN include custom-log-source } } :8884 { log { format json output file access.log } } ---------- { "logging": { "logs": { "custom-logger": { "writer": { "filename": "caddy.log", "output": "file" }, "level": "WARN", "include": [ "custom-log-source" ] }, "default": { "writer": { "filename": "caddy.log", "output": "file" }, "include": [ "some-log-source" ], "exclude": [ "admin.api", "admin2.api", "custom-log-source", "http.log.access.log0" ] }, "log0": { "writer": { "filename": "access.log", "output": "file" }, "encoder": { "format": "json" }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_log_basic.caddyfiletest ================================================ { log { output file foo.log } } ---------- { "logging": { "logs": { "default": { "writer": { "filename": "foo.log", "output": "file" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_log_custom.caddyfiletest ================================================ { log custom-logger { format filter { wrap console fields { request>remote_ip ip_mask { ipv4 24 ipv6 32 } } } } } ---------- { "logging": { "logs": { "custom-logger": { "encoder": { "fields": { "request\u003eremote_ip": { "filter": "ip_mask", "ipv4_cidr": 24, "ipv6_cidr": 32 } }, "format": "filter", "wrap": { "format": 
"console" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_log_multi.caddyfiletest ================================================ { log first { output file foo.log } log second { format json } } ---------- { "logging": { "logs": { "first": { "writer": { "filename": "foo.log", "output": "file" } }, "second": { "encoder": { "format": "json" } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_log_sampling.caddyfiletest ================================================ { log { sampling { interval 300 first 50 thereafter 40 } } } ---------- { "logging": { "logs": { "default": { "sampling": { "interval": 300, "first": 50, "thereafter": 40 } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_persist_config.caddyfiletest ================================================ { persist_config off } :8881 { } ---------- { "admin": { "config": { "persist": false } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_preferred_chains.caddyfiletest ================================================ { preferred_chains smallest } example.com ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "module": "acme", "preferred_chains": { "smallest": true } } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_resolvers.caddyfiletest ================================================ { email test@example.com dns mock tls_resolvers 1.1.1.1 8.8.8.8 acme_dns } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { 
"listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" } ] } ] }, "dns": { "name": "mock" }, "resolvers": [ "1.1.1.1", "8.8.8.8" ] } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_resolvers_http_challenge.caddyfiletest ================================================ { tls_resolvers 1.1.1.1 8.8.8.8 } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_resolvers_local_dns_inherit.caddyfiletest ================================================ { email test@example.com dns mock tls_resolvers 1.1.1.1 8.8.8.8 } example.com { tls { dns mock } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "challenges": { "dns": { "provider": { "name": "mock" }, "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" } ] } ] }, "dns": { "name": "mock" }, "resolvers": [ "1.1.1.1", "8.8.8.8" ] } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_resolvers_local_override.caddyfiletest ================================================ { email test@example.com dns mock 
tls_resolvers 1.1.1.1 8.8.8.8 acme_dns } example.com { tls { resolvers 9.9.9.9 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "challenges": { "dns": { "resolvers": [ "9.9.9.9" ] } }, "email": "test@example.com", "module": "acme" } ] }, { "issuers": [ { "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" } ] } ] }, "dns": { "name": "mock" }, "resolvers": [ "1.1.1.1", "8.8.8.8" ] } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_resolvers_mixed.caddyfiletest ================================================ { email test@example.com dns mock tls_resolvers 1.1.1.1 8.8.8.8 acme_dns } site1.example.com { } site2.example.com { tls { resolvers 9.9.9.9 8.8.4.4 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "site1.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "site2.example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "site2.example.com" ], "issuers": [ { "challenges": { "dns": { "resolvers": [ "9.9.9.9", "8.8.4.4" ] } }, "email": "test@example.com", "module": "acme" } ] }, { "issuers": [ { "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "challenges": { "dns": { "resolvers": [ "1.1.1.1", "8.8.8.8" ] } }, "email": "test@example.com", "module": "acme" } ] } ] }, "dns": { "name": "mock" }, "resolvers": [ "1.1.1.1", "8.8.8.8" ] } } } 
================================================ FILE: caddytest/integration/caddyfile_adapt/global_options_skip_install_trust.caddyfiletest ================================================ { skip_install_trust pki { ca { name "Local" root_cn "Custom Local Root Name" intermediate_cn "Custom Local Intermediate Name" root { cert /path/to/cert.pem key /path/to/key.pem format pem_file } intermediate { cert /path/to/cert.pem key /path/to/key.pem format pem_file } } ca foo { name "Foo" root_cn "Custom Foo Root Name" intermediate_cn "Custom Foo Intermediate Name" } } } a.example.com { tls internal } acme.example.com { acme_server { ca foo } } acme-bar.example.com { acme_server { ca bar } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "acme-bar.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "bar", "handler": "acme_server" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "acme.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "ca": "foo", "handler": "acme_server" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "a.example.com" ] } ], "terminal": true } ] } } }, "pki": { "certificate_authorities": { "bar": { "install_trust": false }, "foo": { "name": "Foo", "root_common_name": "Custom Foo Root Name", "intermediate_common_name": "Custom Foo Intermediate Name", "install_trust": false }, "local": { "name": "Local", "root_common_name": "Custom Local Root Name", "intermediate_common_name": "Custom Local Intermediate Name", "install_trust": false, "root": { "certificate": "/path/to/cert.pem", "private_key": "/path/to/key.pem", "format": "pem_file" }, "intermediate": { "certificate": "/path/to/cert.pem", "private_key": "/path/to/key.pem", "format": "pem_file" } } } }, "tls": { "automation": { "policies": [ { "subjects": [ "acme-bar.example.com", "acme.example.com" ] }, { "subjects": [ "a.example.com" ], "issuers": [ { 
"module": "internal" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_server_options_multi.caddyfiletest ================================================ { servers { timeouts { idle 90s } strict_sni_host insecure_off } servers :80 { timeouts { idle 60s } } servers :443 { timeouts { idle 30s } strict_sni_host } } foo.com { } http://bar.com { } :8080 { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "idle_timeout": 30000000000, "routes": [ { "match": [ { "host": [ "foo.com" ] } ], "terminal": true } ], "strict_sni_host": true }, "srv1": { "listen": [ ":80" ], "idle_timeout": 60000000000, "routes": [ { "match": [ { "host": [ "bar.com" ] } ], "terminal": true } ] }, "srv2": { "listen": [ ":8080" ], "idle_timeout": 90000000000, "strict_sni_host": false } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/global_server_options_single.caddyfiletest ================================================ { servers { listener_wrappers { http_redirect tls } timeouts { read_body 30s read_header 30s write 30s idle 30s } max_header_size 100MB enable_full_duplex log_credentials protocols h1 h2 h2c h3 strict_sni_host trusted_proxies static private_ranges client_ip_headers Custom-Real-Client-IP X-Forwarded-For client_ip_headers A-Third-One keepalive_interval 20s keepalive_idle 20s keepalive_count 10 0rtt off } } foo.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "listener_wrappers": [ { "wrapper": "http_redirect" }, { "wrapper": "tls" } ], "read_timeout": 30000000000, "read_header_timeout": 30000000000, "write_timeout": 30000000000, "idle_timeout": 30000000000, "keepalive_interval": 20000000000, "keepalive_idle": 20000000000, "keepalive_count": 10, "max_header_bytes": 100000000, "enable_full_duplex": true, "routes": [ { "match": [ { "host": [ "foo.com" ] } ], "terminal": true } ], "strict_sni_host": 
true, "trusted_proxies": { "ranges": [ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8", "127.0.0.1/8", "fd00::/8", "::1" ], "source": "static" }, "client_ip_headers": [ "Custom-Real-Client-IP", "X-Forwarded-For", "A-Third-One" ], "logs": { "should_log_credentials": true }, "protocols": [ "h1", "h2", "h2c", "h3" ], "allow_0rtt": false } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/handle_nested_in_route.caddyfiletest ================================================ :8881 { route { handle /foo/* { respond "Foo" } handle { respond "Bar" } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Foo", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/foo/*" ] } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Bar", "handler": "static_response" } ] } ] } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/handle_path.caddyfiletest ================================================ :80 handle_path /api/v1/* { respond "API v1" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/api/v1/*" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "rewrite", "strip_path_prefix": "/api/v1" } ] }, { "handle": [ { "body": "API v1", "handler": "static_response" } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/handle_path_sorting.caddyfiletest ================================================ :80 { handle /api/* { respond "api" } handle_path /static/* { respond "static" } handle { respond "handle" } } ---------- { "apps": { "http": { "servers": { "srv0": { 
"listen": [ ":80" ], "routes": [ { "group": "group3", "match": [ { "path": [ "/static/*" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "rewrite", "strip_path_prefix": "/static" } ] }, { "handle": [ { "body": "static", "handler": "static_response" } ] } ] } ] }, { "group": "group3", "match": [ { "path": [ "/api/*" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "api", "handler": "static_response" } ] } ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "handle", "handler": "static_response" } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/header.caddyfiletest ================================================ :80 { header Denis "Ritchie" header +Edsger "Dijkstra" header ?John "von Neumann" header -Wolfram header { Grace: "Hopper" # some users habitually suffix field names with a colon +Ray "Solomonoff" ?Tim "Berners-Lee" defer } @images path /images/* header @images { Cache-Control "public, max-age=3600, stale-while-revalidate=86400" match { status 200 } } header { +Link "Foo" +Link "Bar" match status 200 } header >Set Defer header >Replace Deferred Replacement } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/images/*" ] } ], "handle": [ { "handler": "headers", "response": { "require": { "status_code": [ 200 ] }, "set": { "Cache-Control": [ "public, max-age=3600, stale-while-revalidate=86400" ] } } } ] }, { "handle": [ { "handler": "headers", "response": { "set": { "Denis": [ "Ritchie" ] } } }, { "handler": "headers", "response": { "add": { "Edsger": [ "Dijkstra" ] } } }, { "handler": "headers", "response": { "require": { "headers": { "John": null } }, "set": { "John": [ "von Neumann" ] } } }, { "handler": "headers", "response": { "deferred": true, "delete": [ "Wolfram" ] } }, { "handler": "headers", "response": 
{ "add": { "Ray": [ "Solomonoff" ] }, "deferred": true, "set": { "Grace": [ "Hopper" ] } } }, { "handler": "headers", "response": { "require": { "headers": { "Tim": null } }, "set": { "Tim": [ "Berners-Lee" ] } } }, { "handler": "headers", "response": { "add": { "Link": [ "Foo", "Bar" ] }, "require": { "status_code": [ 200 ] } } }, { "handler": "headers", "response": { "deferred": true, "set": { "Set": [ "Defer" ] } } }, { "handler": "headers", "response": { "deferred": true, "replace": { "Replace": [ { "replace": "Replacement", "search_regexp": "Deferred" } ] } } } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/header_placeholder_search.caddyfiletest ================================================ :80 { header Test-Static ":443" "STATIC-WORKS" header Test-Dynamic ":{http.request.local.port}" "DYNAMIC-WORKS" header Test-Complex "port-{http.request.local.port}-end" "COMPLEX-{http.request.method}" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "handler": "headers", "response": { "replace": { "Test-Static": [ { "replace": "STATIC-WORKS", "search_regexp": ":443" } ] } } }, { "handler": "headers", "response": { "replace": { "Test-Dynamic": [ { "replace": "DYNAMIC-WORKS", "search_regexp": ":{http.request.local.port}" } ] } } }, { "handler": "headers", "response": { "replace": { "Test-Complex": [ { "replace": "COMPLEX-{http.request.method}", "search_regexp": "port-{http.request.local.port}-end" } ] } } } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/heredoc.caddyfiletest ================================================ example.com { respond < Foo Foo EOF 200 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "\u003chtml\u003e\n 
\u003chead\u003e\u003ctitle\u003eFoo\u003c/title\u003e\n \u003cbody\u003eFoo\u003c/body\u003e\n\u003c/html\u003e", "handler": "static_response", "status_code": 200 } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/heredoc_extra_indentation.caddyfiletest ================================================ :80 handle { respond <headers>Server delete } } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.log0" ] }, "log0": { "writer": { "output": "stdout" }, "encoder": { "fields": { "request\u003eheaders\u003eServer": { "filter": "delete" } }, "format": "filter" }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_filter_with_header.txt ================================================ localhost { log { output file ./caddy.access.log } log health_check_log { output file ./caddy.access.health.log no_hostname } log general_log { output file ./caddy.access.general.log no_hostname } @healthCheck `header_regexp('User-Agent', '^some-regexp$') || path('/healthz*')` handle @healthCheck { log_name health_check_log general_log respond "Healthy" } handle { respond "Hello World" } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.general_log", "http.log.access.health_check_log", "http.log.access.log0" ] }, "general_log": { "writer": { "filename": "./caddy.access.general.log", "output": "file" }, "include": [ "http.log.access.general_log" ] }, "health_check_log": { "writer": { "filename": "./caddy.access.health.log", "output": "file" }, "include": [ "http.log.access.health_check_log" ] }, "log0": { "writer": { "filename": "./caddy.access.log", "output": "file" }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { 
"servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "access_logger_names": [ "health_check_log", "general_log" ], "handler": "vars" }, { "body": "Healthy", "handler": "static_response" } ] } ] } ], "match": [ { "expression": { "expr": "header_regexp('User-Agent', '^some-regexp$') || path('/healthz*')", "name": "healthCheck" } } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Hello World", "handler": "static_response" } ] } ] } ] } ] } ], "terminal": true } ], "logs": { "logger_names": { "localhost": [ "log0" ] } } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_filters.caddyfiletest ================================================ :80 log { output stdout format filter { wrap console # long form, with "fields" wrapper fields { uri query { replace foo REDACTED delete bar hash baz } } # short form, flatter structure request>headers>Authorization replace REDACTED request>headers>Server delete request>headers>Cookie cookie { replace foo REDACTED delete bar hash baz } request>remote_ip ip_mask { ipv4 24 ipv6 32 } request>client_ip ip_mask 16 32 request>headers>Regexp regexp secret REDACTED request>headers>Hash hash } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.log0" ] }, "log0": { "writer": { "output": "stdout" }, "encoder": { "fields": { "request\u003eclient_ip": { "filter": "ip_mask", "ipv4_cidr": 16, "ipv6_cidr": 32 }, "request\u003eheaders\u003eAuthorization": { "filter": "replace", "value": "REDACTED" }, "request\u003eheaders\u003eCookie": { "actions": [ { "name": "foo", "type": "replace", "value": "REDACTED" }, { "name": "bar", "type": "delete" }, { "name": "baz", "type": "hash" } ], "filter": "cookie" }, "request\u003eheaders\u003eHash": 
{ "filter": "hash" }, "request\u003eheaders\u003eRegexp": { "filter": "regexp", "regexp": "secret", "value": "REDACTED" }, "request\u003eheaders\u003eServer": { "filter": "delete" }, "request\u003eremote_ip": { "filter": "ip_mask", "ipv4_cidr": 24, "ipv6_cidr": 32 }, "uri": { "actions": [ { "parameter": "foo", "type": "replace", "value": "REDACTED" }, { "parameter": "bar", "type": "delete" }, { "parameter": "baz", "type": "hash" } ], "filter": "query" } }, "format": "filter", "wrap": { "format": "console" } }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_multi_logger_name.caddyfiletest ================================================ (log-both) { log {args[0]}-json { hostnames {args[0]} output file /var/log/{args[0]}.log format json } log {args[0]}-console { hostnames {args[0]} output file /var/log/{args[0]}.json format console } } *.example.com { # Subdomains log to multiple files at once, with # different output files and formats. 
import log-both foo.example.com import log-both bar.example.com } ---------- { "logging": { "logs": { "bar.example.com-console": { "writer": { "filename": "/var/log/bar.example.com.json", "output": "file" }, "encoder": { "format": "console" }, "include": [ "http.log.access.bar.example.com-console" ] }, "bar.example.com-json": { "writer": { "filename": "/var/log/bar.example.com.log", "output": "file" }, "encoder": { "format": "json" }, "include": [ "http.log.access.bar.example.com-json" ] }, "default": { "exclude": [ "http.log.access.bar.example.com-console", "http.log.access.bar.example.com-json", "http.log.access.foo.example.com-console", "http.log.access.foo.example.com-json" ] }, "foo.example.com-console": { "writer": { "filename": "/var/log/foo.example.com.json", "output": "file" }, "encoder": { "format": "console" }, "include": [ "http.log.access.foo.example.com-console" ] }, "foo.example.com-json": { "writer": { "filename": "/var/log/foo.example.com.log", "output": "file" }, "encoder": { "format": "json" }, "include": [ "http.log.access.foo.example.com-json" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.example.com" ] } ], "terminal": true } ], "logs": { "logger_names": { "bar.example.com": [ "bar.example.com-json", "bar.example.com-console" ], "foo.example.com": [ "foo.example.com-json", "foo.example.com-console" ] } } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_multiple_regexp_filters.caddyfiletest ================================================ :80 log { output stdout format filter { wrap console # Multiple regexp filters for the same field - this should work now! 
request>headers>Authorization regexp "Bearer\s+([A-Za-z0-9_-]+)" "Bearer [REDACTED]" request>headers>Authorization regexp "Basic\s+([A-Za-z0-9+/=]+)" "Basic [REDACTED]" request>headers>Authorization regexp "token=([^&\s]+)" "token=[REDACTED]" # Single regexp filter - this should continue to work as before request>headers>Cookie regexp "sessionid=[^;]+" "sessionid=[REDACTED]" # Mixed filters (non-regexp) - these should work normally request>headers>Server delete request>remote_ip ip_mask { ipv4 24 ipv6 32 } } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.log0" ] }, "log0": { "writer": { "output": "stdout" }, "encoder": { "fields": { "request\u003eheaders\u003eAuthorization": { "filter": "multi_regexp", "operations": [ { "regexp": "Bearer\\s+([A-Za-z0-9_-]+)", "value": "Bearer [REDACTED]" }, { "regexp": "Basic\\s+([A-Za-z0-9+/=]+)", "value": "Basic [REDACTED]" }, { "regexp": "token=([^\u0026\\s]+)", "value": "token=[REDACTED]" } ] }, "request\u003eheaders\u003eCookie": { "filter": "regexp", "regexp": "sessionid=[^;]+", "value": "sessionid=[REDACTED]" }, "request\u003eheaders\u003eServer": { "filter": "delete" }, "request\u003eremote_ip": { "filter": "ip_mask", "ipv4_cidr": 24, "ipv6_cidr": 32 } }, "format": "filter", "wrap": { "format": "console" } }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_override_hostname.caddyfiletest ================================================ *.example.com { log { hostnames foo.example.com bar.example.com output file /foo-bar.txt } log { hostnames baz.example.com output file /baz.txt } } example.com:8443 { log { output file /port.txt } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.log0", "http.log.access.log1", "http.log.access.log2" ] }, "log0": { 
"writer": { "filename": "/foo-bar.txt", "output": "file" }, "include": [ "http.log.access.log0" ] }, "log1": { "writer": { "filename": "/baz.txt", "output": "file" }, "include": [ "http.log.access.log1" ] }, "log2": { "writer": { "filename": "/port.txt", "output": "file" }, "include": [ "http.log.access.log2" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.example.com" ] } ], "terminal": true } ], "logs": { "logger_names": { "bar.example.com": [ "log0" ], "baz.example.com": [ "log1" ], "foo.example.com": [ "log0" ] } } }, "srv1": { "listen": [ ":8443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ], "logs": { "logger_names": { "example.com": [ "log2" ] } } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_override_name_multiaccess.caddyfiletest ================================================ { log access-console { include http.log.access.foo output file access-localhost.log format console } log access-json { include http.log.access.foo output file access-localhost.json format json } } http://localhost:8881 { log foo } ---------- { "logging": { "logs": { "access-console": { "writer": { "filename": "access-localhost.log", "output": "file" }, "encoder": { "format": "console" }, "include": [ "http.log.access.foo" ] }, "access-json": { "writer": { "filename": "access-localhost.json", "output": "file" }, "encoder": { "format": "json" }, "include": [ "http.log.access.foo" ] }, "default": { "exclude": [ "http.log.access.foo" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] }, "logs": { "logger_names": { "localhost": [ "foo" ] } } } } } } } ================================================ FILE: 
caddytest/integration/caddyfile_adapt/log_override_name_multiaccess_debug.caddyfiletest ================================================ { debug log access-console { include http.log.access.foo output file access-localhost.log format console } log access-json { include http.log.access.foo output file access-localhost.json format json } } http://localhost:8881 { log foo } ---------- { "logging": { "logs": { "access-console": { "writer": { "filename": "access-localhost.log", "output": "file" }, "encoder": { "format": "console" }, "level": "DEBUG", "include": [ "http.log.access.foo" ] }, "access-json": { "writer": { "filename": "access-localhost.json", "output": "file" }, "encoder": { "format": "json" }, "level": "DEBUG", "include": [ "http.log.access.foo" ] }, "default": { "level": "DEBUG", "exclude": [ "http.log.access.foo" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] }, "logs": { "logger_names": { "localhost": [ "foo" ] } } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_roll_days.caddyfiletest ================================================ :80 log one { output file /var/log/access.log { mode 0644 dir_mode 0755 roll_size 1gb roll_uncompressed roll_compression none roll_local_time roll_keep 5 roll_keep_for 90d } } log two { output file /var/log/access-2.log { mode 0777 dir_mode from_file roll_size 1gib roll_compression zstd roll_interval 12h roll_at 00:00 06:00 12:00,18:00 roll_minutes 10 40 45,46 roll_keep 10 roll_keep_for 90d } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.one", "http.log.access.two" ] }, "one": { "writer": { "dir_mode": "0755", "filename": "/var/log/access.log", "mode": "0644", "output": "file", "roll_compression": "none", "roll_gzip": false, "roll_keep": 5, "roll_keep_days": 90, "roll_local_time": 
true, "roll_size_mb": 954 }, "include": [ "http.log.access.one" ] }, "two": { "writer": { "dir_mode": "from_file", "filename": "/var/log/access-2.log", "mode": "0777", "output": "file", "roll_at": [ "00:00", "06:00", "12:00", "18:00" ], "roll_compression": "zstd", "roll_interval": 43200000000000, "roll_keep": 10, "roll_keep_days": 90, "roll_minutes": [ 10, 40, 45, 46 ], "roll_size_mb": 1024 }, "include": [ "http.log.access.two" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "logs": { "default_logger_name": "two" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_sampling.caddyfiletest ================================================ :80 { log { sampling { interval 300 first 50 thereafter 40 } } } ---------- { "logging": { "logs": { "default": { "exclude": [ "http.log.access.log0" ] }, "log0": { "sampling": { "interval": 300, "first": 50, "thereafter": 40 }, "include": [ "http.log.access.log0" ] } } }, "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "logs": { "default_logger_name": "log0" } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/log_skip_hosts.caddyfiletest ================================================ one.example.com { log } two.example.com { } three.example.com { } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "three.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "one.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "two.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ], "logs": { "logger_names": { "one.example.com": [ "" ] }, "skip_hosts": [ "example.com", "three.example.com", "two.example.com" ] } } } } } } ================================================ FILE: 
caddytest/integration/caddyfile_adapt/map_and_vars_with_raw_types.caddyfiletest ================================================ example.com map {host} {my_placeholder} {magic_number} { # Should output boolean "true" and an integer example.com true 3 # Should output a string and null foo.example.com "string value" # Should output two strings (quoted int) (.*)\.example.com "${1} subdomain" "5" # Should output null and a string (quoted int) ~.*\.net$ - `7` # Should output a float and the string "false" ~.*\.xyz$ 123.456 "false" # Should output two strings, second being escaped quote default "unknown domain" \""" } vars foo bar vars { abc true def 1 ghi 2.3 jkl "mn op" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "defaults": [ "unknown domain", "\"" ], "destinations": [ "{my_placeholder}", "{magic_number}" ], "handler": "map", "mappings": [ { "input": "example.com", "outputs": [ true, 3 ] }, { "input": "foo.example.com", "outputs": [ "string value", null ] }, { "input": "(.*)\\.example.com", "outputs": [ "${1} subdomain", "5" ] }, { "input_regexp": ".*\\.net$", "outputs": [ null, "7" ] }, { "input_regexp": ".*\\.xyz$", "outputs": [ 123.456, "false" ] } ], "source": "{http.request.host}" }, { "abc": true, "def": 1, "ghi": 2.3, "handler": "vars", "jkl": "mn op" }, { "foo": "bar", "handler": "vars" } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/matcher_outside_site_block.caddyfiletest ================================================ @foo { path /foo } handle { respond "should not work" } ---------- request matchers may not be defined globally, they must be in a site block; found @foo, at Caddyfile:1 ================================================ FILE: caddytest/integration/caddyfile_adapt/matcher_syntax.caddyfiletest 
================================================ :80 { @matcher { method GET } respond @matcher "get" @matcher2 method POST respond @matcher2 "post" @matcher3 not method PUT respond @matcher3 "not put" @matcher4 vars "{http.request.uri}" "/vars-matcher" respond @matcher4 "from vars matcher" @matcher5 vars_regexp static "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$` respond @matcher5 "from vars_regexp matcher with name" @matcher6 vars_regexp "{http.request.uri}" `\.([a-f0-9]{6})\.(css|js)$` respond @matcher6 "from vars_regexp matcher without name" @matcher7 `path('/foo*') && method('GET')` respond @matcher7 "inline expression matcher shortcut" @matcher8 { header Foo bar header Foo foobar header Bar foo } respond @matcher8 "header matcher merging values of the same field" @matcher9 { query foo=bar foo=baz bar=foo query bar=baz } respond @matcher9 "query matcher merging pairs with the same keys" @matcher10 { header !Foo header Bar foo } respond @matcher10 "header matcher with null field matcher" @matcher11 remote_ip private_ranges respond @matcher11 "remote_ip matcher with private ranges" @matcher12 client_ip private_ranges respond @matcher12 "client_ip matcher with private ranges" @matcher13 { remote_ip 1.1.1.1 remote_ip 2.2.2.2 } respond @matcher13 "remote_ip merged" @matcher14 { client_ip 1.1.1.1 client_ip 2.2.2.2 } respond @matcher14 "client_ip merged" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "method": [ "GET" ] } ], "handle": [ { "body": "get", "handler": "static_response" } ] }, { "match": [ { "method": [ "POST" ] } ], "handle": [ { "body": "post", "handler": "static_response" } ] }, { "match": [ { "not": [ { "method": [ "PUT" ] } ] } ], "handle": [ { "body": "not put", "handler": "static_response" } ] }, { "match": [ { "vars": { "{http.request.uri}": [ "/vars-matcher" ] } } ], "handle": [ { "body": "from vars matcher", "handler": "static_response" } ] }, { "match": [ { "vars_regexp": { 
"{http.request.uri}": { "name": "static", "pattern": "\\.([a-f0-9]{6})\\.(css|js)$" } } } ], "handle": [ { "body": "from vars_regexp matcher with name", "handler": "static_response" } ] }, { "match": [ { "vars_regexp": { "{http.request.uri}": { "name": "matcher6", "pattern": "\\.([a-f0-9]{6})\\.(css|js)$" } } } ], "handle": [ { "body": "from vars_regexp matcher without name", "handler": "static_response" } ] }, { "match": [ { "expression": { "expr": "path('/foo*') \u0026\u0026 method('GET')", "name": "matcher7" } } ], "handle": [ { "body": "inline expression matcher shortcut", "handler": "static_response" } ] }, { "match": [ { "header": { "Bar": [ "foo" ], "Foo": [ "bar", "foobar" ] } } ], "handle": [ { "body": "header matcher merging values of the same field", "handler": "static_response" } ] }, { "match": [ { "query": { "bar": [ "foo", "baz" ], "foo": [ "bar", "baz" ] } } ], "handle": [ { "body": "query matcher merging pairs with the same keys", "handler": "static_response" } ] }, { "match": [ { "header": { "Bar": [ "foo" ], "Foo": null } } ], "handle": [ { "body": "header matcher with null field matcher", "handler": "static_response" } ] }, { "match": [ { "remote_ip": { "ranges": [ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8", "127.0.0.1/8", "fd00::/8", "::1" ] } } ], "handle": [ { "body": "remote_ip matcher with private ranges", "handler": "static_response" } ] }, { "match": [ { "client_ip": { "ranges": [ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8", "127.0.0.1/8", "fd00::/8", "::1" ] } } ], "handle": [ { "body": "client_ip matcher with private ranges", "handler": "static_response" } ] }, { "match": [ { "remote_ip": { "ranges": [ "1.1.1.1", "2.2.2.2" ] } } ], "handle": [ { "body": "remote_ip merged", "handler": "static_response" } ] }, { "match": [ { "client_ip": { "ranges": [ "1.1.1.1", "2.2.2.2" ] } } ], "handle": [ { "body": "client_ip merged", "handler": "static_response" } ] } ] } } } } } ================================================ FILE: 
caddytest/integration/caddyfile_adapt/matchers_in_route.caddyfiletest ================================================ :80 { route { # unused matchers should not panic # see https://github.com/caddyserver/caddy/issues/3745 @matcher1 path /path1 @matcher2 path /path2 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "handler": "subroute" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/method_directive.caddyfiletest ================================================ :8080 { method FOO } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8080" ], "routes": [ { "handle": [ { "handler": "rewrite", "method": "FOO" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/metrics_disable_om.caddyfiletest ================================================ :80 { metrics /metrics { disable_openmetrics } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/metrics" ] } ], "handle": [ { "disable_openmetrics": true, "handler": "metrics" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/metrics_merge_options.caddyfiletest ================================================ { metrics servers :80 { metrics { per_host } } } :80 { respond "Hello" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "body": "Hello", "handler": "static_response" } ] } ] } }, "metrics": { "per_host": true } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/metrics_perhost.caddyfiletest ================================================ { servers :80 { metrics { per_host } } } :80 { respond "Hello" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { 
"handle": [ { "body": "Hello", "handler": "static_response" } ] } ] } }, "metrics": { "per_host": true } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/metrics_syntax.caddyfiletest ================================================ :80 { metrics /metrics } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/metrics" ] } ], "handle": [ { "handler": "metrics" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/not_block_merging.caddyfiletest ================================================ :80 @test { not { header Abc "123" header Bcd "123" } } respond @test 403 ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "not": [ { "header": { "Abc": [ "123" ], "Bcd": [ "123" ] } } ] } ], "handle": [ { "handler": "static_response", "status_code": 403 } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_expanded_form.caddyfiletest ================================================ :8886 route { # Add trailing slash for directory requests @canonicalPath { file { try_files {path}/index.php } not path */ } redir @canonicalPath {orig_path}/{orig_?query} 308 # If the requested file does not exist, try index files @indexFiles { file { try_files {path} {path}/index.php index.php split_path .php } } rewrite @indexFiles {file_match.relative} # Proxy PHP files to the FastCGI responder @phpFiles { path *.php } reverse_proxy @phpFiles 127.0.0.1:9000 { transport fastcgi { split .php } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8886" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}" ] }, "status_code": 308 } ], 
"match": [ { "file": { "try_files": [ "{http.request.uri.path}/index.php" ] }, "not": [ { "path": [ "*/" ] } ] } ] }, { "group": "group0", "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ], "match": [ { "file": { "split_path": [ ".php" ], "try_files": [ "{http.request.uri.path}", "{http.request.uri.path}/index.php", "index.php" ] } } ] }, { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "fastcgi", "split_path": [ ".php" ] }, "upstreams": [ { "dial": "127.0.0.1:9000" } ] } ], "match": [ { "path": [ "*.php" ] } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_handle_response.caddyfiletest ================================================ :8881 { php_fastcgi app:9000 { env FOO bar @error status 4xx handle_response @error { root * /errors rewrite * /{http.reverse_proxy.status_code}.html file_server } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8881" ], "routes": [ { "match": [ { "file": { "try_files": [ "{http.request.uri.path}/index.php" ] }, "not": [ { "path": [ "*/" ] } ] } ], "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}" ] }, "status_code": 308 } ] }, { "match": [ { "file": { "try_files": [ "{http.request.uri.path}", "{http.request.uri.path}/index.php", "index.php" ], "try_policy": "first_exist_fallback", "split_path": [ ".php" ] } } ], "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ] }, { "match": [ { "path": [ "*.php" ] } ], "handle": [ { "handle_response": [ { "match": { "status_code": [ 4 ] }, "routes": [ { "handle": [ { "handler": "vars", "root": "/errors" } ] }, { "group": "group0", "handle": [ { "handler": "rewrite", "uri": "/{http.reverse_proxy.status_code}.html" } ] }, { "handle": [ { "handler": "file_server", "hide": [ "./Caddyfile" ] } ] } ] } ], "handler": "reverse_proxy", 
"transport": { "env": { "FOO": "bar" }, "protocol": "fastcgi", "split_path": [ ".php" ] }, "upstreams": [ { "dial": "app:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_index_off.caddyfiletest ================================================ :8884 php_fastcgi localhost:9000 { # some php_fastcgi-specific subdirectives split .php .php5 env VAR1 value1 env VAR2 value2 root /var/www index off dial_timeout 3s read_timeout 10s write_timeout 20s # passed through to reverse_proxy (directive order doesn't matter!) lb_policy random } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "match": [ { "path": [ "*.php", "*.php5" ] } ], "handle": [ { "handler": "reverse_proxy", "load_balancing": { "selection_policy": { "policy": "random" } }, "transport": { "dial_timeout": 3000000000, "env": { "VAR1": "value1", "VAR2": "value2" }, "protocol": "fastcgi", "read_timeout": 10000000000, "root": "/var/www", "split_path": [ ".php", ".php5" ], "write_timeout": 20000000000 }, "upstreams": [ { "dial": "localhost:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_matcher.caddyfiletest ================================================ :8884 # the use of a host matcher here should cause this # site block to be wrapped in a subroute, even though # the site block does not have a hostname; this is # to prevent auto-HTTPS from picking up on this host # matcher because it is not a key on the site block @test host example.com php_fastcgi @test localhost:9000 ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}" ] }, 
"status_code": 308 } ], "match": [ { "file": { "try_files": [ "{http.request.uri.path}/index.php" ] }, "not": [ { "path": [ "*/" ] } ] } ] }, { "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ], "match": [ { "file": { "split_path": [ ".php" ], "try_files": [ "{http.request.uri.path}", "{http.request.uri.path}/index.php", "index.php" ], "try_policy": "first_exist_fallback" } } ] }, { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "fastcgi", "split_path": [ ".php" ] }, "upstreams": [ { "dial": "localhost:9000" } ] } ], "match": [ { "path": [ "*.php" ] } ] } ] } ], "match": [ { "host": [ "example.com" ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_subdirectives.caddyfiletest ================================================ :8884 php_fastcgi localhost:9000 { # some php_fastcgi-specific subdirectives split .php .php5 env VAR1 value1 env VAR2 value2 root /var/www index index.php5 # passed through to reverse_proxy (directive order doesn't matter!) 
lb_policy random } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "match": [ { "file": { "try_files": [ "{http.request.uri.path}/index.php5" ] }, "not": [ { "path": [ "*/" ] } ] } ], "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}" ] }, "status_code": 308 } ] }, { "match": [ { "file": { "try_files": [ "{http.request.uri.path}", "{http.request.uri.path}/index.php5", "index.php5" ], "try_policy": "first_exist_fallback", "split_path": [ ".php", ".php5" ] } } ], "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ] }, { "match": [ { "path": [ "*.php", "*.php5" ] } ], "handle": [ { "handler": "reverse_proxy", "load_balancing": { "selection_policy": { "policy": "random" } }, "transport": { "env": { "VAR1": "value1", "VAR2": "value2" }, "protocol": "fastcgi", "root": "/var/www", "split_path": [ ".php", ".php5" ] }, "upstreams": [ { "dial": "localhost:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override.caddyfiletest ================================================ :8884 php_fastcgi localhost:9000 { # some php_fastcgi-specific subdirectives split .php .php5 env VAR1 value1 env VAR2 value2 root /var/www try_files {path} {path}/index.php =404 dial_timeout 3s read_timeout 10s write_timeout 20s # passed through to reverse_proxy (directive order doesn't matter!) 
lb_policy random } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "match": [ { "file": { "try_files": [ "{http.request.uri.path}/index.php" ] }, "not": [ { "path": [ "*/" ] } ] } ], "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}" ] }, "status_code": 308 } ] }, { "match": [ { "file": { "try_files": [ "{http.request.uri.path}", "{http.request.uri.path}/index.php", "=404" ], "split_path": [ ".php", ".php5" ] } } ], "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ] }, { "match": [ { "path": [ "*.php", "*.php5" ] } ], "handle": [ { "handler": "reverse_proxy", "load_balancing": { "selection_policy": { "policy": "random" } }, "transport": { "dial_timeout": 3000000000, "env": { "VAR1": "value1", "VAR2": "value2" }, "protocol": "fastcgi", "read_timeout": 10000000000, "root": "/var/www", "split_path": [ ".php", ".php5" ], "write_timeout": 20000000000 }, "upstreams": [ { "dial": "localhost:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/php_fastcgi_try_files_override_no_dir_index.caddyfiletest ================================================ :8884 php_fastcgi localhost:9000 { # some php_fastcgi-specific subdirectives split .php .php5 env VAR1 value1 env VAR2 value2 root /var/www try_files {path} index.php dial_timeout 3s read_timeout 10s write_timeout 20s # passed through to reverse_proxy (directive order doesn't matter!) 
lb_policy random } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "match": [ { "file": { "try_files": [ "{http.request.uri.path}", "index.php" ], "try_policy": "first_exist_fallback", "split_path": [ ".php", ".php5" ] } } ], "handle": [ { "handler": "rewrite", "uri": "{http.matchers.file.relative}" } ] }, { "match": [ { "path": [ "*.php", "*.php5" ] } ], "handle": [ { "handler": "reverse_proxy", "load_balancing": { "selection_policy": { "policy": "random" } }, "transport": { "dial_timeout": 3000000000, "env": { "VAR1": "value1", "VAR2": "value2" }, "protocol": "fastcgi", "read_timeout": 10000000000, "root": "/var/www", "split_path": [ ".php", ".php5" ], "write_timeout": 20000000000 }, "upstreams": [ { "dial": "localhost:9000" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/portless_upstream.caddyfiletest ================================================ whoami.example.com { reverse_proxy whoami } app.example.com { reverse_proxy app:80 } unix.example.com { reverse_proxy unix//path/to/socket } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "whoami.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "whoami:80" } ] } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "unix.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "unix//path/to/socket" } ] } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "app.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "app:80" } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/push.caddyfiletest 
================================================ :80 push * /foo.txt push { GET /foo.txt } push { GET /foo.txt HEAD /foo.txt } push { headers { Foo bar } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "handler": "push", "resources": [ { "target": "/foo.txt" } ] }, { "handler": "push", "resources": [ { "method": "GET", "target": "/foo.txt" } ] }, { "handler": "push", "resources": [ { "method": "GET", "target": "/foo.txt" }, { "method": "HEAD", "target": "/foo.txt" } ] }, { "handler": "push", "headers": { "set": { "Foo": [ "bar" ] } } } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/renewal_window_ratio_global.caddyfiletest ================================================ { renewal_window_ratio 0.1666 } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "renewal_window_ratio": 0.1666 } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/renewal_window_ratio_tls_directive.caddyfiletest ================================================ { renewal_window_ratio 0.1666 } a.example.com { tls { renewal_window_ratio 0.25 } } b.example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "b.example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "a.example.com" ], "renewal_window_ratio": 0.25 }, { "renewal_window_ratio": 0.1666 } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/replaceable_upstream.caddyfiletest ================================================ *.sandbox.localhost { @sandboxPort { 
header_regexp first_label Host ^([0-9]{3})\.sandbox\. } handle @sandboxPort { reverse_proxy {re.first_label.1} } handle { redir {scheme}://application.localhost } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.sandbox.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "{http.regexp.first_label.1}" } ] } ] } ] } ], "match": [ { "header_regexp": { "Host": { "name": "first_label", "pattern": "^([0-9]{3})\\.sandbox\\." } } } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.scheme}://application.localhost" ] }, "status_code": 302 } ] } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/replaceable_upstream_partial_port.caddyfiletest ================================================ *.sandbox.localhost { @sandboxPort { header_regexp port Host ^([0-9]{3})\.sandbox\. } handle @sandboxPort { reverse_proxy app:6{re.port.1} } handle { redir {scheme}://application.localhost } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.sandbox.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "app:6{http.regexp.port.1}" } ] } ] } ] } ], "match": [ { "header_regexp": { "Host": { "name": "port", "pattern": "^([0-9]{3})\\.sandbox\\." 
} } } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.scheme}://application.localhost" ] }, "status_code": 302 } ] } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/replaceable_upstream_port.caddyfiletest ================================================ *.sandbox.localhost { @sandboxPort { header_regexp port Host ^([0-9]{3})\.sandbox\. } handle @sandboxPort { reverse_proxy app:{re.port.1} } handle { redir {scheme}://application.localhost } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.sandbox.localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "app:{http.regexp.port.1}" } ] } ] } ] } ], "match": [ { "header_regexp": { "Host": { "name": "port", "pattern": "^([0-9]{3})\\.sandbox\\." 
} } } ] }, { "group": "group2", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "{http.request.scheme}://application.localhost" ] }, "status_code": 302 } ] } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/request_body.caddyfiletest ================================================ localhost request_body { max_size 1MB } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "request_body", "max_size": 1000000 } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/request_header.caddyfiletest ================================================ :80 @matcher path /something* request_header @matcher Denis "Ritchie" request_header +Edsger "Dijkstra" request_header -Wolfram @images path /images/* request_header @images Cache-Control "public, max-age=3600, stale-while-revalidate=86400" ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/something*" ] } ], "handle": [ { "handler": "headers", "request": { "set": { "Denis": [ "Ritchie" ] } } } ] }, { "match": [ { "path": [ "/images/*" ] } ], "handle": [ { "handler": "headers", "request": { "set": { "Cache-Control": [ "public, max-age=3600, stale-while-revalidate=86400" ] } } } ] }, { "handle": [ { "handler": "headers", "request": { "add": { "Edsger": [ "Dijkstra" ] } } }, { "handler": "headers", "request": { "delete": [ "Wolfram" ] } } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_buffers.caddyfiletest ================================================ https://example.com { reverse_proxy 
https://localhost:54321 { request_buffers unlimited response_buffers unlimited } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "request_buffers": -1, "response_buffers": -1, "transport": { "protocol": "http", "tls": {} }, "upstreams": [ { "dial": "localhost:54321" } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams.caddyfiletest ================================================ :8884 { reverse_proxy { dynamic a foo 9000 } reverse_proxy { dynamic a { name foo port 9000 refresh 5m resolvers 8.8.8.8 8.8.4.4 dial_timeout 2s dial_fallback_delay 300ms versions ipv6 } } } :8885 { reverse_proxy { dynamic srv _api._tcp.example.com } reverse_proxy { dynamic srv { service api proto tcp name example.com refresh 5m resolvers 8.8.8.8 8.8.4.4 dial_timeout 1s dial_fallback_delay -1s } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "dynamic_upstreams": { "name": "foo", "port": "9000", "source": "a" }, "handler": "reverse_proxy" }, { "dynamic_upstreams": { "dial_fallback_delay": 300000000, "dial_timeout": 2000000000, "name": "foo", "port": "9000", "refresh": 300000000000, "resolver": { "addresses": [ "8.8.8.8", "8.8.4.4" ] }, "source": "a", "versions": { "ipv6": true } }, "handler": "reverse_proxy" } ] } ] }, "srv1": { "listen": [ ":8885" ], "routes": [ { "handle": [ { "dynamic_upstreams": { "name": "_api._tcp.example.com", "source": "srv" }, "handler": "reverse_proxy" }, { "dynamic_upstreams": { "dial_fallback_delay": -1000000000, "dial_timeout": 1000000000, "name": "example.com", "proto": "tcp", "refresh": 300000000000, "resolver": { "addresses": [ "8.8.8.8", "8.8.4.4" ] }, "service": "api", "source": "srv" }, 
"handler": "reverse_proxy" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_dynamic_upstreams_grace_period.caddyfiletest ================================================ :8884 { reverse_proxy { dynamic srv { name foo refresh 5m grace_period 5s } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "dynamic_upstreams": { "grace_period": 5000000000, "name": "foo", "refresh": 300000000000, "source": "srv" }, "handler": "reverse_proxy" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_empty_non_http_transport.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport fastcgi } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "fastcgi" }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_h2c_shorthand.caddyfiletest ================================================ :8884 reverse_proxy h2c://localhost:8080 reverse_proxy unix+h2c//run/app.sock ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "versions": [ "h2c", "2" ] }, "upstreams": [ { "dial": "localhost:8080" } ] }, { "handler": "reverse_proxy", "transport": { "protocol": "http", "versions": [ "h2c", "2" ] }, "upstreams": [ { "dial": "unix//run/app.sock" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_handle_response.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { @500 status 500 
replace_status @500 400 @all status 2xx 3xx 4xx 5xx replace_status @all {http.error.status_code} replace_status {http.error.status_code} @accel header X-Accel-Redirect * handle_response @accel { respond "Header X-Accel-Redirect!" } @another { header X-Another * } handle_response @another { respond "Header X-Another!" } @401 status 401 handle_response @401 { respond "Status 401!" } handle_response { respond "Any! This should be last in the JSON!" } @403 { status 403 } handle_response @403 { respond "Status 403!" } @multi { status 401 403 status 404 header Foo * header Bar * } handle_response @multi { respond "Headers Foo, Bar AND statuses 401, 403 and 404!" } @200 status 200 handle_response @200 { copy_response_headers { include Foo Bar } respond "Copied headers from the response" } @201 status 201 handle_response @201 { header Foo "Copying the response" copy_response 404 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handle_response": [ { "match": { "status_code": [ 500 ] }, "status_code": 400 }, { "match": { "status_code": [ 2, 3, 4, 5 ] }, "status_code": "{http.error.status_code}" }, { "match": { "headers": { "X-Accel-Redirect": [ "*" ] } }, "routes": [ { "handle": [ { "body": "Header X-Accel-Redirect!", "handler": "static_response" } ] } ] }, { "match": { "headers": { "X-Another": [ "*" ] } }, "routes": [ { "handle": [ { "body": "Header X-Another!", "handler": "static_response" } ] } ] }, { "match": { "status_code": [ 401 ] }, "routes": [ { "handle": [ { "body": "Status 401!", "handler": "static_response" } ] } ] }, { "match": { "status_code": [ 403 ] }, "routes": [ { "handle": [ { "body": "Status 403!", "handler": "static_response" } ] } ] }, { "match": { "headers": { "Bar": [ "*" ], "Foo": [ "*" ] }, "status_code": [ 401, 403, 404 ] }, "routes": [ { "handle": [ { "body": "Headers Foo, Bar AND statuses 401, 403 and 404!", "handler": "static_response" } ] } ] }, { "match": { "status_code": [ 200 ] 
}, "routes": [ { "handle": [ { "handler": "copy_response_headers", "include": [ "Foo", "Bar" ] }, { "body": "Copied headers from the response", "handler": "static_response" } ] } ] }, { "match": { "status_code": [ 201 ] }, "routes": [ { "handle": [ { "handler": "headers", "response": { "set": { "Foo": [ "Copying the response" ] } } }, { "handler": "copy_response", "status_code": 404 } ] } ] }, { "status_code": "{http.error.status_code}" }, { "routes": [ { "handle": [ { "body": "Any! This should be last in the JSON!", "handler": "static_response" } ] } ] } ], "handler": "reverse_proxy", "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_health_headers.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { health_headers { Host example.com X-Header-Key 95ca39e3cbe7 X-Header-Keys VbG4NZwWnipo 335Q9/MhqcNU3s2TO X-Empty-Value Same-Key 1 Same-Key 2 X-System-Hostname {system.hostname} } health_uri /health } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "health_checks": { "active": { "headers": { "Host": [ "example.com" ], "Same-Key": [ "1", "2" ], "X-Empty-Value": [ "" ], "X-Header-Key": [ "95ca39e3cbe7" ], "X-Header-Keys": [ "VbG4NZwWnipo", "335Q9/MhqcNU3s2TO" ], "X-System-Hostname": [ "{system.hostname}" ] }, "uri": "/health" } }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_health_method.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { health_uri /health health_method HEAD } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "health_checks": { "active": { 
"method": "HEAD", "uri": "/health" } }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_health_path_query.caddyfiletest ================================================ # Health with query in the uri :8443 { reverse_proxy localhost:54321 { health_uri /health?ready=1 health_status 2xx } } # Health without query in the uri :8444 { reverse_proxy localhost:54321 { health_uri /health health_status 200 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8443" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "health_checks": { "active": { "expect_status": 2, "uri": "/health?ready=1" } }, "upstreams": [ { "dial": "localhost:54321" } ] } ] } ] }, "srv1": { "listen": [ ":8444" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "health_checks": { "active": { "expect_status": 200, "uri": "/health" } }, "upstreams": [ { "dial": "localhost:54321" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_health_reqbody.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { health_uri /health health_request_body "test body" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "health_checks": { "active": { "body": "test body", "uri": "/health" } }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_forward_proxy_url.txt ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport http { forward_proxy_url http://localhost:8080 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": 
"reverse_proxy", "transport": { "network_proxy": { "from": "url", "url": "http://localhost:8080" }, "protocol": "http" }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_none_proxy.txt ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport http { network_proxy none } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "network_proxy": { "from": "none" }, "protocol": "http" }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_file_cert.txt ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport http { tls_trust_pool file { pem_file ../caddy.ca.cer } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "tls": { "ca": { "pem_files": [ "../caddy.ca.cer" ], "provider": "file" } } }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_tls_inline_cert.txt ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport http { tls_trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "tls": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] } } }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_http_transport_url_proxy.txt ================================================ :8884 reverse_proxy 127.0.0.1:65535 { transport http { network_proxy url http://localhost:8080 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "network_proxy": { "from": "url", "url": "http://localhost:8080" }, "protocol": "http" }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance.caddyfiletest ================================================ :8884 
reverse_proxy 127.0.0.1:65535 { lb_policy first lb_retries 5 lb_try_duration 10s lb_try_interval 500ms lb_retry_match { path /foo* method POST } lb_retry_match path /bar* } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "load_balancing": { "retries": 5, "retry_match": [ { "method": [ "POST" ], "path": [ "/foo*" ] }, { "path": [ "/bar*" ] } ], "selection_policy": { "policy": "first" }, "try_duration": 10000000000, "try_interval": 500000000 }, "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_load_balance_wrr.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 127.0.0.1:35535 { lb_policy weighted_round_robin 10 1 lb_retries 5 lb_try_duration 10s lb_try_interval 500ms lb_retry_match { path /foo* method POST } lb_retry_match path /bar* } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "load_balancing": { "retries": 5, "retry_match": [ { "method": [ "POST" ], "path": [ "/foo*" ] }, { "path": [ "/bar*" ] } ], "selection_policy": { "policy": "weighted_round_robin", "weights": [ 10, 1 ] }, "try_duration": 10000000000, "try_interval": 500000000 }, "upstreams": [ { "dial": "127.0.0.1:65535" }, { "dial": "127.0.0.1:35535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_localaddr.caddyfiletest ================================================ https://example.com { reverse_proxy http://localhost:54321 { transport http { local_address 192.168.0.1 } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": 
"reverse_proxy", "transport": { "local_address": "192.168.0.1", "protocol": "http" }, "upstreams": [ { "dial": "localhost:54321" } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_options.caddyfiletest ================================================ https://example.com { reverse_proxy /path https://localhost:54321 { header_up Host {upstream_hostport} header_up Foo bar method GET rewrite /rewritten?uri={uri} request_buffers 4KB transport http { read_buffer 10MB write_buffer 20MB max_response_header 30MB dial_timeout 3s dial_fallback_delay 5s response_header_timeout 8s expect_continue_timeout 9s resolvers 8.8.8.8 8.8.4.4 versions h2c 2 compression off max_conns_per_host 5 keepalive_idle_conns_per_host 2 keepalive_interval 30s tls_renegotiation freely tls_except_ports 8181 8182 } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "headers": { "request": { "set": { "Foo": [ "bar" ], "Host": [ "{http.reverse_proxy.upstream.hostport}" ] } } }, "request_buffers": 4000, "rewrite": { "method": "GET", "uri": "/rewritten?uri={http.request.uri}" }, "transport": { "compression": false, "dial_fallback_delay": 5000000000, "dial_timeout": 3000000000, "expect_continue_timeout": 9000000000, "keep_alive": { "max_idle_conns_per_host": 2, "probe_interval": 30000000000 }, "max_conns_per_host": 5, "max_response_header_size": 30000000, "protocol": "http", "read_buffer_size": 10000000, "resolver": { "addresses": [ "8.8.8.8", "8.8.4.4" ] }, "response_header_timeout": 8000000000, "tls": { "except_ports": [ "8181", "8182" ], "renegotiation": "freely" }, "versions": [ "h2c", "2" ], "write_buffer_size": 20000000 }, "upstreams": [ { "dial": "localhost:54321" } ] } ], "match": [ { "path": [ "/path" ] } ] } ] } ], 
"terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_port_range.caddyfiletest ================================================ :8884 { # Port range reverse_proxy localhost:8001-8002 # Port range with placeholder reverse_proxy {host}:8001-8002 # Port range with scheme reverse_proxy https://localhost:8001-8002 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "localhost:8001" }, { "dial": "localhost:8002" } ] }, { "handler": "reverse_proxy", "upstreams": [ { "dial": "{http.request.host}:8001" }, { "dial": "{http.request.host}:8002" } ] }, { "handler": "reverse_proxy", "transport": { "protocol": "http", "tls": {} }, "upstreams": [ { "dial": "localhost:8001" }, { "dial": "localhost:8002" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_trusted_proxies.caddyfiletest ================================================ :8884 reverse_proxy 127.0.0.1:65535 { trusted_proxies 127.0.0.1 } reverse_proxy 127.0.0.1:65535 { trusted_proxies private_ranges } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "trusted_proxies": [ "127.0.0.1" ], "upstreams": [ { "dial": "127.0.0.1:65535" } ] }, { "handler": "reverse_proxy", "trusted_proxies": [ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8", "127.0.0.1/8", "fd00::/8", "::1" ], "upstreams": [ { "dial": "127.0.0.1:65535" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_trusted_proxies_unix.caddyfiletest ================================================ { servers { trusted_proxies_unix } } example.com { reverse_proxy https://local:8080 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ 
":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "tls": {} }, "upstreams": [ { "dial": "local:8080" } ] } ] } ] } ], "terminal": true } ], "trusted_proxies_unix": true } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/reverse_proxy_upstream_placeholder.caddyfiletest ================================================ :8884 { map {host} {upstream} { foo.example.com 1.2.3.4 default 2.3.4.5 } # Upstream placeholder with a port should retain the port reverse_proxy {upstream}:80 } :8885 { map {host} {upstream} { foo.example.com 1.2.3.4:8080 default 2.3.4.5:8080 } # Upstream placeholder with no port should not have a port joined reverse_proxy {upstream} } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8884" ], "routes": [ { "handle": [ { "defaults": [ "2.3.4.5" ], "destinations": [ "{upstream}" ], "handler": "map", "mappings": [ { "input": "foo.example.com", "outputs": [ "1.2.3.4" ] } ], "source": "{http.request.host}" }, { "handler": "reverse_proxy", "upstreams": [ { "dial": "{upstream}:80" } ] } ] } ] }, "srv1": { "listen": [ ":8885" ], "routes": [ { "handle": [ { "defaults": [ "2.3.4.5:8080" ], "destinations": [ "{upstream}" ], "handler": "map", "mappings": [ { "input": "foo.example.com", "outputs": [ "1.2.3.4:8080" ] } ], "source": "{http.request.host}" }, { "handler": "reverse_proxy", "upstreams": [ { "dial": "{upstream}" } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/rewrite_directive_permutations.caddyfiletest ================================================ :8080 # With explicit wildcard matcher route { rewrite * /a } # With path matcher route { rewrite /path /b } # With named matcher route { @named method GET rewrite @named /c } # With no matcher, assumed to be wildcard route { 
rewrite /d } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8080" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "group": "group0", "handle": [ { "handler": "rewrite", "uri": "/a" } ] } ] }, { "handler": "subroute", "routes": [ { "group": "group1", "handle": [ { "handler": "rewrite", "uri": "/b" } ], "match": [ { "path": [ "/path" ] } ] } ] }, { "handler": "subroute", "routes": [ { "group": "group2", "handle": [ { "handler": "rewrite", "uri": "/c" } ], "match": [ { "method": [ "GET" ] } ] } ] }, { "handler": "subroute", "routes": [ { "group": "group3", "handle": [ { "handler": "rewrite", "uri": "/d" } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/root_directive_permutations.caddyfiletest ================================================ :8080 # With explicit wildcard matcher route { root * /a } # With path matcher route { root /path /b } # With named matcher route { @named method GET root @named /c } # With no matcher, assumed to be wildcard route { root /d } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8080" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/a" } ] } ] }, { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/b" } ], "match": [ { "path": [ "/path" ] } ] } ] }, { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/c" } ], "match": [ { "method": [ "GET" ] } ] } ] }, { "handler": "subroute", "routes": [ { "handle": [ { "handler": "vars", "root": "/d" } ] } ] } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/server_names.caddyfiletest ================================================ { servers :443 { name https } servers :8000 { name app1 } servers :8001 { name app2 } servers 123.123.123.123:8002 { name bind-server } } example.com { } :8000 { 
} :8001, :8002 { } :8002 { bind 123.123.123.123 222.222.222.222 } ---------- { "apps": { "http": { "servers": { "app1": { "listen": [ ":8000" ] }, "app2": { "listen": [ ":8001" ] }, "bind-server": { "listen": [ "123.123.123.123:8002", "222.222.222.222:8002" ] }, "https": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv4": { "listen": [ ":8002" ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/shorthand_parameterized_placeholders.caddyfiletest ================================================ localhost:80 respond * "{header.content-type} {labels.0} {query.p} {path.0} {re.name.0}" @match path_regexp ^/foo(.*)$ respond @match "{re.1}" ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "{http.regexp.1}", "handler": "static_response" } ], "match": [ { "path_regexp": { "name": "match", "pattern": "^/foo(.*)$" } } ] }, { "handle": [ { "body": "{http.request.header.content-type} {http.request.host.labels.0} {http.request.uri.query.p} {http.request.uri.path.0} {http.regexp.name.0}", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/site_address_invalid_port.caddyfiletest ================================================ :70000 handle { respond "should not work" } ---------- port 70000 is out of range ================================================ FILE: caddytest/integration/caddyfile_adapt/site_address_negative_port.caddyfiletest ================================================ :-1 handle { respond "should not work" } ---------- port -1 is out of range ================================================ FILE: caddytest/integration/caddyfile_adapt/site_address_unsupported_scheme.caddyfiletest 
================================================ foo://example.com handle { respond "hello" } ---------- unsupported URL scheme foo:// ================================================ FILE: caddytest/integration/caddyfile_adapt/site_address_wss_invalid_port.caddyfiletest ================================================ wss://example.com:70000 handle { respond "should not work" } ---------- port 70000 is out of range ================================================ FILE: caddytest/integration/caddyfile_adapt/site_address_wss_scheme.caddyfiletest ================================================ wss://example.com handle { respond "hello" } ---------- the scheme wss:// is only supported in browsers; use https:// instead ================================================ FILE: caddytest/integration/caddyfile_adapt/site_block_sorting.caddyfiletest ================================================ # https://caddy.community/t/caddy-suddenly-directs-my-site-to-the-wrong-directive/11597/2 abcdef { respond "abcdef" } abcdefg { respond "abcdefg" } abc { respond "abc" } abcde, http://abcde { respond "abcde" } :443, ab { respond "443 or ab" } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "abcdefg" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "abcdefg", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "abcdef" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "abcdef", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "abcde" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "abcde", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "abc" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "abc", "handler": "static_response" } ] } ] } ], "terminal": true }, { "handle": [ { "handler": 
"subroute", "routes": [ { "handle": [ { "body": "443 or ab", "handler": "static_response" } ] } ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "abcde" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "abcde", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "certificates": { "automate": [ "ab" ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/sort_directives_with_any_matcher_first.caddyfiletest ================================================ :80 respond 200 @untrusted not remote_ip 10.1.1.0/24 respond @untrusted 401 ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "not": [ { "remote_ip": { "ranges": [ "10.1.1.0/24" ] } } ] } ], "handle": [ { "handler": "static_response", "status_code": 401 } ] }, { "handle": [ { "handler": "static_response", "status_code": 200 } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/sort_directives_within_handle.caddyfiletest ================================================ *.example.com { @foo host foo.example.com handle @foo { handle_path /strip { respond "this should be first" } handle_path /strip* { respond "this should be second" } handle { respond "this should be last" } } handle { respond "this should be last" } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group6", "handle": [ { "handler": "subroute", "routes": [ { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "rewrite", "strip_path_prefix": "/strip" } ] }, { "handle": [ { "body": "this should be first", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/strip" ] } ] }, { "group": 
"group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "rewrite", "strip_path_prefix": "/strip" } ] }, { "handle": [ { "body": "this should be second", "handler": "static_response" } ] } ] } ], "match": [ { "path": [ "/strip*" ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "this should be last", "handler": "static_response" } ] } ] } ] } ] } ], "match": [ { "host": [ "foo.example.com" ] } ] }, { "group": "group6", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "this should be last", "handler": "static_response" } ] } ] } ] } ] } ], "terminal": true } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/sort_vars_in_reverse.caddyfiletest ================================================ :80 vars /foobar foo last vars /foo foo middle-last vars /foo* foo middle-first vars * foo first ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "handle": [ { "foo": "first", "handler": "vars" } ] }, { "match": [ { "path": [ "/foo*" ] } ], "handle": [ { "foo": "middle-first", "handler": "vars" } ] }, { "match": [ { "path": [ "/foo" ] } ], "handle": [ { "foo": "middle-last", "handler": "vars" } ] }, { "match": [ { "path": [ "/foobar" ] } ], "handle": [ { "foo": "last", "handler": "vars" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_acme_dns_override_global_dns.caddyfiletest ================================================ { dns mock foo acme_dns mock bar } localhost { tls { resolvers 8.8.8.8 8.8.4.4 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "provider": { "argument": "bar", "name": 
"mock" }, "resolvers": [ "8.8.8.8", "8.8.4.4" ] } }, "module": "acme" } ] }, { "issuers": [ { "challenges": { "dns": { "provider": { "argument": "bar", "name": "mock" } } }, "module": "acme" } ] } ] }, "dns": { "argument": "foo", "name": "mock" } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_acme_preferred_chains.caddyfiletest ================================================ localhost tls { issuer acme { preferred_chains { any_common_name "Generic CA 1" "Generic CA 2" } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "module": "acme", "preferred_chains": { "any_common_name": [ "Generic CA 1", "Generic CA 2" ] } } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_1.caddyfiletest ================================================ { local_certs } *.tld, *.*.tld { tls { on_demand } } foo.tld, www.foo.tld { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "foo.tld", "www.foo.tld" ] } ], "terminal": true }, { "match": [ { "host": [ "*.tld", "*.*.tld" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "foo.tld", "www.foo.tld" ], "issuers": [ { "module": "internal" } ] }, { "subjects": [ "*.*.tld", "*.tld" ], "issuers": [ { "module": "internal" } ], "on_demand": true }, { "issuers": [ { "module": "internal" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_10.caddyfiletest ================================================ # example from issue #4667 { auto_https off } https://, example.com { tls test.crt test.key respond "Hello World" } ---------- { 
"apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "handle": [ { "body": "Hello World", "handler": "static_response" } ] } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": [ "cert0" ] } } ], "automatic_https": { "disable": true } } } }, "tls": { "certificates": { "load_files": [ { "certificate": "test.crt", "key": "test.key", "tags": [ "cert0" ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_11.caddyfiletest ================================================ # example from https://caddy.community/t/21415 a.com { tls { get_certificate http http://foo.com/get } } b.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.com" ] } ], "terminal": true }, { "match": [ { "host": [ "b.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "a.com" ], "get_certificate": [ { "url": "http://foo.com/get", "via": "http" } ] }, { "subjects": [ "b.com" ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_2.caddyfiletest ================================================ # issue #3953 { cert_issuer zerossl api_key } example.com { tls { on_demand key_type rsa2048 } } http://example.net { } :1234 { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":1234" ] }, "srv1": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv2": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "example.net" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "api_key": "api_key", "module": "zerossl" } ], "key_type": "rsa2048", "on_demand": true }, { "issuers": [ { "api_key": "api_key", "module": "zerossl" } ] } ] } } } } 
================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_3.caddyfiletest ================================================ # https://caddy.community/t/caddyfile-having-individual-sites-differ-from-global-options/11297 { local_certs } a.example.com { tls internal } b.example.com { tls abc@example.com } c.example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "b.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "c.example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "b.example.com" ], "issuers": [ { "email": "abc@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "email": "abc@example.com", "module": "acme" } ] }, { "issuers": [ { "module": "internal" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_4.caddyfiletest ================================================ { email my.email@example.com } :82 { redir https://example.com{uri} } :83 { redir https://example.com{uri} } :84 { redir https://example.com{uri} } abc.de { redir https://example.com{uri} } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "abc.de" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "https://example.com{http.request.uri}" ] }, "status_code": 302 } ] } ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":82" ], "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "https://example.com{http.request.uri}" ] }, "status_code": 302 } ] } ] }, "srv2": { "listen": [ ":83" ], "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": 
[ "https://example.com{http.request.uri}" ] }, "status_code": 302 } ] } ] }, "srv3": { "listen": [ ":84" ], "routes": [ { "handle": [ { "handler": "static_response", "headers": { "Location": [ "https://example.com{http.request.uri}" ] }, "status_code": 302 } ] } ] } } }, "tls": { "automation": { "policies": [ { "issuers": [ { "email": "my.email@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "email": "my.email@example.com", "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_5.caddyfiletest ================================================ a.example.com { } b.example.com { } :443 { tls { on_demand } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "b.example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "a.example.com", "b.example.com" ] }, { "on_demand": true } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_6.caddyfiletest ================================================ # (this Caddyfile is contrived, but based on issue #4161) example.com { tls { ca https://foobar } } example.com:8443 { tls { ca https://foobar } } example.com:8444 { tls { ca https://foobar } } example.com:8445 { tls { ca https://foobar } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":8443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv2": { "listen": [ ":8444" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv3": { "listen": [ ":8445" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], 
"terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "ca": "https://foobar", "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_7.caddyfiletest ================================================ # (this Caddyfile is contrived, but based on issues #4176 and #4198) http://example.com { } https://example.com { tls internal } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "module": "internal" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_8.caddyfiletest ================================================ # (this Caddyfile is contrived, but based on issues #4176 and #4198) http://example.com { } https://example.com { tls abc@example.com } http://localhost:8081 { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true } ] }, "srv2": { "listen": [ ":8081" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] } } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "email": "abc@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "email": "abc@example.com", "module": "acme" } ] } ] } } } } ================================================ FILE: 
caddytest/integration/caddyfile_adapt/tls_automation_policies_9.caddyfiletest ================================================ # example from issue #4640 http://foo:8447, http://127.0.0.1:8447 { reverse_proxy 127.0.0.1:8080 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":8447" ], "routes": [ { "match": [ { "host": [ "foo", "127.0.0.1" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "127.0.0.1:8080" } ] } ] } ] } ], "terminal": true } ], "automatic_https": { "skip": [ "foo", "127.0.0.1" ] } } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_policies_global_email_localhost.caddyfiletest ================================================ { email foo@bar } localhost { } example.com { } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "example.com" ] } ], "terminal": true }, { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "example.com" ], "issuers": [ { "email": "foo@bar", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "email": "foo@bar", "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_wildcard_force_automate.caddyfiletest ================================================ automated1.example.com { tls force_automate respond "Automated!" } automated2.example.com { tls force_automate respond "Automated!" } shadowed.example.com { respond "Shadowed!" } *.example.com { tls cert.pem key.pem respond "Wildcard!" 
} ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "automated1.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Automated!", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "automated2.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Automated!", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "shadowed.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Shadowed!", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "*.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Wildcard!", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "automated1.example.com", "automated2.example.com" ] } }, { "match": { "sni": [ "*.example.com" ] }, "certificate_selection": { "any_tag": [ "cert0" ] } }, {} ] } } }, "tls": { "certificates": { "automate": [ "automated1.example.com", "automated2.example.com" ], "load_files": [ { "certificate": "cert.pem", "key": "key.pem", "tags": [ "cert0" ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_automation_wildcard_shadowing.caddyfiletest ================================================ subdomain.example.com { respond "Subdomain!" } *.example.com { tls cert.pem key.pem respond "Wildcard!" 
} ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "subdomain.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Subdomain!", "handler": "static_response" } ] } ] } ], "terminal": true }, { "match": [ { "host": [ "*.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Wildcard!", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "*.example.com" ] }, "certificate_selection": { "any_tag": [ "cert0" ] } }, {} ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "cert.pem", "key": "key.pem", "tags": [ "cert0" ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy-with-verifier.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trusted_ca_cert_file ../caddy.ca.cer verifier dummy } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "verifier": "dummy" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file-legacy.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trusted_ca_cert_file ../caddy.ca.cer } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_cert_file.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool file { pem_file ../caddy.ca.cer } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "pem_files": [ "../caddy.ca.cer" ], "provider": "file" }, "mode": "request" } }, {} ] } } } } } 
================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert-legacy.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trusted_ca_cert MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_inline_cert_with_leaf_trust.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } trusted_leaf_cert_file ../caddy.ca.cer } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "trusted_leaf_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_file_loader_block.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf { file ../caddy.ca.cer } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "files": [ "../caddy.ca.cer" ], "loader": "file" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_file_loader_inline.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf file ../caddy.ca.cer } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "files": [ "../caddy.ca.cer" ], "loader": "file" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_file_loader_multi-in-block.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf { file ../caddy.ca.cer file ../caddy.ca.cer } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "files": [ "../caddy.ca.cer" ], "loader": "file" }, { "files": [ "../caddy.ca.cer" ], "loader": "file" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_folder_loader_block.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf { folder ../ } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "folders": [ "../" ], "loader": "folder" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_folder_loader_inline.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf folder ../ } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "folders": [ "../" ], "loader": "folder" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_client_auth_leaf_verifier_folder_loader_multi-in-block.caddyfiletest ================================================ localhost respond "hello from localhost" tls { client_auth { mode request trust_pool inline { trust_der 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== } verifier leaf { folder ../ folder ../ } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "match": { "sni": [ "localhost" ] }, "client_authentication": { "ca": { "provider": "inline", "trusted_ca_certs": [ 
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==" ] }, "verifiers": [ { "leaf_certs_loaders": [ { "folders": [ "../" ], "loader": "folder" }, { "folders": [ "../" ], "loader": "folder" } ], "verifier": "leaf" } ], "mode": "request" } }, {} ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_conn_policy_consolidate.caddyfiletest ================================================ # https://github.com/caddyserver/caddy/issues/3906 a.a { tls internal respond 403 } http://b.b https://b.b:8443 { tls internal respond 404 } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.a" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "status_code": 403 } ] } ] } ], "terminal": true } ] }, "srv1": { "listen": [ ":80" ], "routes": [ { "match": [ { 
"host": [ "b.b" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "status_code": 404 } ] } ] } ], "terminal": true } ] }, "srv2": { "listen": [ ":8443" ], "routes": [ { "match": [ { "host": [ "b.b" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "static_response", "status_code": 404 } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "a.a", "b.b" ], "issuers": [ { "module": "internal" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_multiple_options_without_provider.caddyfiletest ================================================ localhost tls { propagation_delay 10s dns_ttl 5m } ---------- parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_delay, dns_ttl] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:6 ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_override_acme_dns.caddyfiletest ================================================ { acme_dns mock foo } localhost { tls { dns mock bar resolvers 8.8.8.8 8.8.4.4 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "provider": { "argument": "bar", "name": "mock" }, "resolvers": [ "8.8.8.8", "8.8.4.4" ] } }, "module": "acme" } ] }, { "issuers": [ { "challenges": { "dns": { "provider": { "argument": "foo", "name": "mock" } } }, "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_override_global_dns.caddyfiletest ================================================ { dns mock foo } localhost { tls { dns 
mock bar resolvers 8.8.8.8 8.8.4.4 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "provider": { "argument": "bar", "name": "mock" }, "resolvers": [ "8.8.8.8", "8.8.4.4" ] } }, "module": "acme" } ] } ] }, "dns": { "argument": "foo", "name": "mock" } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_propagation_timeout_without_provider.caddyfiletest ================================================ :443 { tls { propagation_timeout 30s } } ---------- parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_timeout] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:4 ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_propagation_without_provider.caddyfiletest ================================================ :443 { tls { propagation_delay 30s } } ---------- parsing caddyfile tokens for 'tls': setting DNS challenge options [propagation_delay] requires a DNS provider (set with the 'dns' subdirective or 'acme_dns' global option), at Caddyfile:4 ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_resolvers_with_global_provider.caddyfiletest ================================================ { acme_dns mock } localhost { tls { resolvers 8.8.8.8 8.8.4.4 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "provider": { "name": "mock" }, "resolvers": [ "8.8.8.8", "8.8.4.4" ] } }, "module": "acme" } ] }, { 
"issuers": [ { "challenges": { "dns": { "provider": { "name": "mock" } } }, "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_dns_ttl.caddyfiletest ================================================ localhost respond "hello from localhost" tls { dns mock dns_ttl 5m10s } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "provider": { "name": "mock" }, "ttl": 310000000000 } }, "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_explicit_issuer_dns_ttl.caddyfiletest ================================================ localhost respond "hello from localhost" tls { issuer acme { dns_ttl 5m10s } issuer zerossl api_key { dns_ttl 10m20s } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "ttl": 310000000000 } }, "module": "acme" }, { "api_key": "api_key", "cname_validation": { "ttl": 620000000000 }, "module": "zerossl" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_explicit_issuer_propagation_options.caddyfiletest ================================================ localhost respond "hello from localhost" tls { issuer acme { propagation_delay 5m10s 
propagation_timeout 10m20s } issuer zerossl api_key { propagation_delay 5m30s propagation_timeout -1 } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "propagation_delay": 310000000000, "propagation_timeout": 620000000000 } }, "module": "acme" }, { "api_key": "api_key", "cname_validation": { "propagation_delay": 330000000000, "propagation_timeout": -1 }, "module": "zerossl" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_internal_options.caddyfiletest ================================================ a.example.com { tls { issuer internal { ca foo lifetime 24h sign_with_root } } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "a.example.com" ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "a.example.com" ], "issuers": [ { "ca": "foo", "lifetime": 86400000000000, "module": "internal", "sign_with_root": true } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tls_propagation_options.caddyfiletest ================================================ localhost respond "hello from localhost" tls { dns mock propagation_delay 5m10s propagation_timeout 10m20s } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from localhost", "handler": "static_response" } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": 
[ { "subjects": [ "localhost" ], "issuers": [ { "challenges": { "dns": { "propagation_delay": 310000000000, "propagation_timeout": 620000000000, "provider": { "name": "mock" } } }, "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/tracing.caddyfiletest ================================================ :80 { tracing /myhandler { span my-span } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/myhandler" ] } ], "handle": [ { "handler": "tracing", "span": "my-span" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/uri_query_operations.caddyfiletest ================================================ :9080 uri query +foo bar uri query -baz uri query taz test uri query key=value example uri query changethis>changed uri query { findme value replacement +foo1 baz } respond "{query}" ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":9080" ], "routes": [ { "handle": [ { "handler": "rewrite", "query": { "add": [ { "key": "foo", "val": "bar" } ] } }, { "handler": "rewrite", "query": { "delete": [ "baz" ] } }, { "handler": "rewrite", "query": { "set": [ { "key": "taz", "val": "test" } ] } }, { "handler": "rewrite", "query": { "set": [ { "key": "key=value", "val": "example" } ] } }, { "handler": "rewrite", "query": { "rename": [ { "key": "changethis", "val": "changed" } ] } }, { "handler": "rewrite", "query": { "add": [ { "key": "foo1", "val": "baz" } ], "replace": [ { "key": "findme", "replace": "replacement", "search_regexp": "value" } ] } }, { "body": "{http.request.uri.query}", "handler": "static_response" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/uri_replace_brace_escape.caddyfiletest ================================================ :9080 uri replace "\}" %7D uri replace "\{" %7B respond 
"{query}" ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":9080" ], "routes": [ { "handle": [ { "handler": "rewrite", "uri_substring": [ { "find": "\\}", "replace": "%7D" } ] }, { "handler": "rewrite", "uri_substring": [ { "find": "\\{", "replace": "%7B" } ] }, { "body": "{http.request.uri.query}", "handler": "static_response" } ] } ] } } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt/wildcard_pattern.caddyfiletest ================================================ *.example.com { tls foo@example.com { dns mock } @foo host foo.example.com handle @foo { respond "Foo!" } @bar host bar.example.com handle @bar { respond "Bar!" } # Fallback for otherwise unhandled domains handle { abort } } ---------- { "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "*.example.com" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Foo!", "handler": "static_response" } ] } ] } ], "match": [ { "host": [ "foo.example.com" ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "Bar!", "handler": "static_response" } ] } ] } ], "match": [ { "host": [ "bar.example.com" ] } ] }, { "group": "group3", "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "abort": true, "handler": "static_response" } ] } ] } ] } ] } ], "terminal": true } ] } } }, "tls": { "automation": { "policies": [ { "subjects": [ "*.example.com" ], "issuers": [ { "challenges": { "dns": { "provider": { "name": "mock" } } }, "email": "foo@example.com", "module": "acme" }, { "ca": "https://acme.zerossl.com/v2/DV90", "challenges": { "dns": { "provider": { "name": "mock" } } }, "email": "foo@example.com", "module": "acme" } ] } ] } } } } ================================================ FILE: caddytest/integration/caddyfile_adapt_test.go 
================================================
package integration

import (
	jsonMod "encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"testing"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddytest"

	_ "github.com/caddyserver/caddy/v2/internal/testmocks"
)

// TestCaddyfileAdaptToJSON runs every file in ./caddyfile_adapt as a subtest.
// Each fixture holds a Caddyfile, a "----------" separator, then either the
// expected adapted JSON (recognized by a leading '{') or a substring that the
// adapter's error message must contain.
func TestCaddyfileAdaptToJSON(t *testing.T) {
	// load the list of test files from the dir
	files, err := os.ReadDir("./caddyfile_adapt")
	if err != nil {
		// NOTE(review): t.Errorf does not stop the test; on failure the loop
		// below ranges over a nil slice (a harmless no-op), but t.Fatalf would
		// make the failure mode clearer — confirm intent.
		t.Errorf("failed to read caddyfile_adapt dir: %s", err)
	}

	// prep a regexp to fix strings on windows
	winNewlines := regexp.MustCompile(`\r?\n`)

	for _, f := range files {
		if f.IsDir() {
			continue
		}

		filename := f.Name()

		// run each file as a subtest, so that we can see which one fails more easily
		t.Run(filename, func(t *testing.T) {
			// read the test file
			data, err := os.ReadFile("./caddyfile_adapt/" + filename)
			if err != nil {
				// NOTE(review): as above, execution continues after Errorf;
				// parts[1] below would panic on an empty read — t.Fatalf may
				// be intended here.
				t.Errorf("failed to read %s dir: %s", filename, err)
			}

			// split the Caddyfile (first) and JSON (second) parts
			// (append newline to Caddyfile to match formatter expectations)
			parts := strings.Split(string(data), "----------")
			caddyfile, expected := strings.TrimSpace(parts[0])+"\n", strings.TrimSpace(parts[1])

			// replace windows newlines in the json with unix newlines
			expected = winNewlines.ReplaceAllString(expected, "\n")

			// replace os-specific default path for file_server's hide field
			replacePath, _ := jsonMod.Marshal(fmt.Sprint(".", string(filepath.Separator), "Caddyfile"))
			expected = strings.ReplaceAll(expected, `"./Caddyfile"`, string(replacePath))

			// if the expected output is JSON, compare it
			if len(expected) > 0 && expected[0] == '{' {
				ok := caddytest.CompareAdapt(t, filename, caddyfile, "caddyfile", expected)
				if !ok {
					t.Errorf("failed to adapt %s", filename)
				}
				return
			}

			// otherwise, adapt the Caddyfile and check for errors
			cfgAdapter := caddyconfig.GetAdapter("caddyfile")
			_, _, err = cfgAdapter.Adapt([]byte(caddyfile), nil)
			if err == nil {
				t.Errorf("expected error for %s but got none",
					filename)
			} else {
				// normalize CRLF so the substring match is OS-independent
				normalizedErr := winNewlines.ReplaceAllString(err.Error(), "\n")
				if !strings.Contains(normalizedErr, expected) {
					t.Errorf("expected error for %s to contain:\n%s\nbut got:\n%s", filename, expected, normalizedErr)
				}
			}
		})
	}
}
================================================
FILE: caddytest/integration/caddyfile_test.go
================================================
package integration

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/caddyserver/caddy/v2/caddytest"
)

// NOTE(review): the backtick config literals below appear with their internal
// newlines collapsed to spaces by the file extraction; they are reproduced
// byte-for-byte here — do not reformat them without checking the original file.

// TestRespond starts a server from an inline Caddyfile and asserts that the
// respond directive serves the configured body with a 200 status.
func TestRespond(t *testing.T) {
	// arrange
	tester := caddytest.NewTester(t)
	tester.InitServer(` { admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { respond /version 200 { body "hello from localhost" } } `, "caddyfile")

	// act and assert
	tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost")
}

// TestRedirect asserts that redir issues a 301 from / to /hello, and that
// following the redirect yields the /hello response body.
func TestRedirect(t *testing.T) {
	// arrange
	tester := caddytest.NewTester(t)
	tester.InitServer(` { admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { redir / http://localhost:9080/hello 301 respond /hello 200 { body "hello from localhost" } } `, "caddyfile")

	// act and assert
	tester.AssertRedirect("http://localhost:9080/", "http://localhost:9080/hello", 301)

	// follow redirect
	tester.AssertGetResponse("http://localhost:9080/", 200, "hello from localhost")
}

// TestDuplicateHosts asserts that a Caddyfile defining the same site address
// twice fails to load with an "ambiguous site definition" error.
func TestDuplicateHosts(t *testing.T) {
	// act and assert
	caddytest.AssertLoadError(t, ` localhost:9080 { } localhost:9080 { } `, "caddyfile", "ambiguous site definition")
}

// TestReadCookie seeds the test client's cookie jar with a "clientname"
// cookie, then serves the testdata directory via the templates and
// file_server directives. (This function continues beyond this excerpt.)
func TestReadCookie(t *testing.T) {
	localhost, _ := url.Parse("http://localhost")
	cookie := http.Cookie{
		Name:  "clientname",
		Value: "caddytest",
	}

	// arrange
	tester := caddytest.NewTester(t)
	tester.Client.Jar.SetCookies(localhost, []*http.Cookie{&cookie})
	tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { templates { root testdata } file_server { root testdata } } `, "caddyfile")

	// act
and assert tester.AssertGetResponse("http://localhost:9080/cookie.html", 200, "

Cookie.ClientName caddytest

") } func TestReplIndex(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { templates { root testdata } file_server { root testdata index "index.{host}.html" } } `, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/", 200, "") } func TestInvalidPrefix(t *testing.T) { type testCase struct { config, expectedError string } failureCases := []testCase{ { config: `wss://localhost`, expectedError: `the scheme wss:// is only supported in browsers; use https:// instead`, }, { config: `ws://localhost`, expectedError: `the scheme ws:// is only supported in browsers; use http:// instead`, }, { config: `someInvalidPrefix://localhost`, expectedError: "unsupported URL scheme someinvalidprefix://", }, { config: `h2c://localhost`, expectedError: `unsupported URL scheme h2c://`, }, { config: `localhost, wss://localhost`, expectedError: `the scheme wss:// is only supported in browsers; use https:// instead`, }, { config: `localhost { reverse_proxy ws://localhost" }`, expectedError: `the scheme ws:// is only supported in browsers; use http:// instead`, }, { config: `localhost { reverse_proxy someInvalidPrefix://localhost" }`, expectedError: `unsupported URL scheme someinvalidprefix://`, }, } for _, failureCase := range failureCases { caddytest.AssertLoadError(t, failureCase.config, "caddyfile", failureCase.expectedError) } } func TestValidPrefix(t *testing.T) { type testCase struct { rawConfig, expectedResponse string } successCases := []testCase{ { "localhost", `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } } } }`, }, { "https://localhost", `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } } } }`, }, { "http://localhost", `{ "apps": { 
"http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ] } } } } }`, }, { `localhost { reverse_proxy http://localhost:3000 }`, `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "localhost:3000" } ] } ] } ] } ], "terminal": true } ] } } } } }`, }, { `localhost { reverse_proxy https://localhost:3000 }`, `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "tls": {} }, "upstreams": [ { "dial": "localhost:3000" } ] } ] } ] } ], "terminal": true } ] } } } } }`, }, { `localhost { reverse_proxy h2c://localhost:3000 }`, `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "versions": [ "h2c", "2" ] }, "upstreams": [ { "dial": "localhost:3000" } ] } ] } ] } ], "terminal": true } ] } } } } }`, }, { `localhost { reverse_proxy localhost:3000 }`, `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "localhost:3000" } ] } ] } ] } ], "terminal": true } ] } } } } }`, }, } for _, successCase := range successCases { caddytest.AssertAdapt(t, successCase.rawConfig, "caddyfile", successCase.expectedResponse) } } func TestUriReplace(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri 
replace "\}" %7D uri replace "\{" %7B respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?test={%20content%20}", 200, "test=%7B%20content%20%7D") } func TestUriOps(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query +foo bar uri query -baz uri query taz test uri query key=value example uri query changethis>changed respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar0&baz=buz&taz=nottest&changethis=val", 200, "changed=val&foo=bar0&foo=bar&key%3Dvalue=example&taz=test") } // Tests the `http.request.local.port` placeholder. // We don't test the very similar `http.request.local.host` placeholder, // because depending on the host the test is running on, localhost might // refer to 127.0.0.1 or ::1. // TODO: Test each http version separately (especially http/3) func TestHttpRequestLocalPortPlaceholder(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 respond "{http.request.local.port}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/", 200, "9080") } func TestSetThenAddQueryParams(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo bar uri query +foo baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint", 200, "foo=bar&foo=baz") } func TestSetThenDeleteParams(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query bar foo{query.foo} uri query -foo respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "bar=foobar") } func TestRenameAndOtherOps(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo>bar uri query bar taz uri query +bar 
baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "bar=taz&bar=baz") } func TestReplaceOps(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo bar baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=baz") } func TestReplaceWithReplacementPlaceholder(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo bar {query.placeholder} respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?placeholder=baz&foo=bar", 200, "foo=baz&placeholder=baz") } func TestReplaceWithKeyPlaceholder(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query {query.placeholder} bar baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?placeholder=foo&foo=bar", 200, "foo=baz&placeholder=foo") } func TestPartialReplacement(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo ar az respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=baz") } func TestNonExistingSearch(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query foo var baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar", 200, "foo=bar") } func TestReplaceAllOps(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query * bar baz respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar&baz=bar", 200, "baz=baz&foo=baz") } func TestUriOpsBlock(t *testing.T) { tester := 
caddytest.NewTester(t) tester.InitServer(` { admin localhost:2999 http_port 9080 } :9080 uri query { +foo bar -baz taz test } respond "{query}"`, "caddyfile") tester.AssertGetResponse("http://localhost:9080/endpoint?foo=bar0&baz=buz&taz=nottest", 200, "foo=bar0&foo=bar&taz=test") } func TestHandleErrorSimpleCodes(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ admin localhost:2999 http_port 9080 } localhost:9080 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 handle_errors 404 410 { respond "404 or 410 error" } }`, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/private", 410, "404 or 410 error") tester.AssertGetResponse("http://localhost:9080/hidden", 404, "404 or 410 error") } func TestHandleErrorRange(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ admin localhost:2999 http_port 9080 } localhost:9080 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 handle_errors 4xx { respond "Error in the [400 .. 499] range" } }`, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/private", 410, "Error in the [400 .. 499] range") tester.AssertGetResponse("http://localhost:9080/hidden", 404, "Error in the [400 .. 499] range") } func TestHandleErrorSort(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ admin localhost:2999 http_port 9080 } localhost:9080 { root * /srv error /private* "Unauthorized" 410 error /hidden* "Not found" 404 error /internalerr* "Internal Server Error" 500 handle_errors { respond "Fallback route: code outside the [400..499] range" } handle_errors 4xx { respond "Error in the [400 .. 499] range" } }`, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/internalerr", 500, "Fallback route: code outside the [400..499] range") tester.AssertGetResponse("http://localhost:9080/hidden", 404, "Error in the [400 .. 
499] range") } func TestHandleErrorRangeAndCodes(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ admin localhost:2999 http_port 9080 } localhost:9080 { root * /srv error /private* "Unauthorized" 410 error /threehundred* "Moved Permanently" 301 error /internalerr* "Internal Server Error" 500 handle_errors 500 3xx { respond "Error code is equal to 500 or in the [300..399] range" } handle_errors 4xx { respond "Error in the [400 .. 499] range" } }`, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/internalerr", 500, "Error code is equal to 500 or in the [300..399] range") tester.AssertGetResponse("http://localhost:9080/threehundred", 301, "Error code is equal to 500 or in the [300..399] range") tester.AssertGetResponse("http://localhost:9080/private", 410, "Error in the [400 .. 499] range") } func TestHandleErrorSubHandlers(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ admin localhost:2999 http_port 9080 } localhost:9080 { root * /srv file_server error /*/internalerr* "Internal Server Error" 500 handle_errors 404 { handle /en/* { respond "not found" 404 } handle /es/* { respond "no encontrado" 404 } handle { respond "default not found" } } handle_errors { handle { respond "Default error" } handle /en/* { respond "English error" } } } `, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/en/notfound", 404, "not found") tester.AssertGetResponse("http://localhost:9080/es/notfound", 404, "no encontrado") tester.AssertGetResponse("http://localhost:9080/notfound", 404, "default not found") tester.AssertGetResponse("http://localhost:9080/es/internalerr", 500, "Default error") tester.AssertGetResponse("http://localhost:9080/en/internalerr", 500, "English error") } func TestInvalidSiteAddressesAsDirectives(t *testing.T) { type testCase struct { config, expectedError string } failureCases := []testCase{ { config: ` handle { file_server }`, expectedError: `Caddyfile:2: parsed 
'handle' as a site address, but it is a known directive; directives must appear in a site block`, }, { config: ` reverse_proxy localhost:9000 localhost:9001 { file_server }`, expectedError: `Caddyfile:2: parsed 'reverse_proxy' as a site address, but it is a known directive; directives must appear in a site block`, }, } for _, failureCase := range failureCases { caddytest.AssertLoadError(t, failureCase.config, "caddyfile", failureCase.expectedError) } } ================================================ FILE: caddytest/integration/forwardauth_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package integration import ( "fmt" "net/http" "net/http/httptest" "strings" "sync" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) // TestForwardAuthCopyHeadersStripsClientHeaders is a regression test for the // header injection vulnerability in forward_auth copy_headers. // // When the auth service returns 200 OK without one of the copy_headers headers, // the MatchNot guard skips the Set operation. Before this fix, the original // client-supplied header survived unchanged into the backend request, allowing // privilege escalation with only a valid (non-privileged) bearer token. After // the fix, an unconditional delete route runs first, so the backend always // sees an absent header rather than the attacker-supplied value. 
func TestForwardAuthCopyHeadersStripsClientHeaders(t *testing.T) { // Mock auth service: accepts any Bearer token, returns 200 OK with NO // identity headers. This is the stateless JWT validator pattern that // triggers the vulnerability. authSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.Header.Get("Authorization"), "Bearer ") { w.WriteHeader(http.StatusOK) return } w.WriteHeader(http.StatusUnauthorized) })) defer authSrv.Close() // Mock backend: records the identity headers it receives. A real application // would use X-User-Id / X-User-Role to make authorization decisions. type received struct{ userID, userRole string } var ( mu sync.Mutex last received ) backendSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mu.Lock() last = received{ userID: r.Header.Get("X-User-Id"), userRole: r.Header.Get("X-User-Role"), } mu.Unlock() w.WriteHeader(http.StatusOK) fmt.Fprint(w, "ok") })) defer backendSrv.Close() authAddr := strings.TrimPrefix(authSrv.URL, "http://") backendAddr := strings.TrimPrefix(backendSrv.URL, "http://") tester := caddytest.NewTester(t) tester.InitServer(fmt.Sprintf(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { forward_auth %s { uri / copy_headers X-User-Id X-User-Role } reverse_proxy %s } `, authAddr, backendAddr), "caddyfile") // Case 1: no token. Auth must still reject the request even when the client // includes identity headers. This confirms the auth check is not bypassed. req, _ := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) req.Header.Set("X-User-Id", "injected") req.Header.Set("X-User-Role", "injected") resp := tester.AssertResponseCode(req, http.StatusUnauthorized) resp.Body.Close() // Case 2: valid token, no injected headers. The backend should see absent // identity headers (the auth service never returns them). 
req, _ = http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) req.Header.Set("Authorization", "Bearer token123") tester.AssertResponse(req, http.StatusOK, "ok") mu.Lock() gotID, gotRole := last.userID, last.userRole mu.Unlock() if gotID != "" { t.Errorf("baseline: X-User-Id should be absent, got %q", gotID) } if gotRole != "" { t.Errorf("baseline: X-User-Role should be absent, got %q", gotRole) } // Case 3 (the security regression): valid token plus forged identity headers. // The fix must strip those values so the backend never sees them. req, _ = http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) req.Header.Set("Authorization", "Bearer token123") req.Header.Set("X-User-Id", "admin") // forged req.Header.Set("X-User-Role", "superadmin") // forged tester.AssertResponse(req, http.StatusOK, "ok") mu.Lock() gotID, gotRole = last.userID, last.userRole mu.Unlock() if gotID != "" { t.Errorf("injection: X-User-Id must be stripped, got %q", gotID) } if gotRole != "" { t.Errorf("injection: X-User-Role must be stripped, got %q", gotRole) } } // TestForwardAuthCopyHeadersAuthResponseWins verifies that when the auth // service does include a copy_headers header in its response, that value // is forwarded to the backend and takes precedence over any client-supplied // value for the same header. func TestForwardAuthCopyHeadersAuthResponseWins(t *testing.T) { const wantUserID = "service-user-42" const wantUserRole = "editor" // Auth service: accepts bearer token and sets identity headers. 
authSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(r.Header.Get("Authorization"), "Bearer ") { w.Header().Set("X-User-Id", wantUserID) w.Header().Set("X-User-Role", wantUserRole) w.WriteHeader(http.StatusOK) return } w.WriteHeader(http.StatusUnauthorized) })) defer authSrv.Close() type received struct{ userID, userRole string } var ( mu sync.Mutex last received ) backendSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mu.Lock() last = received{ userID: r.Header.Get("X-User-Id"), userRole: r.Header.Get("X-User-Role"), } mu.Unlock() w.WriteHeader(http.StatusOK) fmt.Fprint(w, "ok") })) defer backendSrv.Close() authAddr := strings.TrimPrefix(authSrv.URL, "http://") backendAddr := strings.TrimPrefix(backendSrv.URL, "http://") tester := caddytest.NewTester(t) tester.InitServer(fmt.Sprintf(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { forward_auth %s { uri / copy_headers X-User-Id X-User-Role } reverse_proxy %s } `, authAddr, backendAddr), "caddyfile") // The client sends forged headers; the auth service overrides them with // its own values. The backend must receive the auth service values. 
req, _ := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) req.Header.Set("Authorization", "Bearer token123") req.Header.Set("X-User-Id", "forged-id") // must be overwritten req.Header.Set("X-User-Role", "forged-role") // must be overwritten tester.AssertResponse(req, http.StatusOK, "ok") mu.Lock() gotID, gotRole := last.userID, last.userRole mu.Unlock() if gotID != wantUserID { t.Errorf("X-User-Id: want %q, got %q", wantUserID, gotID) } if gotRole != wantUserRole { t.Errorf("X-User-Role: want %q, got %q", wantUserRole, gotRole) } } ================================================ FILE: caddytest/integration/h2listener_test.go ================================================ package integration import ( "fmt" "net/http" "slices" "strings" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func newH2ListenerWithVersionsWithTLSTester(t *testing.T, serverVersions []string, clientVersions []string) *caddytest.Tester { const baseConfig = ` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 servers :9443 { protocols %s } } localhost { respond "{http.request.tls.proto} {http.request.proto}" } ` tester := caddytest.NewTester(t) tester.InitServer(fmt.Sprintf(baseConfig, strings.Join(serverVersions, " ")), "caddyfile") tr := tester.Client.Transport.(*http.Transport) tr.TLSClientConfig.NextProtos = clientVersions tr.Protocols = new(http.Protocols) if slices.Contains(clientVersions, "h2") { tr.ForceAttemptHTTP2 = true tr.Protocols.SetHTTP2(true) } if !slices.Contains(clientVersions, "http/1.1") { tr.Protocols.SetHTTP1(false) } return tester } func TestH2ListenerWithTLS(t *testing.T) { tests := []struct { serverVersions []string clientVersions []string expectedBody string failed bool }{ {[]string{"h2"}, []string{"h2"}, "h2 HTTP/2.0", false}, {[]string{"h2"}, []string{"http/1.1"}, "", true}, {[]string{"h1"}, []string{"http/1.1"}, "http/1.1 HTTP/1.1", false}, {[]string{"h1"}, []string{"h2"}, "", true}, {[]string{"h2", "h1"}, 
[]string{"h2"}, "h2 HTTP/2.0", false}, {[]string{"h2", "h1"}, []string{"http/1.1"}, "http/1.1 HTTP/1.1", false}, } for _, tc := range tests { tester := newH2ListenerWithVersionsWithTLSTester(t, tc.serverVersions, tc.clientVersions) t.Logf("running with server versions %v and client versions %v:", tc.serverVersions, tc.clientVersions) if tc.failed { resp, err := tester.Client.Get("https://localhost:9443") if err == nil { t.Errorf("unexpected response: %d", resp.StatusCode) } } else { tester.AssertGetResponse("https://localhost:9443", 200, tc.expectedBody) } } } func newH2ListenerWithVersionsWithoutTLSTester(t *testing.T, serverVersions []string, clientVersions []string) *caddytest.Tester { const baseConfig = ` { skip_install_trust admin localhost:2999 http_port 9080 servers :9080 { protocols %s } } http://localhost { respond "{http.request.proto}" } ` tester := caddytest.NewTester(t) tester.InitServer(fmt.Sprintf(baseConfig, strings.Join(serverVersions, " ")), "caddyfile") tr := tester.Client.Transport.(*http.Transport) tr.Protocols = new(http.Protocols) if slices.Contains(clientVersions, "h2c") { tr.Protocols.SetHTTP1(false) tr.Protocols.SetUnencryptedHTTP2(true) } else if slices.Contains(clientVersions, "http/1.1") { tr.Protocols.SetHTTP1(true) tr.Protocols.SetUnencryptedHTTP2(false) } return tester } func TestH2ListenerWithoutTLS(t *testing.T) { tests := []struct { serverVersions []string clientVersions []string expectedBody string failed bool }{ {[]string{"h2c"}, []string{"h2c"}, "HTTP/2.0", false}, {[]string{"h2c"}, []string{"http/1.1"}, "", true}, {[]string{"h1"}, []string{"http/1.1"}, "HTTP/1.1", false}, {[]string{"h1"}, []string{"h2c"}, "", true}, {[]string{"h2c", "h1"}, []string{"h2c"}, "HTTP/2.0", false}, {[]string{"h2c", "h1"}, []string{"http/1.1"}, "HTTP/1.1", false}, } for _, tc := range tests { tester := newH2ListenerWithVersionsWithoutTLSTester(t, tc.serverVersions, tc.clientVersions) t.Logf("running with server versions %v and client versions %v:", 
tc.serverVersions, tc.clientVersions) if tc.failed { resp, err := tester.Client.Get("http://localhost:9080") if err == nil { t.Errorf("unexpected response: %d", resp.StatusCode) } } else { tester.AssertGetResponse("http://localhost:9080", 200, tc.expectedBody) } } } ================================================ FILE: caddytest/integration/handler_test.go ================================================ package integration import ( "bytes" "net/http" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestBrowse(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { file_server browse } `, "caddyfile") req, err := http.NewRequest(http.MethodGet, "http://localhost:9080/", nil) if err != nil { t.Fail() return } tester.AssertResponseCode(req, 200) } func TestRespondWithJSON(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost { respond {http.request.body} } `, "caddyfile") res, _ := tester.AssertPostResponseBody("https://localhost:9443/", nil, bytes.NewBufferString(`{ "greeting": "Hello, world!" }`), 200, `{ "greeting": "Hello, world!" 
}`) if res.Header.Get("Content-Type") != "application/json" { t.Errorf("expected Content-Type to be application/json, but was %s", res.Header.Get("Content-Type")) } } ================================================ FILE: caddytest/integration/intercept_test.go ================================================ package integration import ( "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestIntercept(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { respond /intercept "I'm a teapot" 408 header /intercept To-Intercept ok respond /no-intercept "I'm not a teapot" intercept { @teapot status 408 handle_response @teapot { header /intercept intercepted {resp.header.To-Intercept} respond /intercept "I'm a combined coffee/tea pot that is temporarily out of coffee" 503 } } } `, "caddyfile") r, _ := tester.AssertGetResponse("http://localhost:9080/intercept", 503, "I'm a combined coffee/tea pot that is temporarily out of coffee") if r.Header.Get("intercepted") != "ok" { t.Fatalf(`header "intercepted" value is not "ok": %s`, r.Header.Get("intercepted")) } tester.AssertGetResponse("http://localhost:9080/no-intercept", 200, "I'm not a teapot") } ================================================ FILE: caddytest/integration/leafcertloaders_test.go ================================================ package integration import ( "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestLeafCertLoaders(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "terminal": true } ], "tls_connection_policies": [ { "client_authentication": { "verifiers": [ { "verifier": "leaf", "leaf_certs_loaders": [ { "loader": "file", 
"files": ["../leafcert.pem"] }, { "loader": "folder", "folders": ["../"] }, { "loader": "storage" }, { "loader": "pem" } ] } ] } } ] } } } } }`, "json") } ================================================ FILE: caddytest/integration/listener_test.go ================================================ package integration import ( "bytes" "fmt" "math/rand/v2" "net" "net/http" "strings" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func setupListenerWrapperTest(t *testing.T, handlerFunc http.HandlerFunc) *caddytest.Tester { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("failed to listen: %s", err) } mux := http.NewServeMux() mux.Handle("/", handlerFunc) srv := &http.Server{ Handler: mux, } go srv.Serve(l) t.Cleanup(func() { _ = srv.Close() _ = l.Close() }) tester := caddytest.NewTester(t) tester.InitServer(fmt.Sprintf(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 local_certs servers :9443 { listener_wrappers { http_redirect tls } } } localhost { reverse_proxy %s } `, l.Addr().String()), "caddyfile") return tester } func TestHTTPRedirectWrapperWithLargeUpload(t *testing.T) { const uploadSize = (1024 * 1024) + 1 // 1 MB + 1 byte // 1 more than an MB body := make([]byte, uploadSize) rand.NewChaCha8([32]byte{}).Read(body) tester := setupListenerWrapperTest(t, func(writer http.ResponseWriter, request *http.Request) { buf := new(bytes.Buffer) _, err := buf.ReadFrom(request.Body) if err != nil { t.Fatalf("failed to read body: %s", err) } if !bytes.Equal(buf.Bytes(), body) { t.Fatalf("body not the same") } writer.WriteHeader(http.StatusNoContent) }) resp, err := tester.Client.Post("https://localhost:9443", "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatalf("failed to post: %s", err) } if resp.StatusCode != http.StatusNoContent { t.Fatalf("unexpected status: %d != %d", resp.StatusCode, http.StatusNoContent) } } func TestLargeHttpRequest(t *testing.T) { tester := setupListenerWrapperTest(t, 
func(writer http.ResponseWriter, request *http.Request) { t.Fatal("not supposed to handle a request") }) // We never read the body in any way, set an extra long header instead. req, _ := http.NewRequest("POST", "http://localhost:9443", nil) req.Header.Set("Long-Header", strings.Repeat("X", 1024*1024)) _, err := tester.Client.Do(req) if err == nil { t.Fatal("not supposed to succeed") } } ================================================ FILE: caddytest/integration/map_test.go ================================================ package integration import ( "bytes" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestMap(t *testing.T) { // arrange tester := caddytest.NewTester(t) tester.InitServer(`{ skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } localhost:9080 { map {http.request.method} {dest-1} {dest-2} { default unknown1 unknown2 ~G(.)(.) G${1}${2}-called POST post-called foobar } respond /version 200 { body "hello from localhost {dest-1} {dest-2}" } } `, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost GET-called unknown2") tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost post-called foobar") } func TestMapRespondWithDefault(t *testing.T) { // arrange tester := caddytest.NewTester(t) tester.InitServer(`{ skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 } localhost:9080 { map {http.request.method} {dest-name} { default unknown GET get-called } respond /version 200 { body "hello from localhost {dest-name}" } } `, "caddyfile") // act and assert tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost get-called") tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost unknown") } func TestMapAsJSON(t *testing.T) { // arrange tester := 
caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities" : { "local" : { "install_trust": false } } }, "http": { "http_port": 9080, "https_port": 9443, "servers": { "srv0": { "listen": [ ":9080" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "handler": "map", "source": "{http.request.method}", "destinations": ["{dest-name}"], "defaults": ["unknown"], "mappings": [ { "input": "GET", "outputs": ["get-called"] }, { "input": "POST", "outputs": ["post-called"] } ] } ] }, { "handle": [ { "body": "hello from localhost {dest-name}", "handler": "static_response", "status_code": 200 } ], "match": [ { "path": ["/version"] } ] } ] } ], "match": [ { "host": ["localhost"] } ], "terminal": true } ] } } } } }`, "json") tester.AssertGetResponse("http://localhost:9080/version", 200, "hello from localhost get-called") tester.AssertPostResponseBody("http://localhost:9080/version", []string{}, bytes.NewBuffer([]byte{}), 200, "hello from localhost post-called") } ================================================ FILE: caddytest/integration/mockdns_test.go ================================================ package integration import ( "context" "github.com/caddyserver/certmagic" "github.com/libdns/libdns" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(MockDNSProvider{}) } // MockDNSProvider is a mock DNS provider, for testing config with DNS modules. type MockDNSProvider struct { Argument string `json:"argument,omitempty"` // optional argument useful for testing } // CaddyModule returns the Caddy module information. func (MockDNSProvider) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "dns.providers.mock", New: func() caddy.Module { return new(MockDNSProvider) }, } } // Provision sets up the module. 
func (MockDNSProvider) Provision(ctx caddy.Context) error { return nil }

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. It accepts at
// most one optional argument, stored in p.Argument.
func (p *MockDNSProvider) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name
	if d.NextArg() {
		p.Argument = d.Val()
	}
	if d.NextArg() {
		return d.Errf("unexpected argument '%s'", d.Val())
	}
	return nil
}

// AppendRecords appends DNS records to the zone. No-op for the mock.
func (MockDNSProvider) AppendRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
	return nil, nil
}

// DeleteRecords deletes DNS records from the zone. No-op for the mock.
func (MockDNSProvider) DeleteRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
	return nil, nil
}

// GetRecords gets DNS records from the zone. No-op for the mock.
func (MockDNSProvider) GetRecords(ctx context.Context, zone string) ([]libdns.Record, error) {
	return nil, nil
}

// SetRecords sets DNS records in the zone. No-op for the mock.
func (MockDNSProvider) SetRecords(ctx context.Context, zone string, recs []libdns.Record) ([]libdns.Record, error) {
	return nil, nil
}

// Interface guard
var (
	_ caddyfile.Unmarshaler = (*MockDNSProvider)(nil)
	_ certmagic.DNSProvider = (*MockDNSProvider)(nil)
	_ caddy.Provisioner     = (*MockDNSProvider)(nil)
	_ caddy.Module          = (*MockDNSProvider)(nil)
)

================================================
FILE: caddytest/integration/pki_test.go
================================================
package integration

import (
	"testing"

	"github.com/caddyserver/caddy/v2/caddytest"
)

// TestLeafCertLifetimeLessThanIntermediate asserts that config validation
// rejects an acme_server leaf lifetime that is not shorter than the CA's
// intermediate certificate lifetime (both set to 7 days here).
func TestLeafCertLifetimeLessThanIntermediate(t *testing.T) {
	caddytest.AssertLoadError(t, `
	{
		"admin": {
			"disabled": true
		},
		"apps": {
			"http": {
				"servers": {
					"srv0": {
						"listen": [
							":443"
						],
						"routes": [
							{
								"handle": [
									{
										"handler": "subroute",
										"routes": [
											{
												"handle": [
													{
														"ca": "internal",
														"handler": "acme_server",
														"lifetime": 604800000000000
													}
												]
											}
										]
									}
								]
							}
						]
					}
				}
			},
			"pki": {
				"certificate_authorities": {
					"internal": {
						"install_trust": false,
						"intermediate_lifetime": 604800000000000,
						"name": "Internal CA"
					}
				}
			}
		}
	}
	`, "json", "should be less than intermediate certificate lifetime")
}

// TestIntermediateLifetimeLessThanRoot asserts that config validation rejects
// an intermediate lifetime (set here to ~10 years) that is not shorter than
// the root certificate's actual lifetime.
func TestIntermediateLifetimeLessThanRoot(t *testing.T) {
	caddytest.AssertLoadError(t, `
	{
		"admin": {
			"disabled": true
		},
		"apps": {
			"http": {
				"servers": {
					"srv0": {
						"listen": [
							":443"
						],
						"routes": [
							{
								"handle": [
									{
										"handler": "subroute",
										"routes": [
											{
												"handle": [
													{
														"ca": "internal",
														"handler": "acme_server",
														"lifetime": 2592000000000000
													}
												]
											}
										]
									}
								]
							}
						]
					}
				}
			},
			"pki": {
				"certificate_authorities": {
					"internal": {
						"install_trust": false,
						"intermediate_lifetime": 311040000000000000,
						"name": "Internal CA"
					}
				}
			}
		}
	}
	`, "json", "intermediate certificate lifetime must be less than actual root certificate lifetime")
}

================================================
FILE: caddytest/integration/proxyprotocol_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Integration tests for Caddy's PROXY protocol support, covering two distinct
// roles that Caddy can play:
//
//  1. As a PROXY protocol *sender* (reverse proxy outbound transport):
//     Caddy receives an inbound request from a test client and the
//     reverse_proxy handler forwards it to an upstream with a PROXY protocol
//     header (v1 or v2) prepended to the connection. A lightweight backend
//     built with go-proxyproto validates that the header was received and
//     carries the correct client address.
//
//     Transport versions tested:
//     - "1.1" -> plain HTTP/1.1 to the upstream
//     - "h2c" -> HTTP/2 cleartext (h2c) to the upstream (regression for #7529)
//     - "2"   -> HTTP/2 over TLS (h2) to the upstream
//
//     For each transport version both PROXY protocol v1 and v2 are exercised.
//
//     HTTP/3 (h3) is not included because it uses QUIC/UDP and therefore
//     bypasses the TCP-level dialContext that injects PROXY protocol headers;
//     there is no meaningful h3 + proxy protocol sender combination to test.
//
//  2. As a PROXY protocol *receiver* (server-side listener wrapper):
//     A raw TCP client dials Caddy directly, injects a PROXY v2 header
//     spoofing a source address, and sends a normal HTTP/1.1 request. The
//     Caddy server is configured with the proxy_protocol listener wrapper and
//     is expected to surface the spoofed address via the
//     {http.request.remote.host} placeholder.
package integration

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"slices"
	"strings"
	"sync"
	"testing"

	goproxy "github.com/pires/go-proxyproto"
	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/caddyserver/caddy/v2/caddytest"
)

// proxyProtoBackend is a minimal HTTP server that sits behind a
// go-proxyproto listener and records the source address that was
// delivered in the PROXY header for each request.
type proxyProtoBackend struct {
	mu          sync.Mutex // guards headerAddrs
	headerAddrs []string   // host:port strings extracted from each PROXY header
	ln          net.Listener
	srv         *http.Server
}

// newProxyProtoBackend starts a TCP listener wrapped with go-proxyproto on a
// random local port and serves requests with a simple "OK" body. The PROXY
// header source addresses are accumulated in headerAddrs so tests can
// inspect them.
func newProxyProtoBackend(t *testing.T) *proxyProtoBackend {
	t.Helper()
	b := &proxyProtoBackend{}

	rawLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("backend: listen: %v", err)
	}

	// Wrap with go-proxyproto so the PROXY header is stripped and parsed
	// before the HTTP server sees the connection. We use REQUIRE so that a
	// missing header returns an error instead of silently passing through.
	pLn := &goproxy.Listener{
		Listener: rawLn,
		Policy: func(_ net.Addr) (goproxy.Policy, error) {
			return goproxy.REQUIRE, nil
		},
	}
	b.ln = pLn

	// Wrap the handler with h2c support so the backend can speak HTTP/2
	// cleartext (h2c) as well as plain HTTP/1.1. Without this, Caddy's
	// reverse proxy would receive a 'frame too large' error when the
	// upstream transport is configured to use h2c.
	h2Server := &http2.Server{}
	handlerFn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// go-proxyproto has already updated the net.Conn's remote
		// address to the value from the PROXY header; the HTTP server
		// surfaces it in r.RemoteAddr.
		b.mu.Lock()
		b.headerAddrs = append(b.headerAddrs, r.RemoteAddr)
		b.mu.Unlock()
		w.WriteHeader(http.StatusOK)
		_, _ = fmt.Fprint(w, "OK")
	})
	b.srv = &http.Server{
		Handler: h2c.NewHandler(handlerFn, h2Server),
	}
	go b.srv.Serve(pLn) //nolint:errcheck
	t.Cleanup(func() {
		_ = b.srv.Close()
		_ = rawLn.Close()
	})
	return b
}

// addr returns the listening address (host:port) of the backend.
func (b *proxyProtoBackend) addr() string { return b.ln.Addr().String() }

// recordedAddrs returns a snapshot of all PROXY-header source addresses seen
// so far. The copy avoids data races with the handler appending concurrently.
func (b *proxyProtoBackend) recordedAddrs() []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	cp := make([]string, len(b.headerAddrs))
	copy(cp, b.headerAddrs)
	return cp
}

// tlsProxyProtoBackend is a TLS-enabled backend that sits behind a
// go-proxyproto listener. The PROXY header is stripped before the TLS
// handshake so the layer order on a connection is:
//
//	raw TCP → go-proxyproto (strips PROXY header) → TLS handshake → HTTP/2
type tlsProxyProtoBackend struct {
	mu          sync.Mutex // guards headerAddrs
	headerAddrs []string
	srv         *httptest.Server
}

// newTLSProxyProtoBackend starts a TLS listener that first reads and strips
// PROXY protocol headers (go-proxyproto, REQUIRE policy) and then performs a
// TLS handshake. The backend speaks HTTP/2 over TLS (h2).
//
// The certificate is the standard self-signed certificate generated by
// httptest.Server; the Caddy transport must be configured with
// insecure_skip_verify: true to trust it.
func newTLSProxyProtoBackend(t *testing.T) *tlsProxyProtoBackend {
	t.Helper()
	b := &tlsProxyProtoBackend{}

	handlerFn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b.mu.Lock()
		b.headerAddrs = append(b.headerAddrs, r.RemoteAddr)
		b.mu.Unlock()
		w.WriteHeader(http.StatusOK)
		_, _ = fmt.Fprint(w, "OK")
	})

	rawLn, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("tlsBackend: listen: %v", err)
	}

	// Wrap with go-proxyproto so the PROXY header is consumed before TLS.
	pLn := &goproxy.Listener{
		Listener: rawLn,
		Policy: func(_ net.Addr) (goproxy.Policy, error) {
			return goproxy.REQUIRE, nil
		},
	}

	// httptest.NewUnstartedServer lets us replace the listener before
	// calling StartTLS(), which wraps our proxyproto listener with
	// tls.NewListener. This gives us the right layer order.
	b.srv = httptest.NewUnstartedServer(handlerFn)
	b.srv.Listener = pLn
	// StartTLS enables HTTP/2 on the server automatically.
	b.srv.StartTLS()
	t.Cleanup(func() {
		b.srv.Close()
	})
	return b
}

// addr returns the listening address (host:port) of the TLS backend.
func (b *tlsProxyProtoBackend) addr() string { return b.srv.Listener.Addr().String() }

// tlsConfig returns the *tls.Config used by the backend server.
// Tests can use it to verify cert details if needed.
func (b *tlsProxyProtoBackend) tlsConfig() *tls.Config { return b.srv.TLS }

// recordedAddrs returns a snapshot of all PROXY-header source addresses.
func (b *tlsProxyProtoBackend) recordedAddrs() []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	cp := make([]string, len(b.headerAddrs))
	copy(cp, b.headerAddrs)
	return cp
}

// proxyProtoTLSConfig builds a Caddy JSON configuration that proxies to a TLS
// upstream with PROXY protocol. The transport uses insecure_skip_verify so
// the self-signed certificate generated by httptest.Server is accepted.
func proxyProtoTLSConfig(listenPort int, backendAddr, ppVersion string, transportVersions []string) string {
	versionsJSON, _ := json.Marshal(transportVersions)
	return fmt.Sprintf(`{
		"admin": {
			"listen": "localhost:2999"
		},
		"apps": {
			"pki": {
				"certificate_authorities": {
					"local": {
						"install_trust": false
					}
				}
			},
			"http": {
				"grace_period": 1,
				"servers": {
					"proxy": {
						"listen": [":%d"],
						"automatic_https": {
							"disable": true
						},
						"routes": [
							{
								"handle": [
									{
										"handler": "reverse_proxy",
										"upstreams": [{"dial": "%s"}],
										"transport": {
											"protocol": "http",
											"proxy_protocol": "%s",
											"versions": %s,
											"tls": {
												"insecure_skip_verify": true
											}
										}
									}
								]
							}
						]
					}
				}
			}
		}
	}`, listenPort, backendAddr, ppVersion, string(versionsJSON))
}

// testTLSProxyProtocolMatrix is the shared implementation for TLS-based proxy
// protocol tests. It mirrors testProxyProtocolMatrix but uses a TLS backend.
func testTLSProxyProtocolMatrix(t *testing.T, ppVersion string, transportVersions []string, numRequests int) { t.Helper() backend := newTLSProxyProtoBackend(t) listenPort := freePort(t) tester := caddytest.NewTester(t) tester.WithDefaultOverrides(caddytest.Config{ AdminPort: 2999, }) cfg := proxyProtoTLSConfig(listenPort, backend.addr(), ppVersion, transportVersions) tester.InitServer(cfg, "json") proxyURL := fmt.Sprintf("http://127.0.0.1:%d/", listenPort) for i := 0; i < numRequests; i++ { resp, err := tester.Client.Get(proxyURL) if err != nil { t.Fatalf("request %d/%d: GET %s: %v", i+1, numRequests, proxyURL, err) } resp.Body.Close() if resp.StatusCode != http.StatusOK { t.Errorf("request %d/%d: expected status 200, got %d", i+1, numRequests, resp.StatusCode) } } addrs := backend.recordedAddrs() if len(addrs) == 0 { t.Fatalf("backend recorded no PROXY protocol addresses (expected at least 1)") } for i, addr := range addrs { host, _, err := net.SplitHostPort(addr) if err != nil { t.Errorf("addr[%d] %q: SplitHostPort: %v", i, addr, err) continue } if host != "127.0.0.1" { t.Errorf("addr[%d]: expected source 127.0.0.1, got %q", i, host) } } } // proxyProtoConfig builds a Caddy JSON configuration that: // - listens on listenPort for inbound HTTP requests // - proxies them to backendAddr with PROXY protocol ppVersion ("v1"/"v2") // - uses the given transport versions (e.g. 
["1.1"] or ["h2c"]) func proxyProtoConfig(listenPort int, backendAddr, ppVersion string, transportVersions []string) string { versionsJSON, _ := json.Marshal(transportVersions) return fmt.Sprintf(`{ "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities": { "local": { "install_trust": false } } }, "http": { "grace_period": 1, "servers": { "proxy": { "listen": [":%d"], "automatic_https": { "disable": true }, "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [{"dial": "%s"}], "transport": { "protocol": "http", "proxy_protocol": "%s", "versions": %s } } ] } ] } } } } }`, listenPort, backendAddr, ppVersion, string(versionsJSON)) } // freePort returns a free local TCP port by binding briefly and releasing it. func freePort(t *testing.T) int { t.Helper() ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("freePort: %v", err) } port := ln.Addr().(*net.TCPAddr).Port _ = ln.Close() return port } // TestProxyProtocolV1WithH1 verifies that PROXY protocol v1 headers are sent // correctly when the transport uses HTTP/1.1 to the upstream. func TestProxyProtocolV1WithH1(t *testing.T) { testProxyProtocolMatrix(t, "v1", []string{"1.1"}, 1) } // TestProxyProtocolV2WithH1 verifies that PROXY protocol v2 headers are sent // correctly when the transport uses HTTP/1.1 to the upstream. func TestProxyProtocolV2WithH1(t *testing.T) { testProxyProtocolMatrix(t, "v2", []string{"1.1"}, 1) } // TestProxyProtocolV1WithH2C verifies that PROXY protocol v1 headers are sent // correctly when the transport uses h2c (HTTP/2 cleartext) to the upstream. func TestProxyProtocolV1WithH2C(t *testing.T) { testProxyProtocolMatrix(t, "v1", []string{"h2c"}, 1) } // TestProxyProtocolV2WithH2C verifies that PROXY protocol v2 headers are sent // correctly when the transport uses h2c (HTTP/2 cleartext) to the upstream. 
// This is the primary regression test for github.com/caddyserver/caddy/issues/7529: // before the fix, the h2 transport opened a new TCP connection per request // (because req.URL.Host was mangled differently for each request due to the // varying client port), which caused file-descriptor exhaustion under load. func TestProxyProtocolV2WithH2C(t *testing.T) { testProxyProtocolMatrix(t, "v2", []string{"h2c"}, 1) } // TestProxyProtocolV2WithH2CMultipleRequests sends several sequential requests // through the h2c + PROXY-protocol path and confirms that: // 1. Every request receives a 200 response (no connection exhaustion). // 2. The backend received at least one PROXY header (connection was reused). // // This is the core regression guard for issue #7529: without the fix, a new // TCP connection was opened per request, quickly exhausting file descriptors. func TestProxyProtocolV2WithH2CMultipleRequests(t *testing.T) { testProxyProtocolMatrix(t, "v2", []string{"h2c"}, 5) } // TestProxyProtocolV1WithH2 verifies that PROXY protocol v1 headers are sent // correctly when the transport uses HTTP/2 over TLS (h2) to the upstream. func TestProxyProtocolV1WithH2(t *testing.T) { testTLSProxyProtocolMatrix(t, "v1", []string{"2"}, 1) } // TestProxyProtocolV2WithH2 verifies that PROXY protocol v2 headers are sent // correctly when the transport uses HTTP/2 over TLS (h2) to the upstream. func TestProxyProtocolV2WithH2(t *testing.T) { testTLSProxyProtocolMatrix(t, "v2", []string{"2"}, 1) } // TestProxyProtocolServerAndProxy is an end-to-end matrix test that exercises // all combinations of PROXY protocol version x transport version. 
func TestProxyProtocolServerAndProxy(t *testing.T) {
	// Plaintext (non-TLS) upstream combinations.
	plainTests := []struct {
		name              string
		ppVersion         string
		transportVersions []string
		numRequests       int
	}{
		{"h1-v1", "v1", []string{"1.1"}, 3},
		{"h1-v2", "v2", []string{"1.1"}, 3},
		{"h2c-v1", "v1", []string{"h2c"}, 3},
		{"h2c-v2", "v2", []string{"h2c"}, 3},
	}
	for _, tc := range plainTests {
		t.Run(tc.name, func(t *testing.T) {
			testProxyProtocolMatrix(t, tc.ppVersion, tc.transportVersions, tc.numRequests)
		})
	}

	// TLS (h2) upstream combinations.
	tlsTests := []struct {
		name              string
		ppVersion         string
		transportVersions []string
		numRequests       int
	}{
		{"h2-v1", "v1", []string{"2"}, 3},
		{"h2-v2", "v2", []string{"2"}, 3},
	}
	for _, tc := range tlsTests {
		t.Run(tc.name, func(t *testing.T) {
			testTLSProxyProtocolMatrix(t, tc.ppVersion, tc.transportVersions, tc.numRequests)
		})
	}
}

// testProxyProtocolMatrix is the shared implementation for the proxy protocol
// tests. It:
//  1. Starts a go-proxyproto-wrapped backend.
//  2. Configures Caddy as a reverse proxy with the given PROXY protocol
//     version and transport versions.
//  3. Sends numRequests GET requests through Caddy and asserts 200 OK each time.
//  4. Asserts the backend recorded at least one PROXY header whose source host
//     is 127.0.0.1 (the loopback address used by the test client).
func testProxyProtocolMatrix(t *testing.T, ppVersion string, transportVersions []string, numRequests int) {
	t.Helper()

	backend := newProxyProtoBackend(t)
	listenPort := freePort(t)

	tester := caddytest.NewTester(t)
	tester.WithDefaultOverrides(caddytest.Config{
		AdminPort: 2999,
	})
	cfg := proxyProtoConfig(listenPort, backend.addr(), ppVersion, transportVersions)
	tester.InitServer(cfg, "json")

	// If the test is h2c-only (no "1.1" in versions), reconfigure the test
	// client transport to use unencrypted HTTP/2 so we actually exercise the
	// h2c code path through Caddy.
	if slices.Contains(transportVersions, "h2c") && !slices.Contains(transportVersions, "1.1") {
		tr, ok := tester.Client.Transport.(*http.Transport)
		if ok {
			tr.Protocols = new(http.Protocols)
			tr.Protocols.SetHTTP1(false)
			tr.Protocols.SetUnencryptedHTTP2(true)
		}
	}

	proxyURL := fmt.Sprintf("http://127.0.0.1:%d/", listenPort)
	for i := 0; i < numRequests; i++ {
		resp, err := tester.Client.Get(proxyURL)
		if err != nil {
			t.Fatalf("request %d/%d: GET %s: %v", i+1, numRequests, proxyURL, err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			t.Errorf("request %d/%d: expected status 200, got %d", i+1, numRequests, resp.StatusCode)
		}
	}

	// The backend must have seen at least one PROXY header. For h1, there is
	// one per request; for h2c, requests share the same connection so only one
	// header is written at connection establishment.
	addrs := backend.recordedAddrs()
	if len(addrs) == 0 {
		t.Fatalf("backend recorded no PROXY protocol addresses (expected at least 1)")
	}

	// Every PROXY-decoded source address must be the loopback address since
	// the test client always connects from 127.0.0.1.
	for i, addr := range addrs {
		host, _, err := net.SplitHostPort(addr)
		if err != nil {
			t.Errorf("addr[%d] %q: SplitHostPort: %v", i, addr, err)
			continue
		}
		if host != "127.0.0.1" {
			t.Errorf("addr[%d]: expected source 127.0.0.1, got %q", i, host)
		}
	}
}

// TestProxyProtocolListenerWrapper verifies that Caddy's
// caddy.listeners.proxy_protocol listener wrapper can successfully parse
// incoming PROXY protocol headers.
//
// The test dials Caddy's listening port directly, injects a raw PROXY v2
// header spoofing source address 10.0.0.1:1234, then sends a normal
// HTTP/1.1 GET request. The Caddy server is configured to echo back the
// remote address ({http.request.remote.host}). The test asserts that the
// echoed address is the spoofed 10.0.0.1.
func TestProxyProtocolListenerWrapper(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(`{ skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns servers :9080 { listener_wrappers { proxy_protocol { timeout 5s allow 127.0.0.0/8 } } } } http://localhost:9080 { respond "{http.request.remote.host}" }`, "caddyfile") // Dial the Caddy listener directly and inject a PROXY v2 header that // claims the connection originates from 10.0.0.1:1234. conn, err := net.Dial("tcp", "127.0.0.1:9080") if err != nil { t.Fatalf("dial: %v", err) } defer conn.Close() spoofedSrc := &net.TCPAddr{IP: net.ParseIP("10.0.0.1"), Port: 1234} spoofedDst := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9080} hdr := goproxy.HeaderProxyFromAddrs(2, spoofedSrc, spoofedDst) if _, err := hdr.WriteTo(conn); err != nil { t.Fatalf("write proxy header: %v", err) } // Write a minimal HTTP/1.1 GET request. _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n") if err != nil { t.Fatalf("write HTTP request: %v", err) } // Read the raw response and look for the spoofed address in the body. 
buf := make([]byte, 4096) n, _ := conn.Read(buf) raw := string(buf[:n]) if !strings.Contains(raw, "10.0.0.1") { t.Errorf("expected spoofed address 10.0.0.1 in response body; full response:\n%s", raw) } } ================================================ FILE: caddytest/integration/reverseproxy_test.go ================================================ package integration import ( "fmt" "net" "net/http" "os" "runtime" "strings" "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestSRVReverseProxy(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities": { "local": { "install_trust": false } } }, "http": { "grace_period": 1, "servers": { "srv0": { "listen": [ ":18080" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "dynamic_upstreams": { "source": "srv", "name": "srv.host.service.consul" } } ] } ] } } } } } `, "json") } func TestDialWithPlaceholderUnix(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } f, err := os.CreateTemp("", "*.sock") if err != nil { t.Errorf("failed to create TempFile: %s", err) return } // a hack to get a file name within a valid path to use as socket socketName := f.Name() os.Remove(f.Name()) server := http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { w.Write([]byte("Hello, World!")) }), } unixListener, err := net.Listen("unix", socketName) if err != nil { t.Errorf("failed to listen on the socket: %s", err) return } go server.Serve(unixListener) t.Cleanup(func() { server.Close() }) runtime.Gosched() // Allow other goroutines to run tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities": { "local": { "install_trust": false } } }, "http": { "grace_period": 1, "servers": { "srv0": { "listen": [ ":18080" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": 
"unix/{http.request.header.X-Caddy-Upstream-Dial}" } ] } ] } ] } } } } } `, "json") req, err := http.NewRequest(http.MethodGet, "http://localhost:18080", nil) if err != nil { t.Fail() return } req.Header.Set("X-Caddy-Upstream-Dial", socketName) tester.AssertResponse(req, 200, "Hello, World!") } func TestReverseProxyWithPlaceholderDialAddress(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities": { "local": { "install_trust": false } } }, "http": { "grace_period": 1, "servers": { "srv0": { "listen": [ ":18080" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "static_response", "body": "Hello, World!" } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] } }, "srv1": { "listen": [ ":9080" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "{http.request.header.X-Caddy-Upstream-Dial}" } ] } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] } } } } } } `, "json") req, err := http.NewRequest(http.MethodGet, "http://localhost:9080", nil) if err != nil { t.Fail() return } req.Header.Set("X-Caddy-Upstream-Dial", "localhost:18080") tester.AssertResponse(req, 200, "Hello, World!") } func TestReverseProxyWithPlaceholderTCPDialAddress(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "pki": { "certificate_authorities": { "local": { "install_trust": false } } }, "http": { "grace_period": 1, "servers": { "srv0": { "listen": [ ":18080" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "static_response", "body": "Hello, World!" 
} ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] } }, "srv1": { "listen": [ ":9080" ], "routes": [ { "match": [ { "host": [ "localhost" ] } ], "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "tcp/{http.request.header.X-Caddy-Upstream-Dial}:18080" } ] } ], "terminal": true } ], "automatic_https": { "skip": [ "localhost" ] } } } } } } `, "json") req, err := http.NewRequest(http.MethodGet, "http://localhost:9080", nil) if err != nil { t.Fail() return } req.Header.Set("X-Caddy-Upstream-Dial", "localhost") tester.AssertResponse(req, 200, "Hello, World!") } func TestReverseProxyHealthCheck(t *testing.T) { // Start lightweight backend servers so they're ready before Caddy's // active health checker runs; this avoids a startup race where the // health checker probes backends that haven't yet begun accepting // connections and marks them unhealthy. // // This mirrors how health checks are typically used in practice (to a separate // backend service) and avoids probing the same Caddy instance while it's still // provisioning and not ready to accept connections. 
// backend server that responds to proxied requests helloSrv := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { _, _ = w.Write([]byte("Hello, World!")) }), } ln0, err := net.Listen("tcp", "127.0.0.1:2020") if err != nil { t.Fatalf("failed to listen on 127.0.0.1:2020: %v", err) } go helloSrv.Serve(ln0) t.Cleanup(func() { helloSrv.Close(); ln0.Close() }) // backend server that serves health checks healthSrv := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { _, _ = w.Write([]byte("ok")) }), } ln1, err := net.Listen("tcp", "127.0.0.1:2021") if err != nil { t.Fatalf("failed to listen on 127.0.0.1:2021: %v", err) } go healthSrv.Serve(ln1) t.Cleanup(func() { healthSrv.Close(); ln1.Close() }) tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { reverse_proxy { to localhost:2020 health_uri /health health_port 2021 health_interval 10ms health_timeout 100ms health_passes 1 health_fails 1 } } `, "caddyfile") tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!") } // TestReverseProxyHealthCheckPortUsed verifies that health_port is actually // used for active health checks and not the upstream's main port. This is a // regression test for https://github.com/caddyserver/caddy/issues/7524. func TestReverseProxyHealthCheckPortUsed(t *testing.T) { // upstream server: serves proxied requests normally, but returns 503 for // /health so that if health checks mistakenly hit this port the upstream // gets marked unhealthy and the proxy returns 503. 
upstreamSrv := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if req.URL.Path == "/health" { w.WriteHeader(http.StatusServiceUnavailable) return } _, _ = w.Write([]byte("Hello, World!")) }), } ln0, err := net.Listen("tcp", "127.0.0.1:2022") if err != nil { t.Fatalf("failed to listen on 127.0.0.1:2022: %v", err) } go upstreamSrv.Serve(ln0) t.Cleanup(func() { upstreamSrv.Close(); ln0.Close() }) // separate health check server on the configured health_port: returns 200 // so the upstream is marked healthy only if health checks go to this port. healthSrv := &http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { _, _ = w.Write([]byte("ok")) }), } ln1, err := net.Listen("tcp", "127.0.0.1:2023") if err != nil { t.Fatalf("failed to listen on 127.0.0.1:2023: %v", err) } go healthSrv.Serve(ln1) t.Cleanup(func() { healthSrv.Close(); ln1.Close() }) tester := caddytest.NewTester(t) tester.InitServer(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { reverse_proxy { to localhost:2022 health_uri /health health_port 2023 health_interval 10ms health_timeout 100ms health_passes 1 health_fails 1 } } `, "caddyfile") tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!") } func TestReverseProxyHealthCheckUnixSocket(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } tester := caddytest.NewTester(t) f, err := os.CreateTemp("", "*.sock") if err != nil { t.Errorf("failed to create TempFile: %s", err) return } // a hack to get a file name within a valid path to use as socket socketName := f.Name() os.Remove(f.Name()) server := http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if strings.HasPrefix(req.URL.Path, "/health") { w.Write([]byte("ok")) return } w.Write([]byte("Hello, World!")) }), } unixListener, err := net.Listen("unix", socketName) if err != nil { t.Errorf("failed to listen 
on the socket: %s", err) return } go server.Serve(unixListener) t.Cleanup(func() { server.Close() }) runtime.Gosched() // Allow other goroutines to run tester.InitServer(fmt.Sprintf(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { reverse_proxy { to unix/%s health_uri /health health_port 2021 health_interval 2s health_timeout 5s } } `, socketName), "caddyfile") tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!") } func TestReverseProxyHealthCheckUnixSocketWithoutPort(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } tester := caddytest.NewTester(t) f, err := os.CreateTemp("", "*.sock") if err != nil { t.Errorf("failed to create TempFile: %s", err) return } // a hack to get a file name within a valid path to use as socket socketName := f.Name() os.Remove(f.Name()) server := http.Server{ Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if strings.HasPrefix(req.URL.Path, "/health") { w.Write([]byte("ok")) return } w.Write([]byte("Hello, World!")) }), } unixListener, err := net.Listen("unix", socketName) if err != nil { t.Errorf("failed to listen on the socket: %s", err) return } go server.Serve(unixListener) t.Cleanup(func() { server.Close() }) runtime.Gosched() // Allow other goroutines to run tester.InitServer(fmt.Sprintf(` { skip_install_trust admin localhost:2999 http_port 9080 https_port 9443 grace_period 1ns } http://localhost:9080 { reverse_proxy { to unix/%s health_uri /health health_interval 2s health_timeout 5s } } `, socketName), "caddyfile") tester.AssertGetResponse("http://localhost:9080/", 200, "Hello, World!") } ================================================ FILE: caddytest/integration/sni_test.go ================================================ package integration import ( "testing" "github.com/caddyserver/caddy/v2/caddytest" ) func TestDefaultSNI(t *testing.T) { // arrange tester := caddytest.NewTester(t) 
tester.InitServer(`{ "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from a.caddy.localhost", "handler": "static_response", "status_code": 200 } ], "match": [ { "path": [ "/version" ] } ] } ] } ], "match": [ { "host": [ "127.0.0.1" ] } ], "terminal": true } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": ["cert0"] }, "match": { "sni": [ "127.0.0.1" ] } }, { "default_sni": "*.caddy.localhost" } ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "/caddy.localhost.crt", "key": "/caddy.localhost.key", "tags": [ "cert0" ] } ] } }, "pki": { "certificate_authorities" : { "local" : { "install_trust": false } } } } } `, "json") // act and assert // makes a request with no sni tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a.caddy.localhost") } func TestDefaultSNIWithNamedHostAndExplicitIP(t *testing.T) { // arrange tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "body": "hello from a", "handler": "static_response", "status_code": 200 } ], "match": [ { "path": [ "/version" ] } ] } ] } ], "match": [ { "host": [ "a.caddy.localhost", "127.0.0.1" ] } ], "terminal": true } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": ["cert0"] }, "default_sni": "a.caddy.localhost", "match": { "sni": [ "a.caddy.localhost", "127.0.0.1", "" ] } }, { "default_sni": "a.caddy.localhost" } ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "/a.caddy.localhost.crt", "key": "/a.caddy.localhost.key", "tags": [ "cert0" ] } ] } }, "pki": { 
"certificate_authorities" : { "local" : { "install_trust": false } } } } } `, "json") // act and assert // makes a request with no sni tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a") } func TestDefaultSNIWithPortMappingOnly(t *testing.T) { // arrange tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "handle": [ { "body": "hello from a.caddy.localhost", "handler": "static_response", "status_code": 200 } ], "match": [ { "path": [ "/version" ] } ] } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": ["cert0"] }, "default_sni": "a.caddy.localhost" } ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "/a.caddy.localhost.crt", "key": "/a.caddy.localhost.key", "tags": [ "cert0" ] } ] } }, "pki": { "certificate_authorities" : { "local" : { "install_trust": false } } } } } `, "json") // act and assert // makes a request with no sni tester.AssertGetResponse("https://127.0.0.1:9443/version", 200, "hello from a.caddy.localhost") } func TestHttpOnlyOnDomainWithSNI(t *testing.T) { caddytest.AssertAdapt(t, ` { skip_install_trust default_sni a.caddy.localhost } :80 { respond /version 200 { body "hello from localhost" } } `, "caddyfile", `{ "apps": { "http": { "servers": { "srv0": { "listen": [ ":80" ], "routes": [ { "match": [ { "path": [ "/version" ] } ], "handle": [ { "body": "hello from localhost", "handler": "static_response", "status_code": 200 } ] } ] } } }, "pki": { "certificate_authorities": { "local": { "install_trust": false } } } } }`) } ================================================ FILE: caddytest/integration/stream_test.go ================================================ package integration import ( "compress/gzip" "context" "crypto/rand" "fmt" "io" "net/http" "net/http/httputil" "net/url" "strings" "testing" 
"time" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "github.com/caddyserver/caddy/v2/caddytest" ) // (see https://github.com/caddyserver/caddy/issues/3556 for use case) func TestH2ToH2CStream(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "handle": [ { "handler": "reverse_proxy", "transport": { "protocol": "http", "compression": false, "versions": [ "h2c", "2" ] }, "upstreams": [ { "dial": "localhost:54321" } ] } ], "match": [ { "path": [ "/tov2ray" ] } ] } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": ["cert0"] }, "default_sni": "a.caddy.localhost" } ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "/a.caddy.localhost.crt", "key": "/a.caddy.localhost.key", "tags": [ "cert0" ] } ] } }, "pki": { "certificate_authorities" : { "local" : { "install_trust": false } } } } } `, "json") expectedBody := "some data to be echoed" // start the server server := testH2ToH2CStreamServeH2C(t) go server.ListenAndServe() defer func() { ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() server.Shutdown(ctx) }() r, w := io.Pipe() req := &http.Request{ Method: "PUT", Body: io.NopCloser(r), URL: &url.URL{ Scheme: "https", Host: "127.0.0.1:9443", Path: "/tov2ray", }, Proto: "HTTP/2", ProtoMajor: 2, ProtoMinor: 0, Header: make(http.Header), } // Disable any compression method from server. 
req.Header.Set("Accept-Encoding", "identity") resp := tester.AssertResponseCode(req, http.StatusOK) if resp.StatusCode != http.StatusOK { return } go func() { fmt.Fprint(w, expectedBody) w.Close() }() defer resp.Body.Close() bytes, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("unable to read the response body %s", err) } body := string(bytes) if !strings.Contains(body, expectedBody) { t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body) } } func testH2ToH2CStreamServeH2C(t *testing.T) *http.Server { h2s := &http2.Server{} handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rstring, err := httputil.DumpRequest(r, false) if err == nil { t.Logf("h2c server received req: %s", rstring) } // We only accept HTTP/2! if r.ProtoMajor != 2 { t.Error("Not a HTTP/2 request, rejected!") w.WriteHeader(http.StatusInternalServerError) return } if r.Host != "127.0.0.1:9443" { t.Errorf("r.Host doesn't match, %v!", r.Host) w.WriteHeader(http.StatusNotFound) return } if !strings.HasPrefix(r.URL.Path, "/tov2ray") { w.WriteHeader(http.StatusNotFound) return } w.Header().Set("Cache-Control", "no-store") w.WriteHeader(200) http.NewResponseController(w).Flush() buf := make([]byte, 4*1024) for { n, err := r.Body.Read(buf) if n > 0 { w.Write(buf[:n]) } if err != nil { if err == io.EOF { r.Body.Close() } break } } }) server := &http.Server{ Addr: "127.0.0.1:54321", Handler: h2c.NewHandler(handler, h2s), } return server } // (see https://github.com/caddyserver/caddy/issues/3606 for use case) func TestH2ToH1ChunkedResponse(t *testing.T) { tester := caddytest.NewTester(t) tester.InitServer(` { "admin": { "listen": "localhost:2999" }, "logging": { "logs": { "default": { "level": "DEBUG" } } }, "apps": { "http": { "http_port": 9080, "https_port": 9443, "grace_period": 1, "servers": { "srv0": { "listen": [ ":9443" ], "routes": [ { "handle": [ { "handler": "subroute", "routes": [ { "handle": [ { "encodings": { 
"gzip": {} }, "handler": "encode" } ] }, { "handle": [ { "handler": "reverse_proxy", "upstreams": [ { "dial": "localhost:54321" } ] } ], "match": [ { "path": [ "/tov2ray" ] } ] } ] } ], "terminal": true } ], "tls_connection_policies": [ { "certificate_selection": { "any_tag": [ "cert0" ] }, "default_sni": "a.caddy.localhost" } ] } } }, "tls": { "certificates": { "load_files": [ { "certificate": "/a.caddy.localhost.crt", "key": "/a.caddy.localhost.key", "tags": [ "cert0" ] } ] } }, "pki": { "certificate_authorities": { "local": { "install_trust": false } } } } } `, "json") // need a large body here to trigger caddy's compression, larger than gzip.miniLength expectedBody, err := GenerateRandomString(1024) if err != nil { t.Fatalf("generate expected body failed, err: %s", err) } // start the server server := testH2ToH1ChunkedResponseServeH1(t) go server.ListenAndServe() defer func() { ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() server.Shutdown(ctx) }() r, w := io.Pipe() req := &http.Request{ Method: "PUT", Body: io.NopCloser(r), URL: &url.URL{ Scheme: "https", Host: "127.0.0.1:9443", Path: "/tov2ray", }, Proto: "HTTP/2", ProtoMajor: 2, ProtoMinor: 0, Header: make(http.Header), } // underlying transport will automatically add gzip // req.Header.Set("Accept-Encoding", "gzip") go func() { fmt.Fprint(w, expectedBody) w.Close() }() resp := tester.AssertResponseCode(req, http.StatusOK) if resp.StatusCode != http.StatusOK { return } defer resp.Body.Close() bytes, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("unable to read the response body %s", err) } body := string(bytes) if body != expectedBody { t.Errorf("requesting \"%s\" expected response body \"%s\" but got \"%s\"", req.RequestURI, expectedBody, body) } } func testH2ToH1ChunkedResponseServeH1(t *testing.T) *http.Server { handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Host != "127.0.0.1:9443" { t.Errorf("r.Host doesn't match, %v!", 
r.Host) w.WriteHeader(http.StatusNotFound) return } if !strings.HasPrefix(r.URL.Path, "/tov2ray") { w.WriteHeader(http.StatusNotFound) return } defer r.Body.Close() bytes, err := io.ReadAll(r.Body) if err != nil { t.Fatalf("unable to read the response body %s", err) } n := len(bytes) var writer io.Writer if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { gw, err := gzip.NewWriterLevel(w, 5) if err != nil { t.Error("can't return gzip data") w.WriteHeader(http.StatusInternalServerError) return } defer gw.Close() writer = gw w.Header().Set("Content-Encoding", "gzip") w.Header().Del("Content-Length") w.WriteHeader(200) } else { writer = w } if n > 0 { writer.Write(bytes[:]) } }) server := &http.Server{ Addr: "127.0.0.1:54321", Handler: handler, } return server } // GenerateRandomBytes returns securely generated random bytes. // It will return an error if the system's secure random // number generator fails to function correctly, in which // case the caller should not continue. func GenerateRandomBytes(n int) ([]byte, error) { b := make([]byte, n) _, err := rand.Read(b) // Note that err == nil only if we read len(b) bytes. if err != nil { return nil, err } return b, nil } // GenerateRandomString returns a securely generated random string. // It will return an error if the system's secure random // number generator fails to function correctly, in which // case the caller should not continue. func GenerateRandomString(n int) (string, error) { const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" bytes, err := GenerateRandomBytes(n) if err != nil { return "", err } for i, b := range bytes { bytes[i] = letters[b%byte(len(letters))] } return string(bytes), nil } ================================================ FILE: caddytest/integration/testdata/cookie.html ================================================

Cookie.ClientName {{.Cookie "clientname"}}

================================================ FILE: caddytest/integration/testdata/foo.txt ================================================ foo ================================================ FILE: caddytest/integration/testdata/foo_with_multiple_trailing_newlines.txt ================================================ foo ================================================ FILE: caddytest/integration/testdata/foo_with_trailing_newline.txt ================================================ foo ================================================ FILE: caddytest/integration/testdata/import_respond.txt ================================================ respond "'I am {args[0]}', hears {args[1]}" ================================================ FILE: caddytest/integration/testdata/index.localhost.html ================================================ ================================================ FILE: caddytest/integration/testdata/issue_7518_unused_block_panic_snippets.conf ================================================ # Used by import_block_snippet_non_replaced_block_from_separate_file.caddyfiletest (snippet) { header { reverse_proxy localhost:3000 {block} } } # This snippet being unused by the test Caddyfile is intentional. 
# This is to test that a panic runtime error triggered by an out-of-range slice index access # will not happen again, please see issue #7518 and pull request #7543 for more information (unused_snippet) { header SomeHeader SomeValue } ================================================ FILE: caddytest/leafcert.pem ================================================ -----BEGIN CERTIFICATE----- MIICUTCCAfugAwIBAgIBADANBgkqhkiG9w0BAQQFADBXMQswCQYDVQQGEwJDTjEL MAkGA1UECBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMC VU4xFDASBgNVBAMTC0hlcm9uZyBZYW5nMB4XDTA1MDcxNTIxMTk0N1oXDTA1MDgx NDIxMTk0N1owVzELMAkGA1UEBhMCQ04xCzAJBgNVBAgTAlBOMQswCQYDVQQHEwJD TjELMAkGA1UEChMCT04xCzAJBgNVBAsTAlVOMRQwEgYDVQQDEwtIZXJvbmcgWWFu ZzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCp5hnG7ogBhtlynpOS21cBewKE/B7j V14qeyslnr26xZUsSVko36ZnhiaO/zbMOoRcKK9vEcgMtcLFuQTWDl3RAgMBAAGj gbEwga4wHQYDVR0OBBYEFFXI70krXeQDxZgbaCQoR4jUDncEMH8GA1UdIwR4MHaA FFXI70krXeQDxZgbaCQoR4jUDncEoVukWTBXMQswCQYDVQQGEwJDTjELMAkGA1UE CBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMCVU4xFDAS BgNVBAMTC0hlcm9uZyBZYW5nggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEE BQADQQA/ugzBrjjK9jcWnDVfGHlk3icNRq0oV7Ri32z/+HQX67aRfgZu7KWdI+Ju Wm7DCfrPNGVwFWUQOmsPue9rZBgO -----END CERTIFICATE----- ================================================ FILE: cmd/caddy/main.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package main is the entry point of the Caddy application. // Most of Caddy's functionality is provided through modules, // which can be plugged in by adding their import below. // // There is no need to modify the Caddy source code to customize your // builds. You can easily build a custom Caddy with these simple steps: // // 1. Copy this file (main.go) into a new folder // 2. Edit the imports below to include the modules you want plugged in // 3. Run `go mod init caddy` // 4. Run `go install` or `go build` - you now have a custom binary! // // Or you can use xcaddy which does it all for you as a command: // https://github.com/caddyserver/xcaddy package main import ( _ "time/tzdata" caddycmd "github.com/caddyserver/caddy/v2/cmd" // plug in Caddy modules here _ "github.com/caddyserver/caddy/v2/modules/standard" ) func main() { caddycmd.Main() } ================================================ FILE: cmd/caddy/setcap.sh ================================================ #!/bin/sh # USAGE: # go run -exec ./setcap.sh main.go # # (Example: `go run -exec ./setcap.sh main.go run --config caddy.json`) # # For some reason this does not work on my Arch system, so if you find that's # the case, you can instead do: # # go build && ./setcap.sh ./caddy # # but this will leave the ./caddy binary laying around. # sudo setcap cap_net_bind_service=+ep "$1" "$@" ================================================ FILE: cmd/cobra.go ================================================ package caddycmd import ( "fmt" "github.com/spf13/cobra" "github.com/caddyserver/caddy/v2" ) var defaultFactory = newRootCommandFactory(func() *cobra.Command { bin := caddy.CustomBinaryName if bin == "" { bin = "caddy" } long := caddy.CustomLongDescription if long == "" { long = `Caddy is an extensible server platform written in Go. At its core, Caddy merely manages configuration. Modules are plugged in statically at compile-time to provide useful functionality. 
Caddy's standard distribution includes common modules to serve HTTP, TLS, and PKI applications, including the automation of certificates. To run Caddy, use: - 'caddy run' to run Caddy in the foreground (recommended). - 'caddy start' to start Caddy in the background; only do this if you will be keeping the terminal window open until you run 'caddy stop' to close the server. When Caddy is started, it opens a locally-bound administrative socket to which configuration can be POSTed via a restful HTTP API (see https://caddyserver.com/docs/api). Caddy's native configuration format is JSON. However, config adapters can be used to convert other config formats to JSON when Caddy receives its configuration. The Caddyfile is a built-in config adapter that is popular for hand-written configurations due to its straightforward syntax (see https://caddyserver.com/docs/caddyfile). Many third-party adapters are available (see https://caddyserver.com/docs/config-adapters). Use 'caddy adapt' to see how a config translates to JSON. For convenience, the CLI can act as an HTTP client to give Caddy its initial configuration for you. If a file named Caddyfile is in the current working directory, it will do this automatically. Otherwise, you can use the --config flag to specify the path to a config file. Some special-purpose subcommands build and load a configuration file for you directly from command line input; for example: - caddy file-server - caddy reverse-proxy - caddy respond These commands disable the administration endpoint because their configuration is specified solely on the command line. In general, the most common way to run Caddy is simply: $ caddy run Or, with a configuration file: $ caddy run --config caddy.json If running interactively in a terminal, running Caddy in the background may be more convenient: $ caddy start ... $ caddy stop This allows you to run other commands while Caddy stays running. Be sure to stop Caddy before you close the terminal! 
Depending on the system, Caddy may need permission to bind to low ports. One way to do this on Linux is to use setcap: $ sudo setcap cap_net_bind_service=+ep $(which caddy) Remember to run that command again after replacing the binary. See the Caddy website for tutorials, configuration structure, syntax, and module documentation: https://caddyserver.com/docs/ Custom Caddy builds are available on the Caddy download page at: https://caddyserver.com/download The xcaddy command can be used to build Caddy from source with or without additional plugins: https://github.com/caddyserver/xcaddy Where possible, Caddy should be installed using officially-supported package installers: https://caddyserver.com/docs/install Instructions for running Caddy in production are also available: https://caddyserver.com/docs/running ` } return &cobra.Command{ Use: bin, Long: long, Example: ` $ caddy run $ caddy run --config caddy.json $ caddy reload --config caddy.json $ caddy stop`, // kind of annoying to have all the help text printed out if // caddy has an error provisioning its modules, for instance... SilenceUsage: true, Version: onlyVersionText(), } }) const fullDocsFooter = `Full documentation is available at: https://caddyserver.com/docs/command-line` func init() { defaultFactory.Use(func(rootCmd *cobra.Command) { rootCmd.SetVersionTemplate("{{.Version}}\n") rootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + "\n" + fullDocsFooter + "\n") }) } func onlyVersionText() string { _, f := caddy.Version() return f } func caddyCmdToCobra(caddyCmd Command) *cobra.Command { cmd := &cobra.Command{ Use: caddyCmd.Name + " " + caddyCmd.Usage, Short: caddyCmd.Short, Long: caddyCmd.Long, } if caddyCmd.CobraFunc != nil { caddyCmd.CobraFunc(cmd) } else { cmd.RunE = WrapCommandFuncForCobra(caddyCmd.Func) cmd.Flags().AddGoFlagSet(caddyCmd.Flags) } return cmd } // WrapCommandFuncForCobra wraps a Caddy CommandFunc for use // in a cobra command's RunE field. 
func WrapCommandFuncForCobra(f CommandFunc) func(cmd *cobra.Command, _ []string) error {
	return func(cmd *cobra.Command, _ []string) error {
		status, err := f(Flags{cmd.Flags()})
		// Exit codes above 1 are Caddy-specific; wrap them in exitError so
		// Main() can use them as the process exit status, and silence cobra's
		// own error printing so the error is not reported twice. Status 0 and 1
		// fall through and are handled by cobra's default behavior.
		if status > 1 {
			cmd.SilenceErrors = true
			return &exitError{ExitCode: status, Err: err}
		}
		return err
	}
}

// exitError carries the exit code from CommandFunc to Main()
type exitError struct {
	ExitCode int   // process exit status to report
	Err      error // underlying error; may be nil
}

// Error implements the error interface. If there is no underlying
// error, a message is synthesized from the exit code.
func (e *exitError) Error() string {
	if e.Err == nil {
		return fmt.Sprintf("exiting with status %d", e.ExitCode)
	}
	return e.Err.Error()
}

================================================
FILE: cmd/commandfactory.go
================================================
package caddycmd

import (
	"github.com/spf13/cobra"
)

// rootCommandFactory builds the root cobra command: constructor creates
// the base command, and each registered option mutates it afterward.
type rootCommandFactory struct {
	constructor func() *cobra.Command
	options     []func(*cobra.Command)
}

// newRootCommandFactory returns a factory whose Build method calls fn
// to construct the root command.
func newRootCommandFactory(fn func() *cobra.Command) *rootCommandFactory {
	return &rootCommandFactory{
		constructor: fn,
	}
}

// Use registers an option function to be applied to the root command
// when Build is called.
func (f *rootCommandFactory) Use(fn func(cmd *cobra.Command)) {
	f.options = append(f.options, fn)
}

// Build constructs the root command and applies all registered options
// in the order they were added via Use.
func (f *rootCommandFactory) Build() *cobra.Command {
	o := f.constructor()
	for _, v := range f.options {
		v(o)
	}
	return o
}

================================================
FILE: cmd/commandfuncs.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd import ( "bytes" "context" "crypto/rand" "encoding/json" "errors" "fmt" "io" "io/fs" "log" "maps" "net" "net/http" "os" "os/exec" "runtime" "runtime/debug" "strings" "github.com/aryann/difflib" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/internal" ) func cmdStart(fl Flags) (int, error) { configFlag := fl.String("config") configAdapterFlag := fl.String("adapter") pidfileFlag := fl.String("pidfile") watchFlag := fl.Bool("watch") var err error var envfileFlag []string envfileFlag, err = fl.GetStringSlice("envfile") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("reading envfile flag: %v", err) } // open a listener to which the child process will connect when // it is ready to confirm that it has successfully started ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("opening listener for success confirmation: %v", err) } defer ln.Close() // craft the command with a pingback address and with a // pipe for its stdin, so we can tell it our confirmation // code that we expect so that some random port scan at // the most unfortunate time won't fool us into thinking // the child succeeded (i.e. the alternative is to just // wait for any connection on our listener, but better to // ensure it's the process we're expecting - we can be // sure by giving it some random bytes and having it echo // them back to us) cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String()) //nolint:gosec // no command injection that I can determine... 
// we should be able to run caddy in relative paths if errors.Is(cmd.Err, exec.ErrDot) { cmd.Err = nil } if configFlag != "" { cmd.Args = append(cmd.Args, "--config", configFlag) } for _, envfile := range envfileFlag { cmd.Args = append(cmd.Args, "--envfile", envfile) } if configAdapterFlag != "" { cmd.Args = append(cmd.Args, "--adapter", configAdapterFlag) } if watchFlag { cmd.Args = append(cmd.Args, "--watch") } if pidfileFlag != "" { cmd.Args = append(cmd.Args, "--pidfile", pidfileFlag) } stdinPipe, err := cmd.StdinPipe() if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("creating stdin pipe: %v", err) } cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // generate the random bytes we'll send to the child process expect := make([]byte, 32) _, err = rand.Read(expect) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("generating random confirmation bytes: %v", err) } // begin writing the confirmation bytes to the child's // stdin; use a goroutine since the child hasn't been // started yet, and writing synchronously would result // in a deadlock go func() { _, _ = stdinPipe.Write(expect) stdinPipe.Close() }() // start the process err = cmd.Start() if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("starting caddy process: %v", err) } // there are two ways we know we're done: either // the process will connect to our listener, or // it will exit with an error success, exit := make(chan struct{}), make(chan error) // in one goroutine, we await the success of the child process go func() { for { conn, err := ln.Accept() if err != nil { if !errors.Is(err, net.ErrClosed) { log.Println(err) } break } err = handlePingbackConn(conn, expect) if err == nil { close(success) break } log.Println(err) } }() // in another goroutine, we await the failure of the child process go func() { err := cmd.Wait() // don't send on this line! 
Wait blocks, but send starts before it unblocks exit <- err // sending on separate line ensures select won't trigger until after Wait unblocks }() // when one of the goroutines unblocks, we're done and can exit select { case <-success: fmt.Printf("Successfully started Caddy (pid=%d) - Caddy is running in the background\n", cmd.Process.Pid) case err := <-exit: return caddy.ExitCodeFailedStartup, fmt.Errorf("caddy process exited with error: %v", err) } return caddy.ExitCodeSuccess, nil } func cmdRun(fl Flags) (int, error) { caddy.TrapSignals() // set up buffered logging for early startup // so that we can hold onto logs until after // the config is loaded (or fails to load) // so that we can write the logs to the user's // configured output. we must be sure to flush // on any error before the config is loaded. logger, defaultLogger, logBuffer := caddy.BufferedLog() undoMaxProcs := setResourceLimits(logger) defer undoMaxProcs() // release the local reference to the undo function so it can be GC'd; // the deferred call above has already captured the actual function value. 
undoMaxProcs = nil //nolint:ineffassign,wastedassign configFlag := fl.String("config") configAdapterFlag := fl.String("adapter") resumeFlag := fl.Bool("resume") printEnvFlag := fl.Bool("environ") watchFlag := fl.Bool("watch") pidfileFlag := fl.String("pidfile") pingbackFlag := fl.String("pingback") // load all additional envs as soon as possible err := handleEnvFileFlag(fl) if err != nil { logBuffer.FlushTo(defaultLogger) return caddy.ExitCodeFailedStartup, err } // if we are supposed to print the environment, do that first if printEnvFlag { printEnvironment() } // load the config, depending on flags var config []byte if resumeFlag { config, err = os.ReadFile(caddy.ConfigAutosavePath) if errors.Is(err, fs.ErrNotExist) { // not a bad error; just can't resume if autosave file doesn't exist logger.Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath)) resumeFlag = false } else if err != nil { logBuffer.FlushTo(defaultLogger) return caddy.ExitCodeFailedStartup, err } else { if configFlag == "" { logger.Info("resuming from last configuration", zap.String("autosave_file", caddy.ConfigAutosavePath)) } else { // if they also specified a config file, user should be aware that we're not // using it (doing so could lead to data/config loss by overwriting!) logger.Warn("--config and --resume flags were used together; ignoring --config and resuming from last configuration", zap.String("autosave_file", caddy.ConfigAutosavePath)) } } } // we don't use 'else' here since this value might have been changed in 'if' block; i.e. 
not mutually exclusive var configFile string var adapterUsed string if !resumeFlag { config, configFile, adapterUsed, err = LoadConfig(configFlag, configAdapterFlag) if err != nil { logBuffer.FlushTo(defaultLogger) return caddy.ExitCodeFailedStartup, err } } // create pidfile now, in case loading config takes a while (issue #5477) if pidfileFlag != "" { err := caddy.PIDFile(pidfileFlag) if err != nil { logger.Error("unable to write PID file", zap.String("pidfile", pidfileFlag), zap.Error(err)) } } // If we have a source config file (we're running via 'caddy run --config ...'), // record it so SIGUSR1 can reload from the same file. Also provide a callback // that knows how to load/adapt that source when requested by the main process. if configFile != "" { caddy.SetLastConfig(configFile, adapterUsed, func(file, adapter string) error { cfg, _, _, err := LoadConfig(file, adapter) if err != nil { return err } return caddy.Load(cfg, true) }) } // run the initial config err = caddy.Load(config, true) if err != nil { logBuffer.FlushTo(defaultLogger) return caddy.ExitCodeFailedStartup, fmt.Errorf("loading initial config: %v", err) } // release the reference to the config so it can be GC'd config = nil //nolint:ineffassign,wastedassign // at this stage the config will have replaced the // default logger to the configured one, so we can // log normally, now that the config is running. 
// also clear our ref to the buffer so it can get GC'd logger = caddy.Log() defaultLogger = nil //nolint:ineffassign,wastedassign logBuffer = nil //nolint:wastedassign,ineffassign logger.Info("serving initial configuration") // if we are to report to another process the successful start // of the server, do so now by echoing back contents of stdin if pingbackFlag != "" { confirmationBytes, err := io.ReadAll(os.Stdin) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("reading confirmation bytes from stdin: %v", err) } conn, err := net.Dial("tcp", pingbackFlag) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("dialing confirmation address: %v", err) } _, err = conn.Write(confirmationBytes) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("writing confirmation bytes to %s: %v", pingbackFlag, err) } // close (non-defer because we `select {}` below) // and release references so they can be GC'd conn.Close() confirmationBytes = nil //nolint:ineffassign,wastedassign conn = nil //nolint:wastedassign,ineffassign } // if enabled, reload config file automatically on changes // (this better only be used in dev!) 
if watchFlag { go watchConfigFile(configFile, adapterUsed) } // warn if the environment does not provide enough information about the disk hasXDG := os.Getenv("XDG_DATA_HOME") != "" && os.Getenv("XDG_CONFIG_HOME") != "" && os.Getenv("XDG_CACHE_HOME") != "" switch runtime.GOOS { case "windows": if os.Getenv("HOME") == "" && os.Getenv("USERPROFILE") == "" && !hasXDG { logger.Warn("neither HOME nor USERPROFILE environment variables are set - please fix; some assets might be stored in ./caddy") } case "plan9": if os.Getenv("home") == "" && !hasXDG { logger.Warn("$home environment variable is empty - please fix; some assets might be stored in ./caddy") } default: if os.Getenv("HOME") == "" && !hasXDG { logger.Warn("$HOME environment variable is empty - please fix; some assets might be stored in ./caddy") } } // release the last local logger reference logger = nil //nolint:wastedassign,ineffassign select {} } func cmdStop(fl Flags) (int, error) { addressFlag := fl.String("address") configFlag := fl.String("config") configAdapterFlag := fl.String("adapter") adminAddr, err := DetermineAdminAPIAddress(addressFlag, nil, configFlag, configAdapterFlag) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err) } resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/stop", nil, nil) if err != nil { caddy.Log().Warn("failed using API to stop instance", zap.Error(err)) return caddy.ExitCodeFailedStartup, err } defer resp.Body.Close() return caddy.ExitCodeSuccess, nil } func cmdReload(fl Flags) (int, error) { configFlag := fl.String("config") configAdapterFlag := fl.String("adapter") addressFlag := fl.String("address") forceFlag := fl.Bool("force") // get the config in caddy's native format config, configFile, adapterUsed, err := LoadConfig(configFlag, configAdapterFlag) if err != nil { return caddy.ExitCodeFailedStartup, err } if configFile == "" { return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load") } 
adminAddr, err := DetermineAdminAPIAddress(addressFlag, config, configFile, configAdapterFlag) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err) } // optionally force a config reload headers := make(http.Header) if forceFlag { headers.Set("Cache-Control", "must-revalidate") } // Provide the source file/adapter to the running process so it can // preserve its last-config knowledge if this reload came from the same source. headers.Set("Caddy-Config-Source-File", configFile) headers.Set("Caddy-Config-Source-Adapter", adapterUsed) resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config)) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err) } defer resp.Body.Close() return caddy.ExitCodeSuccess, nil } func cmdVersion(_ Flags) (int, error) { _, full := caddy.Version() fmt.Println(full) return caddy.ExitCodeSuccess, nil } func cmdBuildInfo(_ Flags) (int, error) { bi, ok := debug.ReadBuildInfo() if !ok { return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information") } fmt.Println(bi) return caddy.ExitCodeSuccess, nil } // jsonModuleInfo holds metadata about a Caddy module for JSON output. 
type jsonModuleInfo struct {
	ModuleName string `json:"module_name"`           // the Caddy module ID
	ModuleType string `json:"module_type"`           // "standard", "non-standard", or "unknown"
	Version    string `json:"version,omitempty"`     // Go module version, if known
	PackageURL string `json:"package_url,omitempty"` // Go module path, if known
}

// cmdListModules prints the modules compiled into this binary, either as
// human-readable text (optionally with versions and package paths) or as
// JSON when --json is given.
func cmdListModules(fl Flags) (int, error) {
	packages := fl.Bool("packages")
	versions := fl.Bool("versions")
	skipStandard := fl.Bool("skip-standard")
	jsonOutput := fl.Bool("json")

	// Organize modules by whether they come with the standard distribution
	standard, nonstandard, unknown, err := getModules()
	if err != nil {
		// If module info can't be fetched, just print the IDs and exit
		for _, m := range caddy.Modules() {
			fmt.Println(m)
		}
		return caddy.ExitCodeSuccess, nil
	}

	// Logic for JSON output
	if jsonOutput {
		output := []jsonModuleInfo{}

		// addToOutput is a helper to convert internal module info to the JSON-serializable struct
		addToOutput := func(list []moduleInfo, moduleType string) {
			for _, mi := range list {
				item := jsonModuleInfo{
					ModuleName: mi.caddyModuleID,
					ModuleType: moduleType, // Mapping the type here
				}
				if mi.goModule != nil {
					item.Version = mi.goModule.Version
					item.PackageURL = mi.goModule.Path
				}
				output = append(output, item)
			}
		}

		// Pass the respective type for each category
		if !skipStandard {
			addToOutput(standard, "standard")
		}
		addToOutput(nonstandard, "non-standard")
		addToOutput(unknown, "unknown")

		jsonBytes, err := json.MarshalIndent(output, "", " ")
		if err != nil {
			return caddy.ExitCodeFailedQuit, err
		}
		fmt.Println(string(jsonBytes))
		return caddy.ExitCodeSuccess, nil
	}

	// Logic for Text output (Fallback)
	printModuleInfo := func(mi moduleInfo) {
		fmt.Print(mi.caddyModuleID)
		if versions && mi.goModule != nil {
			fmt.Print(" " + mi.goModule.Version)
		}
		if packages && mi.goModule != nil {
			fmt.Print(" " + mi.goModule.Path)
			if mi.goModule.Replace != nil {
				fmt.Print(" => " + mi.goModule.Replace.Path)
			}
		}
		if mi.err != nil {
			fmt.Printf(" [%v]", mi.err)
		}
		fmt.Println()
	}

	// Standard modules (always shipped with Caddy)
	if !skipStandard {
		if len(standard) > 0 {
			for _, mod := range standard {
				printModuleInfo(mod)
			}
		}
		fmt.Printf("\n Standard modules: %d\n", len(standard))
	}

	// Non-standard modules (third party plugins)
	if len(nonstandard) > 0 {
		if len(standard) > 0 && !skipStandard {
			fmt.Println()
		}
		for _, mod := range nonstandard {
			printModuleInfo(mod)
		}
		fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard))
	}

	// Unknown modules (couldn't get Caddy module info)
	if len(unknown) > 0 {
		if (len(standard) > 0 && !skipStandard) || len(nonstandard) > 0 {
			fmt.Println()
		}
		for _, mod := range unknown {
			printModuleInfo(mod)
		}
		fmt.Printf("\n Unknown modules: %d\n", len(unknown))
	}

	return caddy.ExitCodeSuccess, nil
}

// cmdEnviron prints the environment as seen by this process, after
// loading any --envfile files.
func cmdEnviron(fl Flags) (int, error) {
	// load all additional envs as soon as possible
	err := handleEnvFileFlag(fl)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	printEnvironment()
	return caddy.ExitCodeSuccess, nil
}

// cmdAdaptConfig runs the named config adapter on the input file (or stdin
// when the filename is "-") and prints the resulting native JSON config to
// stdout; warnings go to the log, and --validate optionally checks the result.
func cmdAdaptConfig(fl Flags) (int, error) {
	configFlag := fl.String("config")
	adapterFlag := fl.String("adapter")
	prettyFlag := fl.Bool("pretty")
	validateFlag := fl.Bool("validate")

	var err error
	configFlag, err = configFileWithRespectToDefault(caddy.Log(), configFlag)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}
	if configFlag == "" {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
	}

	// load all additional envs as soon as possible
	err = handleEnvFileFlag(fl)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	if adapterFlag == "" {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("adapter name is required (use --adapt flag or leave unspecified for default)")
	}

	cfgAdapter := caddyconfig.GetAdapter(adapterFlag)
	if cfgAdapter == nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("unrecognized config adapter: %s", adapterFlag)
	}

	var input []byte
	// read from stdin if the file name is "-"
	if configFlag == "-" {
		input, err = io.ReadAll(os.Stdin)
	} else {
		input, err = os.ReadFile(configFlag)
	}
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("reading input file: %v", err)
	}

	opts := map[string]any{"filename": configFlag}

	adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	if prettyFlag {
		var prettyBuf bytes.Buffer
		err = json.Indent(&prettyBuf, adaptedConfig, "", "\t")
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		adaptedConfig = prettyBuf.Bytes()
	}

	// print result to stdout
	fmt.Println(string(adaptedConfig))

	// print warnings to stderr
	for _, warn := range warnings {
		msg := warn.Message
		if warn.Directive != "" {
			msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
		}
		caddy.Log().Named(adapterFlag).Warn(msg,
			zap.String("file", warn.File),
			zap.Int("line", warn.Line))
	}

	// validate output if requested
	if validateFlag {
		var cfg *caddy.Config
		err = caddy.StrictUnmarshalJSON(adaptedConfig, &cfg)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
		}
		err = caddy.Validate(cfg)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("validation: %v", err)
		}
	}

	return caddy.ExitCodeSuccess, nil
}

// cmdValidateConfig loads, decodes, and provisions the given config to
// reveal any errors, without actually running it.
func cmdValidateConfig(fl Flags) (int, error) {
	configFlag := fl.String("config")
	adapterFlag := fl.String("adapter")

	// load all additional envs as soon as possible
	err := handleEnvFileFlag(fl)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// use default config and ensure a config file is specified
	configFlag, err = configFileWithRespectToDefault(caddy.Log(), configFlag)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}
	if configFlag == "" {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
	}

	input, _, _, err := LoadConfig(configFlag, adapterFlag)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}
	input = caddy.RemoveMetaFields(input)

	var cfg *caddy.Config
	err = caddy.StrictUnmarshalJSON(input, &cfg)
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
	}

	err = caddy.Validate(cfg)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	fmt.Println("Valid configuration")

	return caddy.ExitCodeSuccess, nil
}

// cmdFmt formats a Caddyfile (from the positional arg, --config, stdin
// via "-", or the adjacent "Caddyfile" by default) and prints the result,
// optionally overwriting the file in place (--overwrite) or showing a
// diff (--diff). Returns a non-zero exit if the input was not formatted.
func cmdFmt(fl Flags) (int, error) {
	configFile := fl.Arg(0)
	configFlag := fl.String("config")
	// exactly one input source is allowed
	if (len(fl.Args()) > 1) || (configFlag != "" && configFile != "") {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("fmt does not support multiple files %s %s", configFlag, strings.Join(fl.Args(), " "))
	}
	if configFile == "" && configFlag == "" {
		configFile = "Caddyfile"
	} else if configFile == "" {
		configFile = configFlag
	}
	// as a special case, read from stdin if the file name is "-"
	if configFile == "-" {
		input, err := io.ReadAll(os.Stdin)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("reading stdin: %v", err)
		}
		fmt.Print(string(caddyfile.Format(input)))
		return caddy.ExitCodeSuccess, nil
	}

	input, err := os.ReadFile(configFile)
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("reading input file: %v", err)
	}

	output := caddyfile.Format(input)

	if fl.Bool("overwrite") {
		if err := os.WriteFile(configFile, output, 0o600); err != nil { //nolint:gosec // path traversal is not really a thing here, this is either "Caddyfile" or admin-controlled
			return caddy.ExitCodeFailedStartup, fmt.Errorf("overwriting formatted file: %v", err)
		}
		return caddy.ExitCodeSuccess, nil
	}

	if fl.Bool("diff") {
		diff := difflib.Diff(
			strings.Split(string(input), "\n"),
			strings.Split(string(output), "\n"))
		for _, d := range diff {
			switch d.Delta {
			case difflib.Common:
				fmt.Printf(" %s\n", d.Payload)
			case difflib.LeftOnly:
				fmt.Printf("- %s\n", d.Payload)
			case difflib.RightOnly:
				fmt.Printf("+ %s\n", d.Payload)
			}
		}
	} else {
		fmt.Print(string(output))
	}

	// exit non-zero if the input needed formatting, so this command can
	// serve as a CI-style formatting check
	if warning, diff := caddyfile.FormattingDifference(configFile, input); diff {
		return caddy.ExitCodeFailedStartup, fmt.Errorf(`%s:%d: Caddyfile input is not formatted; Tip: use '--overwrite' to update your
Caddyfile in-place instead of previewing it. Consult '--help' for more options`,
			warning.File,
			warning.Line,
		)
	}

	return caddy.ExitCodeSuccess, nil
}

// handleEnvFileFlag loads the environment variables from the given --envfile
// flag if specified. This should be called as early as possible in the
// command function.
func handleEnvFileFlag(fl Flags) error {
	var err error
	var envfileFlag []string
	envfileFlag, err = fl.GetStringSlice("envfile")
	if err != nil {
		return fmt.Errorf("reading envfile flag: %v", err)
	}

	for _, envfile := range envfileFlag {
		if err := loadEnvFromFile(envfile); err != nil {
			return fmt.Errorf("loading additional environment variables: %v", err)
		}
	}

	return nil
}

// AdminAPIRequest makes an API request according to the CLI flags given,
// with the given HTTP method and request URI. If body is non-nil, it will
// be assumed to be Content-Type application/json. The caller should close
// the response body. Should only be used by Caddy CLI commands which
// need to interact with a running instance of Caddy via the admin API.
func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) (*http.Response, error) {
	parsedAddr, err := caddy.ParseNetworkAddress(adminAddr)
	if err != nil || parsedAddr.PortRangeSize() > 1 {
		return nil, fmt.Errorf("invalid admin address %s: %v", adminAddr, err)
	}
	origin := "http://" + parsedAddr.JoinHostPort(0)
	if parsedAddr.IsUnixNetwork() {
		origin = "http://127.0.0.1" // bogus host is a hack so that http.NewRequest() is happy

		// the unix address at this point might still contain the optional
		// unix socket permissions, which are part of the address/host.
		// those need to be removed first, as they aren't part of the
		// resulting unix file path
		addr, _, err := internal.SplitUnixSocketPermissionsBits(parsedAddr.Host)
		if err != nil {
			return nil, err
		}
		parsedAddr.Host = addr
	} else if parsedAddr.IsFdNetwork() {
		origin = "http://127.0.0.1"
	}

	// form the request
	req, err := http.NewRequest(method, origin+uri, body)
	if err != nil {
		return nil, fmt.Errorf("making request: %v", err)
	}
	if parsedAddr.IsUnixNetwork() || parsedAddr.IsFdNetwork() {
		// We used to conform to RFC 2616 Section 14.26 which requires
		// an empty host header when there is no host, as is the case
		// with unix sockets and socket fds. However, Go required a
		// Host value so we used a hack of a space character as the host
		// (it would see the Host was non-empty, then trim the space later).
		// As of Go 1.20.6 (July 2023), this hack no longer works. See:
		// https://github.com/golang/go/issues/60374
		// See also the discussion here:
		// https://github.com/golang/go/issues/61431
		//
		// After that, we now require a Host value of either 127.0.0.1
		// or ::1 if one is set. Above I choose to use 127.0.0.1. Even
		// though the value should be completely irrelevant (it could be
		// "srldkjfsd"), if for some reason the Host *is* used, at least
		// we can have some reasonable assurance it will stay on the local
		// machine and that browsers, if they ever allow access to unix
		// sockets, can still enforce CORS, ensuring it is still coming
		// from the local machine.
	} else {
		req.Header.Set("Origin", origin)
	}
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	maps.Copy(req.Header, headers)

	// make an HTTP client that dials our network type, since admin
	// endpoints aren't always TCP, which is what the default transport
	// expects; reuse is not of particular concern here
	client := http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0))
			},
		},
	}

	resp, err := client.Do(req) //nolint:gosec // the only SSRF here would be self-sabatoge I think
	if err != nil {
		return nil, fmt.Errorf("performing request: %v", err)
	}

	// if it didn't work, let the user know
	if resp.StatusCode >= 400 {
		respBody, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024*2))
		if err != nil {
			return nil, fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
		}
		return nil, fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
	}

	return resp, nil
}

// DetermineAdminAPIAddress determines which admin API endpoint address should
// be used based on the inputs. By priority: if `address` is specified, then
// it is returned; if `config` is specified, then that config will be used for
// finding the admin address; if `configFile` (and `configAdapter`) are specified,
// then that config will be loaded to find the admin address; otherwise, the
// default admin listen address will be returned.
func DetermineAdminAPIAddress(address string, config []byte, configFile, configAdapter string) (string, error) { // Prefer the address if specified and non-empty if address != "" { return address, nil } // Try to load the config from file if specified, with the given adapter name if configFile != "" { var loadedConfigFile string var err error // use the provided loaded config if non-empty // otherwise, load it from the specified file/adapter loadedConfig := config if len(loadedConfig) == 0 { // get the config in caddy's native format loadedConfig, loadedConfigFile, _, err = LoadConfig(configFile, configAdapter) if err != nil { return "", err } if loadedConfigFile == "" { return "", fmt.Errorf("no config file to load; either use --config flag or ensure Caddyfile exists in current directory") } } // get the address of the admin listener from the config if len(loadedConfig) > 0 { var tmpStruct struct { Admin caddy.AdminConfig `json:"admin"` } err := json.Unmarshal(loadedConfig, &tmpStruct) if err != nil { return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err) } if tmpStruct.Admin.Listen != "" { return tmpStruct.Admin.Listen, nil } } } // Fallback to the default listen address otherwise return caddy.DefaultAdminListen, nil } // configFileWithRespectToDefault returns the filename to use for loading the config, based // on whether a config file is already specified and a supported default config file exists. 
func configFileWithRespectToDefault(logger *zap.Logger, configFile string) (string, error) { const defaultCaddyfile = "Caddyfile" // if no input file was specified, try a default Caddyfile if the Caddyfile adapter is plugged in if configFile == "" && caddyconfig.GetAdapter("caddyfile") != nil { _, err := os.Stat(defaultCaddyfile) if err == nil { // default Caddyfile exists if logger != nil { logger.Info("using adjacent Caddyfile") } return defaultCaddyfile, nil } if !errors.Is(err, fs.ErrNotExist) { // problem checking return configFile, fmt.Errorf("checking if default Caddyfile exists: %v", err) } } // default config file does not exist or is irrelevant return configFile, nil } type moduleInfo struct { caddyModuleID string goModule *debug.Module err error } ================================================ FILE: cmd/commands.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddycmd import ( "flag" "fmt" "os" "regexp" "strings" "sync" "github.com/spf13/cobra" "github.com/spf13/cobra/doc" "github.com/caddyserver/caddy/v2" ) // Command represents a subcommand. Name, Func, // and Short are required. type Command struct { // The name of the subcommand. Must conform to the // format described by the RegisterCommand() godoc. // Required. Name string // Usage is a brief message describing the syntax of // the subcommand's flags and args. 
Use [] to indicate // optional parameters and <> to enclose literal values // intended to be replaced by the user. Do not prefix // the string with "caddy" or the name of the command // since these will be prepended for you; only include // the actual parameters for this command. Usage string // Short is a one-line message explaining what the // command does. Should not end with punctuation. // Required. Short string // Long is the full help text shown to the user. // Will be trimmed of whitespace on both ends before // being printed. Long string // Flags is the flagset for command. // This is ignored if CobraFunc is set. Flags *flag.FlagSet // Func is a function that executes a subcommand using // the parsed flags. It returns an exit code and any // associated error. // Required if CobraFunc is not set. Func CommandFunc // CobraFunc allows further configuration of the command // via cobra's APIs. If this is set, then Func and Flags // are ignored, with the assumption that they are set in // this function. A caddycmd.WrapCommandFuncForCobra helper // exists to simplify porting CommandFunc to Cobra's RunE. CobraFunc func(*cobra.Command) } // CommandFunc is a command's function. It runs the // command and returns the proper exit code along with // any error that occurred. type CommandFunc func(Flags) (int, error) // Commands returns a list of commands initialised by // RegisterCommand func Commands() map[string]Command { commandsMu.RLock() defer commandsMu.RUnlock() return commands } var ( commandsMu sync.RWMutex commands = make(map[string]Command) ) func init() { RegisterCommand(Command{ Name: "start", Usage: "[--config [--adapter ]] [--envfile ] [--watch] [--pidfile ]", Short: "Starts the Caddy process in the background and then returns", Long: ` Starts the Caddy process, optionally bootstrapped with an initial config file. This command unblocks after the server starts running or fails to run. 
If --envfile is specified, an environment file with environment variables in the KEY=VALUE format will be loaded into the Caddy process. On Windows, the spawned child process will remain attached to the terminal, so closing the window will forcefully stop Caddy; to avoid forgetting this, try using 'caddy run' instead to keep it in the foreground. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file") cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply") cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load") cmd.Flags().BoolP("watch", "w", false, "Reload changed config file automatically") cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID") cmd.RunE = WrapCommandFuncForCobra(cmdStart) }, }) RegisterCommand(Command{ Name: "run", Usage: "[--config [--adapter ]] [--envfile ] [--environ] [--resume] [--watch] [--pidfile ]", Short: `Starts the Caddy process and blocks indefinitely`, Long: ` Starts the Caddy process, optionally bootstrapped with an initial config file, and blocks indefinitely until the server is stopped; i.e. runs Caddy in "daemon" mode (foreground). If a config file is specified, it will be applied immediately after the process is running. If the config file is not in Caddy's native JSON format, you can specify an adapter with --adapter to adapt the given config file to Caddy's native format. The config adapter must be a registered module. Any warnings will be printed to the log, but beware that any adaptation without errors will immediately be used. If you want to review the results of the adaptation first, use the 'adapt' subcommand. As a special case, if the current working directory has a file called "Caddyfile" and the caddyfile config adapter is plugged in (default), then that file will be loaded and used to configure Caddy, even without any command line flags. 
If --envfile is specified, an environment file with environment variables in the KEY=VALUE format will be loaded into the Caddy process. If --environ is specified, the environment as seen by the Caddy process will be printed before starting. This is the same as the environ command but does not quit after printing, and can be useful for troubleshooting. The --resume flag will override the --config flag if there is a config auto- save file. It is not an error if --resume is used and no autosave file exists. If --watch is specified, the config file will be loaded automatically after changes. ⚠️ This can make unintentional config changes easier; only use this option in a local development environment. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file") cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply") cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load") cmd.Flags().BoolP("environ", "e", false, "Print environment") cmd.Flags().BoolP("resume", "r", false, "Use saved config, if any (and prefer over --config file)") cmd.Flags().BoolP("watch", "w", false, "Watch config file for changes and reload it automatically") cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID") cmd.Flags().StringP("pingback", "", "", "Echo confirmation bytes to this address on success") cmd.RunE = WrapCommandFuncForCobra(cmdRun) }, }) RegisterCommand(Command{ Name: "stop", Usage: "[--config [--adapter ]] [--address ]", Short: "Gracefully stops a started Caddy process", Long: ` Stops the background Caddy process as gracefully as possible. It requires that the admin API is enabled and accessible, since it will use the API's /stop endpoint. The address of this request can be customized using the --address flag, or from the given --config, if not the default. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file to use to parse the admin address, if --address is not used") cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (when --config is used)") cmd.Flags().StringP("address", "", "", "The address to use to reach the admin API endpoint, if not the default") cmd.RunE = WrapCommandFuncForCobra(cmdStop) }, }) RegisterCommand(Command{ Name: "reload", Usage: "--config [--adapter ] [--address ]", Short: "Changes the config of the running Caddy instance", Long: ` Gives the running Caddy instance a new configuration. This has the same effect as POSTing a document to the /load API endpoint, but is convenient for simple workflows revolving around config files. Since the admin endpoint is configurable, the endpoint configuration is loaded from the --address flag if specified; otherwise it is loaded from the given config file; otherwise the default is assumed. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file (required)") cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply") cmd.Flags().StringP("address", "", "", "Address of the administration listener, if different from config") cmd.Flags().BoolP("force", "f", false, "Force config reload, even if it is the same") cmd.RunE = WrapCommandFuncForCobra(cmdReload) }, }) RegisterCommand(Command{ Name: "version", Short: "Prints the version", Long: ` Prints the version of this Caddy binary. Version information must be embedded into the binary at compile-time in order for Caddy to display anything useful with this command. If Caddy is built from within a version control repository, the Go command will embed the revision hash if available. However, if Caddy is built in the way specified by our online documentation (or by using xcaddy), more detailed version information is printed as given by Go modules. 
For more details about the full version string, see the Go module documentation: https://go.dev/doc/modules/version-numbers `, Func: cmdVersion, }) RegisterCommand(Command{ Name: "list-modules", Usage: "[--packages] [--versions] [--skip-standard] [--json]", Short: "Lists the installed Caddy modules", CobraFunc: func(cmd *cobra.Command) { cmd.Flags().BoolP("packages", "", false, "Print package paths") cmd.Flags().BoolP("versions", "", false, "Print version information") cmd.Flags().BoolP("skip-standard", "s", false, "Skip printing standard modules") cmd.Flags().BoolP("json", "", false, "Print modules in JSON format") cmd.RunE = WrapCommandFuncForCobra(cmdListModules) }, }) RegisterCommand(Command{ Name: "build-info", Short: "Prints information about this build", Func: cmdBuildInfo, }) RegisterCommand(Command{ Name: "environ", Usage: "[--envfile ]", Short: "Prints the environment", Long: ` Prints the environment as seen by this Caddy process. The environment includes variables set in the system. If your Caddy configuration uses environment variables (e.g. "{env.VARIABLE}") then this command can be useful for verifying that the variables will have the values you expect in your config. If --envfile is specified, an environment file with environment variables in the KEY=VALUE format will be loaded into the Caddy process. Note that environments may be different depending on how you run Caddy. Environments for Caddy instances started by service managers such as systemd are often different than the environment inherited from your shell or terminal. You can also print the environment the same time you use "caddy run" by adding the "--environ" flag. Environments may contain sensitive data. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load") cmd.RunE = WrapCommandFuncForCobra(cmdEnviron) }, }) RegisterCommand(Command{ Name: "adapt", Usage: "--config [--adapter ] [--pretty] [--validate] [--envfile ]", Short: "Adapts a configuration to Caddy's native JSON", Long: ` Adapts a configuration to Caddy's native JSON format and writes the output to stdout, along with any warnings to stderr. If --pretty is specified, the output will be formatted with indentation for human readability. If --validate is used, the adapted config will be checked for validity. If the config is invalid, an error will be printed to stderr and a non- zero exit status will be returned. If --envfile is specified, an environment file with environment variables in the KEY=VALUE format will be loaded into the Caddy process. If you wish to use stdin instead of a regular file, use - as the path. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file to adapt (required)") cmd.Flags().StringP("adapter", "a", "caddyfile", "Name of config adapter") cmd.Flags().BoolP("pretty", "p", false, "Format the output for human readability") cmd.Flags().BoolP("validate", "", false, "Validate the output") cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load") cmd.RunE = WrapCommandFuncForCobra(cmdAdaptConfig) }, }) RegisterCommand(Command{ Name: "validate", Usage: "--config [--adapter ] [--envfile ]", Short: "Tests whether a configuration file is valid", Long: ` Loads and provisions the provided config, but does not start running it. This reveals any errors with the configuration through the loading and provisioning stages. If --envfile is specified, an environment file with environment variables in the KEY=VALUE format will be loaded into the Caddy process. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Input configuration file") cmd.Flags().StringP("adapter", "a", "", "Name of config adapter") cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load") cmd.RunE = WrapCommandFuncForCobra(cmdValidateConfig) }, }) RegisterCommand(Command{ Name: "storage", Short: "Commands for working with Caddy's storage (EXPERIMENTAL)", Long: ` Allows exporting and importing Caddy's storage contents. The two commands can be combined in a pipeline to transfer directly from one storage to another: $ caddy storage export --config Caddyfile.old --output - | > caddy storage import --config Caddyfile.new --input - The - argument refers to stdout and stdin, respectively. NOTE: When importing to or exporting from file_system storage (the default), the command should be run as the user that owns the associated root path. EXPERIMENTAL: May be changed or removed. `, CobraFunc: func(cmd *cobra.Command) { exportCmd := &cobra.Command{ Use: "export --config --output ", Short: "Exports storage assets as a tarball", Long: ` The contents of the configured storage module (TLS certificates, etc) are exported via a tarball. --output is required, - can be given for stdout. `, RunE: WrapCommandFuncForCobra(cmdExportStorage), } exportCmd.Flags().StringP("config", "c", "", "Input configuration file (required)") exportCmd.Flags().StringP("output", "o", "", "Output path") cmd.AddCommand(exportCmd) importCmd := &cobra.Command{ Use: "import --config --input ", Short: "Imports storage assets from a tarball.", Long: ` Imports storage assets to the configured storage module. The import file must be a tar archive. --input is required, - can be given for stdin. 
`, RunE: WrapCommandFuncForCobra(cmdImportStorage), } importCmd.Flags().StringP("config", "c", "", "Configuration file to load (required)") importCmd.Flags().StringP("input", "i", "", "Tar of assets to load (required)") cmd.AddCommand(importCmd) }, }) RegisterCommand(Command{ Name: "fmt", Usage: "[--overwrite] [--diff] []", Short: "Formats a Caddyfile", Long: ` Formats the Caddyfile by adding proper indentation and spaces to improve human readability. It prints the result to stdout. If --overwrite is specified, the output will be written to the config file directly instead of printing it. If --diff is specified, the output will be compared against the input, and lines will be prefixed with '-' and '+' where they differ. Note that unchanged lines are prefixed with two spaces for alignment, and that this is not a valid patch format. If you wish to use stdin instead of a regular file, use - as the path. When reading from stdin, the --overwrite flag has no effect: the result is always printed to stdout. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("config", "c", "", "Configuration file") cmd.Flags().BoolP("overwrite", "w", false, "Overwrite the input file with the results") cmd.Flags().BoolP("diff", "d", false, "Print the differences between the input file and the formatted output") cmd.RunE = WrapCommandFuncForCobra(cmdFmt) }, }) RegisterCommand(Command{ Name: "upgrade", Short: "Upgrade Caddy (EXPERIMENTAL)", Long: ` Downloads an updated Caddy binary with the same modules/plugins at the latest versions. EXPERIMENTAL: May be changed or removed. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it") cmd.RunE = WrapCommandFuncForCobra(cmdUpgrade) }, }) RegisterCommand(Command{ Name: "add-package", Usage: "", Short: "Adds Caddy packages (EXPERIMENTAL)", Long: ` Downloads an updated Caddy binary with the specified packages (module/plugin) added, with an optional version specified (e.g., "package@version"). Retains existing packages. Returns an error if any of the specified packages are already included. EXPERIMENTAL: May be changed or removed. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it") cmd.RunE = WrapCommandFuncForCobra(cmdAddPackage) }, }) RegisterCommand(Command{ Name: "remove-package", Func: cmdRemovePackage, Usage: "", Short: "Removes Caddy packages (EXPERIMENTAL)", Long: ` Downloads an updated Caddy binaries without the specified packages (module/plugin). Returns an error if any of the packages are not included. EXPERIMENTAL: May be changed or removed. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it") cmd.RunE = WrapCommandFuncForCobra(cmdRemovePackage) }, }) defaultFactory.Use(func(rootCmd *cobra.Command) { manpageCommand := Command{ Name: "manpage", Usage: "--directory ", Short: "Generates the manual pages for Caddy commands", Long: ` Generates the manual pages for Caddy commands into the designated directory tagged into section 8 (System Administration). The manual page files are generated into the directory specified by the argument of --directory. If the directory does not exist, it will be created. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("directory", "o", "", "The output directory where the manpages are generated") cmd.RunE = WrapCommandFuncForCobra(func(fl Flags) (int, error) { dir := strings.TrimSpace(fl.String("directory")) if dir == "" { return caddy.ExitCodeFailedQuit, fmt.Errorf("designated output directory and specified section are required") } if err := os.MkdirAll(dir, 0o755); err != nil { return caddy.ExitCodeFailedQuit, err } if err := doc.GenManTree(rootCmd, &doc.GenManHeader{ Title: "Caddy", Section: "8", // https://en.wikipedia.org/wiki/Man_page#Manual_sections }, dir); err != nil { return caddy.ExitCodeFailedQuit, err } return caddy.ExitCodeSuccess, nil }) }, } // source: https://github.com/spf13/cobra/blob/6dec1ae26659a130bdb4c985768d1853b0e1bc06/site/content/completions/_index.md completionCommand := Command{ Name: "completion", Usage: "[bash|zsh|fish|powershell]", Short: "Generate completion script", Long: fmt.Sprintf(`To load completions: Bash: $ source <(%[1]s completion bash) # To load completions for each session, execute once: # Linux: $ %[1]s completion bash > /etc/bash_completion.d/%[1]s # macOS: $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s Zsh: # If shell completion is not already enabled in your environment, # you will need to enable it. You can execute the following once: $ echo "autoload -U compinit; compinit" >> ~/.zshrc # To load completions for each session, execute once: $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" # You will need to start a new shell for this setup to take effect. fish: $ %[1]s completion fish | source # To load completions for each session, execute once: $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish PowerShell: PS> %[1]s completion powershell | Out-String | Invoke-Expression # To load completions for every new session, run: PS> %[1]s completion powershell > %[1]s.ps1 # and source this file from your PowerShell profile. 
`, rootCmd.Root().Name()), CobraFunc: func(cmd *cobra.Command) { cmd.DisableFlagsInUseLine = true cmd.ValidArgs = []string{"bash", "zsh", "fish", "powershell"} cmd.Args = cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs) cmd.RunE = func(cmd *cobra.Command, args []string) error { switch args[0] { case "bash": return cmd.Root().GenBashCompletion(os.Stdout) case "zsh": return cmd.Root().GenZshCompletion(os.Stdout) case "fish": return cmd.Root().GenFishCompletion(os.Stdout, true) case "powershell": return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) default: return fmt.Errorf("unrecognized shell: %s", args[0]) } } }, } rootCmd.AddCommand(caddyCmdToCobra(manpageCommand)) rootCmd.AddCommand(caddyCmdToCobra(completionCommand)) // add manpage and completion commands to the map of // available commands, because they're not registered // through RegisterCommand. commandsMu.Lock() commands[manpageCommand.Name] = manpageCommand commands[completionCommand.Name] = completionCommand commandsMu.Unlock() }) } // RegisterCommand registers the command cmd. // cmd.Name must be unique and conform to the // following format: // // - lowercase // - alphanumeric and hyphen characters only // - cannot start or end with a hyphen // - hyphen cannot be adjacent to another hyphen // // This function panics if the name is already registered, // if the name does not meet the described format, or if // any of the fields are missing from cmd. // // This function should be used in init(). 
func RegisterCommand(cmd Command) {
	commandsMu.Lock()
	defer commandsMu.Unlock()
	// validate required fields before accepting the command
	if cmd.Name == "" {
		panic("command name is required")
	}
	if cmd.Func == nil && cmd.CobraFunc == nil {
		panic("command function missing")
	}
	if cmd.Short == "" {
		panic("command short string is required")
	}
	if _, exists := commands[cmd.Name]; exists {
		panic("command already registered: " + cmd.Name)
	}
	if !commandNameRegex.MatchString(cmd.Name) {
		panic("invalid command name")
	}
	// defer the cobra wiring until the root command is built,
	// so registration order does not matter
	defaultFactory.Use(func(rootCmd *cobra.Command) {
		rootCmd.AddCommand(caddyCmdToCobra(cmd))
	})
	commands[cmd.Name] = cmd
}

// commandNameRegex enforces the naming rules documented on RegisterCommand:
// lowercase alphanumerics and hyphens, no leading/trailing or doubled hyphens.
var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)

================================================
FILE: cmd/commands_test.go
================================================
package caddycmd

import (
	"maps"
	"reflect"
	"slices"
	"testing"
)

func TestCommandsAreAvailable(t *testing.T) {
	// trigger init, and build the default factory, so that
	// all commands from this package are available
	cmd := defaultFactory.Build()
	if cmd == nil {
		t.Fatal("default factory failed to build")
	}

	// check that the default factory has 17 commands; it doesn't
	// include the commands registered through calls to init in
	// other packages
	cmds := Commands()
	if len(cmds) != 17 {
		t.Errorf("expected 17 commands, got %d", len(cmds))
	}
	commandNames := slices.Collect(maps.Keys(cmds))
	slices.Sort(commandNames)
	expectedCommandNames := []string{
		"adapt",
		"add-package",
		"build-info",
		"completion",
		"environ",
		"fmt",
		"list-modules",
		"manpage",
		"reload",
		"remove-package",
		"run",
		"start",
		"stop",
		"storage",
		"upgrade",
		"validate",
		"version",
	}
	if !reflect.DeepEqual(expectedCommandNames, commandNames) {
		t.Errorf("expected %v, got %v", expectedCommandNames, commandNames)
	}
}

================================================
FILE: cmd/main.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the
"License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddycmd import ( "bufio" "bytes" "encoding/json" "errors" "flag" "fmt" "io" "io/fs" "log" "log/slog" "net" "os" "path/filepath" "runtime" "runtime/debug" "strconv" "strings" "time" "github.com/KimMachineGun/automemlimit/memlimit" "github.com/caddyserver/certmagic" "github.com/spf13/pflag" "go.uber.org/automaxprocs/maxprocs" "go.uber.org/zap" "go.uber.org/zap/exp/zapslog" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" ) func init() { // set a fitting User-Agent for ACME requests version, _ := caddy.Version() cleanModVersion := strings.TrimPrefix(version, "v") ua := "Caddy/" + cleanModVersion if uaEnv, ok := os.LookupEnv("USERAGENT"); ok { ua = uaEnv + " " + ua } certmagic.UserAgent = ua // by using Caddy, user indicates agreement to CA terms // (very important, as Caddy is often non-interactive // and thus ACME account creation will fail!) certmagic.DefaultACME.Agreed = true } // Main implements the main function of the caddy command. // Call this if Caddy is to be the main() of your program. func Main() { if len(os.Args) == 0 { fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n") os.Exit(caddy.ExitCodeFailedStartup) } if err := defaultFactory.Build().Execute(); err != nil { var exitError *exitError if errors.As(err, &exitError) { os.Exit(exitError.ExitCode) } os.Exit(1) } } // handlePingbackConn reads from conn and ensures it matches // the bytes in expect, or returns an error if it doesn't. 
func handlePingbackConn(conn net.Conn, expect []byte) error {
	defer conn.Close()
	// cap the read so a misbehaving peer cannot send unbounded data
	confirmationBytes, err := io.ReadAll(io.LimitReader(conn, 32))
	if err != nil {
		return err
	}
	if !bytes.Equal(confirmationBytes, expect) {
		return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
	}
	return nil
}

// LoadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be also. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error, but false will be returned if
// there is no config available. It prints any warnings to stderr,
// and returns the resulting JSON config bytes along with
// the name of the loaded config file (if any).
// The return values are:
//   - config bytes (nil if no config)
//   - config file used ("" if none)
//   - adapter used ("" if none)
//   - error, if any
func LoadConfig(configFile, adapterName string) ([]byte, string, string, error) {
	return loadConfigWithLogger(caddy.Log(), configFile, adapterName)
}

// isCaddyfile reports whether the given config file should be
// treated as a Caddyfile, based on the adapter name and the
// file's base name/extension. It never returns an error in the
// current implementation, but keeps the error return for callers.
func isCaddyfile(configFile, adapterName string) (bool, error) {
	if adapterName == "caddyfile" {
		return true, nil
	}

	// as a special case, if a config file starts with "caddyfile" or
	// has a ".caddyfile" extension, and no adapter is specified, and
	// no adapter module name matches the extension, assume
	// caddyfile adapter for convenience
	baseConfig := strings.ToLower(filepath.Base(configFile))
	baseConfigExt := filepath.Ext(baseConfig)
	startsOrEndsInCaddyfile := strings.HasPrefix(baseConfig, "caddyfile") || strings.HasSuffix(baseConfig, ".caddyfile")

	// a .json extension always means JSON, never Caddyfile
	if baseConfigExt == ".json" {
		return false, nil
	}

	// If no adapter was specified and the file name starts with
	// "caddyfile" or ends in ".caddyfile", assume the caddyfile
	// format; any non-.json extension is tolerated here
	// (e.g. "Caddyfile.prd" is still treated as a Caddyfile).
	if adapterName == "" && startsOrEndsInCaddyfile {
		return true, nil
	}

	// adapter is not empty,
	// adapter is not "caddyfile",
	// extension is not ".json",
	// extension is not ".caddyfile"
	// file does not start with "Caddyfile"
	return false, nil
}

// loadConfigWithLogger is the core of LoadConfig; it accepts an
// explicit logger (nil means a no-op logger) so callers such as
// the config-file watcher can suppress logging.
func loadConfigWithLogger(logger *zap.Logger, configFile, adapterName string) ([]byte, string, string, error) {
	// if no logger is provided, use a nop logger
	// just so we don't have to check for nil
	if logger == nil {
		logger = zap.NewNop()
	}

	// specifying an adapter without a config file is ambiguous
	if adapterName != "" && configFile == "" {
		return nil, "", "", fmt.Errorf("cannot adapt config without config file (use --config)")
	}

	// load initial config and adapter
	var config []byte
	var cfgAdapter caddyconfig.Adapter
	var err error
	if configFile != "" {
		if configFile == "-" {
			config, err = io.ReadAll(os.Stdin)
			if err != nil {
				return nil, "", "", fmt.Errorf("reading config from stdin: %v", err)
			}
			logger.Info("using config from stdin")
		} else {
			config, err = os.ReadFile(configFile)
			if err != nil {
				return nil, "", "", fmt.Errorf("reading config from file: %v", err)
			}
			logger.Info("using config from file", zap.String("file", configFile))
		}
	} else if adapterName == "" {
		// if the Caddyfile adapter is plugged in, we can try using an
		// adjacent Caddyfile by default
		cfgAdapter = caddyconfig.GetAdapter("caddyfile")
		if cfgAdapter != nil {
			config, err = os.ReadFile("Caddyfile")
			if errors.Is(err, fs.ErrNotExist) {
				// okay, no default Caddyfile; pretend like this never happened
				cfgAdapter = nil
			} else if err != nil {
				// default Caddyfile exists, but error reading it
				return nil, "", "", fmt.Errorf("reading default Caddyfile: %v", err)
			} else {
				// success reading default Caddyfile
				configFile = "Caddyfile"
				logger.Info("using adjacent Caddyfile")
			}
		}
	}

	// promote to the caddyfile adapter if the file name implies it
	if yes, err := isCaddyfile(configFile, adapterName); yes {
		adapterName = "caddyfile"
	} else if err != nil {
		return nil, "", "", err
	}

	// load config adapter
	if adapterName != "" {
		cfgAdapter = caddyconfig.GetAdapter(adapterName)
		if cfgAdapter == nil {
			return nil, "", "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
		}
	}

	// adapt config
	if cfgAdapter != nil {
		adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]any{
			"filename": configFile,
		})
		if err != nil {
			return nil, "", "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
		}
		logger.Info("adapted config to JSON", zap.String("adapter", adapterName))
		for _, warn := range warnings {
			msg := warn.Message
			if warn.Directive != "" {
				msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
			}
			logger.Warn(msg,
				zap.String("adapter", adapterName),
				zap.String("file", warn.File),
				zap.Int("line", warn.Line))
		}
		config = adaptedConfig
	} else if len(config) != 0 {
		// validate that the config is at least valid JSON
		err = json.Unmarshal(config, new(any))
		if err != nil {
			if jsonErr, ok := err.(*json.SyntaxError); ok {
				return nil, "", "", fmt.Errorf("config is not valid JSON: %w, at offset %d; did you mean to use a config adapter (the --adapter flag)?", err, jsonErr.Offset)
			}
			return nil, "", "", fmt.Errorf("config is not valid JSON: %w; did you mean to use a config adapter (the --adapter flag)?", err)
		}
	}

	return config, configFile, adapterName, nil
}

// watchConfigFile watches the config file at filename for changes
// and reloads the config if the file was updated. This function
// blocks indefinitely; it only quits if the poller has errors for
// long enough time. The filename passed in must be the actual
// config file used, not one to be discovered.
// Each second the config file is loaded and parsed into an object
// and is compared to the last config object that was loaded
func watchConfigFile(filename, adapterName string) {
	// a panic in this background goroutine must not kill the process
	defer func() {
		if err := recover(); err != nil {
			log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
		}
	}()

	// make our logger; since config reloads can change the
	// default logger, we need to get it dynamically each time
	logger := func() *zap.Logger {
		return caddy.Log().
			Named("watcher").
			With(zap.String("config_file", filename))
	}

	// get current config (nil logger: suppress repeated log output)
	lastCfg, _, _, err := loadConfigWithLogger(nil, filename, adapterName)
	if err != nil {
		logger().Error("unable to load latest config", zap.Error(err))
		return
	}

	logger().Info("watching config file for changes")

	// begin poller
	//nolint:staticcheck
	for range time.Tick(1 * time.Second) {
		// get current config
		newCfg, _, _, err := loadConfigWithLogger(nil, filename, adapterName)
		if err != nil {
			logger().Error("unable to load latest config", zap.Error(err))
			return
		}

		// if it hasn't changed, nothing to do
		if bytes.Equal(lastCfg, newCfg) {
			continue
		}
		logger().Info("config file changed; reloading")

		// remember the current config
		lastCfg = newCfg

		// apply the updated config
		err = caddy.Load(lastCfg, false)
		if err != nil {
			logger().Error("applying latest config", zap.Error(err))
			continue
		}
	}
}

// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
type Flags struct {
	*pflag.FlagSet
}

// String returns the string representation of the
// flag given by name. It panics if the flag is not
// in the flag set.
func (f Flags) String(name string) string {
	return f.FlagSet.Lookup(name).Value.String()
}

// Bool returns the boolean representation of the
// flag given by name. It returns false if the flag
// is not a boolean type. It panics if the flag is
// not in the flag set.
func (f Flags) Bool(name string) bool {
	val, _ := strconv.ParseBool(f.String(name))
	return val
}

// Int returns the integer representation of the
// flag given by name. It returns 0 if the flag
// is not an integer type. It panics if the flag is
// not in the flag set.
func (f Flags) Int(name string) int {
	val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
	return int(val)
}

// Float64 returns the float64 representation of the
// flag given by name. It returns 0 if the flag
// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
	val, _ := strconv.ParseFloat(f.String(name), 64)
	return val
}

// Duration returns the duration representation of the
// flag given by name. It returns 0 if the flag
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
	val, _ := caddy.ParseDuration(f.String(name))
	return val
}

// loadEnvFromFile reads KEY=VALUE pairs from envFile and sets
// them in the process environment, without overwriting variables
// that are already set. It then refreshes the storage paths,
// since they may depend on env vars (e.g. HOME/XDG dirs).
func loadEnvFromFile(envFile string) error {
	file, err := os.Open(envFile)
	if err != nil {
		return fmt.Errorf("reading environment file: %v", err)
	}
	defer file.Close()

	envMap, err := parseEnvFile(file)
	if err != nil {
		return fmt.Errorf("parsing environment file: %v", err)
	}

	for k, v := range envMap {
		// do not overwrite existing environment variables
		_, exists := os.LookupEnv(k)
		if !exists {
			if err := os.Setenv(k, v); err != nil {
				return fmt.Errorf("setting environment variables: %v", err)
			}
		}
	}

	// Update the storage paths to ensure they have the proper
	// value after loading a specified env file.
	caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json")
	caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()}

	return nil
}

// parseEnvFile parses an env file from KEY=VALUE format.
// It's pretty naive. Limited value quotation is supported,
// but variable and command expansions are not supported.
func parseEnvFile(envInput io.Reader) (map[string]string, error) { envMap := make(map[string]string) scanner := bufio.NewScanner(envInput) var lineNumber int for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) lineNumber++ // skip empty lines and lines starting with comment if line == "" || strings.HasPrefix(line, "#") { continue } // split line into key and value before, after, isCut := strings.Cut(line, "=") if !isCut { return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber) } key, val := before, after // sometimes keys are prefixed by "export " so file can be sourced in bash; ignore it here key = strings.TrimPrefix(key, "export ") // validate key and value if key == "" { return nil, fmt.Errorf("missing or empty key on line %d", lineNumber) } if strings.Contains(key, " ") { return nil, fmt.Errorf("invalid key on line %d: contains whitespace: %s", lineNumber, key) } if strings.HasPrefix(val, " ") || strings.HasPrefix(val, "\t") { return nil, fmt.Errorf("invalid value on line %d: whitespace before value: '%s'", lineNumber, val) } // remove any trailing comment after value if commentStart, _, found := strings.Cut(val, "#"); found { val = strings.TrimRight(commentStart, " \t") } // quoted value: support newlines if strings.HasPrefix(val, `"`) || strings.HasPrefix(val, "'") { quote := string(val[0]) for !strings.HasSuffix(line, quote) || strings.HasSuffix(line, `\`+quote) { val = strings.ReplaceAll(val, `\`+quote, quote) if !scanner.Scan() { break } lineNumber++ line = strings.ReplaceAll(scanner.Text(), `\`+quote, quote) val += "\n" + line } val = strings.TrimPrefix(val, quote) val = strings.TrimSuffix(val, quote) } envMap[key] = val } if err := scanner.Err(); err != nil { return nil, err } return envMap, nil } func printEnvironment() { _, version := caddy.Version() fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir()) fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir()) fmt.Printf("caddy.AppConfigDir=%s\n", 
caddy.AppConfigDir()) fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath) fmt.Printf("caddy.Version=%s\n", version) fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS) fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH) fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler) fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU()) fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0)) fmt.Printf("runtime.Version=%s\n", runtime.Version()) cwd, err := os.Getwd() if err != nil { cwd = fmt.Sprintf("", err) } fmt.Printf("os.Getwd=%s\n\n", cwd) for _, v := range os.Environ() { fmt.Println(v) } } func setResourceLimits(logger *zap.Logger) func() { // Configure the maximum number of CPUs to use to match the Linux container quota (if any) // See https://pkg.go.dev/runtime#GOMAXPROCS undo, err := maxprocs.Set(maxprocs.Logger(logger.Sugar().Infof)) if err != nil { logger.Warn("failed to set GOMAXPROCS", zap.Error(err)) } // Configure the maximum memory to use to match the Linux container quota (if any) or system memory // See https://pkg.go.dev/runtime/debug#SetMemoryLimit _, _ = memlimit.SetGoMemLimitWithOpts( memlimit.WithLogger( slog.New(zapslog.NewHandler( logger.Core(), zapslog.WithName("memlimit"), // the default enables traces at ERROR level, this disables // them by setting it to a level higher than any other level zapslog.AddStacktraceAt(slog.Level(127)), )), ), memlimit.WithProvider( memlimit.ApplyFallback( memlimit.FromCgroup, memlimit.FromSystem, ), ), ) return undo } // StringSlice is a flag.Value that enables repeated use of a string flag. 
type StringSlice []string func (ss StringSlice) String() string { return "[" + strings.Join(ss, ", ") + "]" } func (ss *StringSlice) Set(value string) error { *ss = append(*ss, value) return nil } // Interface guard var _ flag.Value = (*StringSlice)(nil) ================================================ FILE: cmd/main_test.go ================================================ package caddycmd import ( "reflect" "strings" "testing" ) func TestParseEnvFile(t *testing.T) { for i, tc := range []struct { input string expect map[string]string shouldErr bool }{ { input: `KEY=value`, expect: map[string]string{ "KEY": "value", }, }, { input: ` KEY=value OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "OTHER_KEY": "Some Value", }, }, { input: ` KEY=value INVALID KEY=asdf OTHER_KEY=Some Value `, shouldErr: true, }, { input: ` KEY=value SIMPLE_QUOTED="quoted value" OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "SIMPLE_QUOTED": "quoted value", "OTHER_KEY": "Some Value", }, }, { input: ` KEY=value NEWLINES="foo bar" OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "NEWLINES": "foo\n\tbar", "OTHER_KEY": "Some Value", }, }, { input: ` KEY=value ESCAPED="\"escaped quotes\" here" OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "ESCAPED": "\"escaped quotes\"\nhere", "OTHER_KEY": "Some Value", }, }, { input: ` export KEY=value OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "OTHER_KEY": "Some Value", }, }, { input: ` =value OTHER_KEY=Some Value `, shouldErr: true, }, { input: ` EMPTY= OTHER_KEY=Some Value `, expect: map[string]string{ "EMPTY": "", "OTHER_KEY": "Some Value", }, }, { input: ` EMPTY="" OTHER_KEY=Some Value `, expect: map[string]string{ "EMPTY": "", "OTHER_KEY": "Some Value", }, }, { input: ` KEY=value #OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", }, }, { input: ` KEY=value COMMENT=foo bar # some comment here OTHER_KEY=Some Value `, expect: map[string]string{ 
"KEY": "value", "COMMENT": "foo bar", "OTHER_KEY": "Some Value", }, }, { input: ` KEY=value WHITESPACE= foo OTHER_KEY=Some Value `, shouldErr: true, }, { input: ` KEY=value WHITESPACE=" foo bar " OTHER_KEY=Some Value `, expect: map[string]string{ "KEY": "value", "WHITESPACE": " foo bar ", "OTHER_KEY": "Some Value", }, }, } { actual, err := parseEnvFile(strings.NewReader(tc.input)) if err != nil && !tc.shouldErr { t.Errorf("Test %d: Got error but shouldn't have: %v", i, err) } if err == nil && tc.shouldErr { t.Errorf("Test %d: Did not get error but should have", i) } if tc.shouldErr { continue } if !reflect.DeepEqual(tc.expect, actual) { t.Errorf("Test %d: Expected %v but got %v", i, tc.expect, actual) } } } func Test_isCaddyfile(t *testing.T) { type args struct { configFile string adapterName string } tests := []struct { name string args args want bool wantErr bool }{ { name: "bare Caddyfile without adapter", args: args{ configFile: "Caddyfile", adapterName: "", }, want: true, wantErr: false, }, { name: "local Caddyfile without adapter", args: args{ configFile: "./Caddyfile", adapterName: "", }, want: true, wantErr: false, }, { name: "local caddyfile with adapter", args: args{ configFile: "./Caddyfile", adapterName: "caddyfile", }, want: true, wantErr: false, }, { name: "ends with .caddyfile with adapter", args: args{ configFile: "./conf.caddyfile", adapterName: "caddyfile", }, want: true, wantErr: false, }, { name: "ends with .caddyfile without adapter", args: args{ configFile: "./conf.caddyfile", adapterName: "", }, want: true, wantErr: false, }, { name: "config is Caddyfile.yaml with adapter", args: args{ configFile: "./Caddyfile.yaml", adapterName: "yaml", }, want: false, wantErr: false, }, { name: "json is not caddyfile but not error", args: args{ configFile: "./Caddyfile.json", adapterName: "", }, want: false, wantErr: false, }, { name: "prefix of Caddyfile and ./ with any extension is Caddyfile", args: args{ configFile: "./Caddyfile.prd", adapterName: "", }, 
want: true, wantErr: false, }, { name: "prefix of Caddyfile without ./ with any extension is Caddyfile", args: args{ configFile: "Caddyfile.prd", adapterName: "", }, want: true, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := isCaddyfile(tt.args.configFile, tt.args.adapterName) if (err != nil) != tt.wantErr { t.Errorf("isCaddyfile() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { t.Errorf("isCaddyfile() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: cmd/packagesfuncs.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddycmd

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"runtime"
	"runtime/debug"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
)

// cmdUpgrade rebuilds Caddy with the currently-installed
// non-standard plugins at their latest versions and replaces
// the running binary.
func cmdUpgrade(fl Flags) (int, error) {
	_, nonstandard, _, err := getModules()
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
	}
	pluginPkgs, err := getPluginPackages(nonstandard)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	return upgradeBuild(pluginPkgs, fl)
}

// splitModule splits arg into a module path and an optional
// version, separated by the last "@". An empty module path
// is an error.
func splitModule(arg string) (module, version string, err error) {
	const versionSplit = "@"

	// accommodate module paths that have @ in them, but we can only tolerate that if there's also
	// a version, otherwise we don't know if it's a version separator or part of the file path
	lastVersionSplit := strings.LastIndex(arg, versionSplit)
	if lastVersionSplit < 0 {
		module = arg
	} else {
		module, version = arg[:lastVersionSplit], arg[lastVersionSplit+1:]
	}

	if module == "" {
		err = fmt.Errorf("module name is required")
	}

	return module, version, err
}

// cmdAddPackage adds one or more plugin packages (optionally
// with "@version") to the build and rebuilds/replaces the
// current binary.
func cmdAddPackage(fl Flags) (int, error) {
	if len(fl.Args()) == 0 {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
	}
	_, nonstandard, _, err := getModules()
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
	}
	pluginPkgs, err := getPluginPackages(nonstandard)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	for _, arg := range fl.Args() {
		module, version, err := splitModule(arg)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid module name: %v", err)
		}
		// only allow a version to be specified if it's different from the existing version
		if _, ok := pluginPkgs[module]; ok && (version == "" || pluginPkgs[module].Version == version) {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added")
		}
		pluginPkgs[module] = pluginPackage{Version: version, Path: module}
	}

	return upgradeBuild(pluginPkgs, fl)
}

// cmdRemovePackage removes one or more plugin packages from the
// build and rebuilds/replaces the current binary.
func cmdRemovePackage(fl Flags) (int, error) {
	if len(fl.Args()) == 0 {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
	}
	_, nonstandard, _, err := getModules()
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
	}
	pluginPkgs, err := getPluginPackages(nonstandard)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	for _, arg := range fl.Args() {
		module, _, err := splitModule(arg)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid module name: %v", err)
		}
		if _, ok := pluginPkgs[module]; !ok {
			// package does not exist
			return caddy.ExitCodeFailedStartup, fmt.Errorf("package is not added")
		}
		// fix: delete by the parsed module path, not the raw argument;
		// if the argument carried an "@version" suffix, deleting by arg
		// would silently no-op and the package would not be removed
		delete(pluginPkgs, module)
	}

	return upgradeBuild(pluginPkgs, fl)
}

// upgradeBuild requests a custom build with the given plugin
// packages from the Caddy download service and swaps the
// currently-running executable for the downloaded one, keeping
// a backup until the new binary has been verified.
func upgradeBuild(pluginPkgs map[string]pluginPackage, fl Flags) (int, error) {
	l := caddy.Log()

	thisExecPath, err := os.Executable()
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("determining current executable path: %v", err)
	}
	thisExecStat, err := os.Stat(thisExecPath)
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("retrieving current executable permission bits: %v", err)
	}
	// NOTE(review): os.Stat follows symlinks, so ModeSymlink can
	// seemingly never be set here; os.Lstat may have been intended
	// — confirm before changing, as thisExecStat is also used for
	// the new binary's permission bits.
	if thisExecStat.Mode()&os.ModeSymlink == os.ModeSymlink {
		symSource := thisExecPath
		// we are a symlink; resolve it
		thisExecPath, err = filepath.EvalSymlinks(thisExecPath)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("resolving current executable symlink: %v", err)
		}
		l.Info("this executable is a symlink", zap.String("source", symSource), zap.String("target", thisExecPath))
	}
	l.Info("this executable will be replaced", zap.String("path", thisExecPath))

	// build the request URL to download this custom build
	qs := url.Values{
		"os":   {runtime.GOOS},
		"arch": {runtime.GOARCH},
	}
	for _, pkgInfo := range pluginPkgs {
		qs.Add("p", pkgInfo.String())
	}

	// initiate the build
	resp, err := downloadBuild(qs)
	if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("download failed: %v", err) } defer resp.Body.Close() // back up the current binary, in case something goes wrong we can replace it backupExecPath := thisExecPath + ".tmp" l.Info("build acquired; backing up current executable", zap.String("current_path", thisExecPath), zap.String("backup_path", backupExecPath)) err = os.Rename(thisExecPath, backupExecPath) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("backing up current binary: %v", err) } defer func() { if err != nil { err2 := os.Rename(backupExecPath, thisExecPath) if err2 != nil { l.Error("restoring original executable failed; will need to be restored manually", zap.String("backup_path", backupExecPath), zap.String("original_path", thisExecPath), zap.Error(err2)) } } }() // download the file; do this in a closure to close reliably before we execute it err = writeCaddyBinary(thisExecPath, &resp.Body, thisExecStat) if err != nil { return caddy.ExitCodeFailedStartup, err } l.Info("download successful; displaying new binary details", zap.String("location", thisExecPath)) // use the new binary to print out version and module info fmt.Print("\nModule versions:\n\n") if err = listModules(thisExecPath); err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy list-modules': %v", err) } fmt.Println("\nVersion:") if err = showVersion(thisExecPath); err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy version': %v", err) } fmt.Println() // clean up the backup file if !fl.Bool("keep-backup") { if err = removeCaddyBinary(backupExecPath); err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to clean up backup binary: %v", err) } } else { l.Info("skipped cleaning up the backup file", zap.String("backup_path", backupExecPath)) } l.Info("upgrade successful; please restart any running Caddy instances", 
zap.String("executable", thisExecPath)) return caddy.ExitCodeSuccess, nil } func getModules() (standard, nonstandard, unknown []moduleInfo, err error) { bi, ok := debug.ReadBuildInfo() if !ok { err = fmt.Errorf("no build info") return standard, nonstandard, unknown, err } for _, modID := range caddy.Modules() { modInfo, err := caddy.GetModule(modID) if err != nil { // that's weird, shouldn't happen unknown = append(unknown, moduleInfo{caddyModuleID: modID, err: err}) continue } // to get the Caddy plugin's version info, we need to know // the package that the Caddy module's value comes from; we // can use reflection but we need a non-pointer value (I'm // not sure why), and since New() should return a pointer // value, we need to dereference it first iface := any(modInfo.New()) if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr { iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface() } modPkgPath := reflect.TypeOf(iface).PkgPath() // now we find the Go module that the Caddy module's package // belongs to; we assume the Caddy module package path will // be prefixed by its Go module path, and we will choose the // longest matching prefix in case there are nested modules var matched *debug.Module for _, dep := range bi.Deps { if strings.HasPrefix(modPkgPath, dep.Path) { if matched == nil || len(dep.Path) > len(matched.Path) { matched = dep } } } caddyModGoMod := moduleInfo{caddyModuleID: modID, goModule: matched} if strings.HasPrefix(modPkgPath, caddy.ImportPath) { standard = append(standard, caddyModGoMod) } else { nonstandard = append(nonstandard, caddyModGoMod) } } return standard, nonstandard, unknown, err } func listModules(path string) error { cmd := exec.Command(path, "list-modules", "--versions", "--skip-standard") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } func showVersion(path string) error { cmd := exec.Command(path, "version") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } func downloadBuild(qs 
url.Values) (*http.Response, error) { l := caddy.Log() l.Info("requesting build", zap.String("os", qs.Get("os")), zap.String("arch", qs.Get("arch")), zap.Strings("packages", qs["p"])) resp, err := http.Get(fmt.Sprintf("%s?%s", downloadPath, qs.Encode())) if err != nil { return nil, fmt.Errorf("secure request failed: %v", err) } if resp.StatusCode >= 400 { var details struct { StatusCode int `json:"status_code"` Error struct { Message string `json:"message"` ID string `json:"id"` } `json:"error"` } err2 := json.NewDecoder(resp.Body).Decode(&details) if err2 != nil { return nil, fmt.Errorf("download and error decoding failed: HTTP %d: %v", resp.StatusCode, err2) } return nil, fmt.Errorf("download failed: HTTP %d: %s (id=%s)", resp.StatusCode, details.Error.Message, details.Error.ID) } return resp, nil } func getPluginPackages(modules []moduleInfo) (map[string]pluginPackage, error) { pluginPkgs := make(map[string]pluginPackage) for _, mod := range modules { if mod.goModule.Replace != nil { return nil, fmt.Errorf("cannot auto-upgrade when Go module has been replaced: %s => %s", mod.goModule.Path, mod.goModule.Replace.Path) } pluginPkgs[mod.goModule.Path] = pluginPackage{Version: mod.goModule.Version, Path: mod.goModule.Path} } return pluginPkgs, nil } func writeCaddyBinary(path string, body *io.ReadCloser, fileInfo os.FileInfo) error { l := caddy.Log() destFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileInfo.Mode()) if err != nil { return fmt.Errorf("unable to open destination file: %v", err) } defer destFile.Close() l.Info("downloading binary", zap.String("destination", path)) _, err = io.Copy(destFile, *body) if err != nil { return fmt.Errorf("unable to download file: %v", err) } err = destFile.Sync() if err != nil { return fmt.Errorf("syncing downloaded file to device: %v", err) } return nil } const downloadPath = "https://caddyserver.com/api/download" type pluginPackage struct { Version string Path string } func (p pluginPackage) String() 
string { if p.Version == "" { return p.Path } return p.Path + "@" + p.Version } ================================================ FILE: cmd/removebinary.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !windows package caddycmd import ( "os" ) // removeCaddyBinary removes the Caddy binary at the given path. // // On any non-Windows OS, this simply calls os.Remove, since they should // probably not exhibit any issue with processes deleting themselves. func removeCaddyBinary(path string) error { return os.Remove(path) } ================================================ FILE: cmd/removebinary_windows.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddycmd

import (
	"os"
	"path/filepath"
	"syscall"
)

// removeCaddyBinary removes the Caddy binary at the given path.
//
// On Windows, this uses a syscall to indirectly remove the file,
// because otherwise we get an "Access is denied." error when trying
// to delete the binary while Caddy is still running and performing
// the upgrade. "cmd.exe /C" executes a command specified by the
// following arguments, i.e. "del" which will run as a separate process,
// which avoids the "Access is denied." error.
func removeCaddyBinary(path string) error {
	var sI syscall.StartupInfo
	var pI syscall.ProcessInformation
	// build an absolute path to cmd.exe from %windir% so we do not
	// depend on PATH lookup for the shell
	argv, err := syscall.UTF16PtrFromString(filepath.Join(os.Getenv("windir"), "system32", "cmd.exe") + " /C del " + path)
	if err != nil {
		return err
	}
	// fire-and-forget: we never wait on pI, so a nil return only means
	// the child process was spawned, not that the delete succeeded
	return syscall.CreateProcess(nil, argv, nil, nil, true, 0, nil, nil, &sI, &pI)
}

================================================
FILE: cmd/storagefuncs.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddycmd

import (
	"archive/tar"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"

	"github.com/caddyserver/certmagic"

	"github.com/caddyserver/caddy/v2"
)

// storVal is a minimal view of a Caddy config used solely to extract
// the top-level "storage" module; all other config keys are ignored.
type storVal struct {
	StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
}

// determineStorage returns the top-level storage module from the given config.
// It may return nil even if no error.
// determineStorage loads the config at configFile (via the optional
// configAdapter) and extracts the top-level storage module, if any.
// It returns (nil, nil) when the config is syntactically invalid for the
// storVal shape, since storage simply defaults to FileStorage in that case.
func determineStorage(configFile string, configAdapter string) (*storVal, error) {
	cfg, _, _, err := LoadConfig(configFile, configAdapter)
	if err != nil {
		return nil, err
	}

	// storage defaults to FileStorage if not explicitly
	// defined in the config, so the config can be valid
	// json but unmarshaling will fail.
	if !json.Valid(cfg) {
		return nil, &json.SyntaxError{}
	}
	var tmpStruct storVal
	err = json.Unmarshal(cfg, &tmpStruct)
	if err != nil {
		// default case, ignore the error
		var jsonError *json.SyntaxError
		if errors.As(err, &jsonError) {
			return nil, nil
		}
		return nil, err
	}

	return &tmpStruct, nil
}

// cmdImportStorage reads a tar archive from --input (or stdin when "-")
// and stores each entry into the storage backend configured by --config,
// falling back to caddy.DefaultStorage when none is configured.
func cmdImportStorage(fl Flags) (int, error) {
	importStorageCmdConfigFlag := fl.String("config")
	importStorageCmdImportFile := fl.String("input")

	if importStorageCmdConfigFlag == "" {
		return caddy.ExitCodeFailedStartup, errors.New("--config is required")
	}
	if importStorageCmdImportFile == "" {
		return caddy.ExitCodeFailedStartup, errors.New("--input is required")
	}

	// extract storage from config if possible
	storageCfg, err := determineStorage(importStorageCmdConfigFlag, "")
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// load specified storage or fallback to default
	var stor certmagic.Storage
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	if storageCfg != nil && storageCfg.StorageRaw != nil {
		val, err := ctx.LoadModule(storageCfg, "StorageRaw")
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		stor, err = val.(caddy.StorageConverter).CertMagicStorage()
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
	} else {
		stor = caddy.DefaultStorage
	}

	// setup input
	var f *os.File
	if importStorageCmdImportFile == "-" {
		f = os.Stdin
	} else {
		f, err = os.Open(importStorageCmdImportFile)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("opening input file: %v", err)
		}
		defer f.Close()
	}

	// store each archive element
	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
		}

		b, err := io.ReadAll(tr)
		if err != nil {
			return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
		}

		// NOTE(review): this error message says "reading archive" but the
		// failure is actually in storing the key — looks copy-pasted; confirm
		// before changing the user-facing string
		err = stor.Store(ctx, hdr.Name, b)
		if err != nil {
			return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
		}
	}

	fmt.Println("Successfully imported storage")
	return caddy.ExitCodeSuccess, nil
}

// cmdExportStorage enumerates every key in the storage backend configured
// by --config (or caddy.DefaultStorage) and writes the terminal keys and
// their values to --output (or stdout when "-") as a tar archive.
func cmdExportStorage(fl Flags) (int, error) {
	exportStorageCmdConfigFlag := fl.String("config")
	exportStorageCmdOutputFlag := fl.String("output")

	if exportStorageCmdConfigFlag == "" {
		return caddy.ExitCodeFailedStartup, errors.New("--config is required")
	}
	if exportStorageCmdOutputFlag == "" {
		return caddy.ExitCodeFailedStartup, errors.New("--output is required")
	}

	// extract storage from config if possible
	storageCfg, err := determineStorage(exportStorageCmdConfigFlag, "")
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// load specified storage or fallback to default
	var stor certmagic.Storage
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	if storageCfg != nil && storageCfg.StorageRaw != nil {
		val, err := ctx.LoadModule(storageCfg, "StorageRaw")
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		stor, err = val.(caddy.StorageConverter).CertMagicStorage()
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
	} else {
		stor = caddy.DefaultStorage
	}

	// enumerate all keys
	keys, err := stor.List(ctx, "", true)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// setup output
	var f *os.File
	if exportStorageCmdOutputFlag == "-" {
		f = os.Stdout
	} else {
		f, err = os.Create(exportStorageCmdOutputFlag)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("opening output file: %v", err)
		}
		defer f.Close()
	}

	// `IsTerminal: true` keys hold the values we
	// care about, write them out
	tw := tar.NewWriter(f)
	for _, k := range keys {
		info, err := stor.Stat(ctx, k)
		if err != nil {
			// a key may legitimately disappear between List and Stat;
			// warn and continue rather than aborting the whole export
			if errors.Is(err, fs.ErrNotExist) {
				caddy.Log().Warn(fmt.Sprintf("key: %s removed while export is in-progress", k))
				continue
			}
			return caddy.ExitCodeFailedQuit, err
		}

		if info.IsTerminal {
			v, err := stor.Load(ctx, k)
			if err != nil {
				if errors.Is(err, fs.ErrNotExist) {
					caddy.Log().Warn(fmt.Sprintf("key: %s removed while export is in-progress", k))
					continue
				}
				return caddy.ExitCodeFailedQuit, err
			}

			hdr := &tar.Header{
				Name:    k,
				Mode:    0o600,
				Size:    int64(len(v)),
				ModTime: info.Modified,
			}

			if err = tw.WriteHeader(hdr); err != nil {
				return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
			}
			if _, err = tw.Write(v); err != nil {
				return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
			}
		}
	}
	// Close flushes the tar footer; without it the archive is truncated
	if err = tw.Close(); err != nil {
		return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
	}

	return caddy.ExitCodeSuccess, nil
}

================================================
FILE: cmd/x509rootsfallback.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddycmd

import (
	// For running in minimal environments, this can ease
	// headaches related to establishing TLS connections.
	// "Package fallback embeds a set of fallback X.509 trusted
	// roots in the application by automatically invoking
	// x509.SetFallbackRoots. This allows the application to
	// work correctly even if the operating system does not
	// provide a verifier or system roots pool. ... It's
	// recommended that only binaries, and not libraries,
	// import this package. This package must be kept up to
	// date for security and compatibility reasons."
	//
	// This is in its own file only because of conflicts
	// between gci and goimports when in main.go.
	// See https://github.com/daixiang0/gci/issues/76
	_ "golang.org/x/crypto/x509roots/fallback"
)

================================================
FILE: context.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"log/slog"
	"reflect"
	"sync"

	"github.com/caddyserver/certmagic"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"go.uber.org/zap"
	"go.uber.org/zap/exp/zapslog"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2/internal/filesystems"
)

// Context is a type which defines the lifetime of modules that
// are loaded and provides access to the parent configuration
// that spawned the modules which are loaded. It should be used
// with care and wrapped with derivation functions from the
// standard context package only if you don't need the Caddy
// specific features. These contexts are canceled when the
// lifetime of the modules loaded from it is over.
//
// Use NewContext() to get a valid value (but most modules will
// not actually need to do this).
type Context struct { context.Context moduleInstances map[string][]Module cfg *Config ancestry []Module cleanupFuncs []func() // invoked at every config unload exitFuncs []func(context.Context) // invoked at config unload ONLY IF the process is exiting (EXPERIMENTAL) metricsRegistry *prometheus.Registry } // NewContext provides a new context derived from the given // context ctx. Normally, you will not need to call this // function unless you are loading modules which have a // different lifespan than the ones for the context the // module was provisioned with. Be sure to call the cancel // func when the context is to be cleaned up so that // modules which are loaded will be properly unloaded. // See standard library context package's documentation. func NewContext(ctx Context) (Context, context.CancelFunc) { newCtx, cancelCause := NewContextWithCause(ctx) return newCtx, func() { cancelCause(nil) } } // NewContextWithCause is like NewContext but returns a context.CancelCauseFunc. // EXPERIMENTAL: This API is subject to change. func NewContextWithCause(ctx Context) (Context, context.CancelCauseFunc) { newCtx := Context{moduleInstances: make(map[string][]Module), cfg: ctx.cfg, metricsRegistry: prometheus.NewPedanticRegistry()} c, cancel := context.WithCancelCause(ctx.Context) wrappedCancel := func(cause error) { cancel(cause) for _, f := range ctx.cleanupFuncs { f() } for modName, modInstances := range newCtx.moduleInstances { for _, inst := range modInstances { if cu, ok := inst.(CleanerUpper); ok { err := cu.Cleanup() if err != nil { log.Printf("[ERROR] %s (%p): cleanup: %v", modName, inst, err) } } } } } newCtx.Context = c newCtx.initMetrics() return newCtx, wrappedCancel } // OnCancel executes f when ctx is canceled. func (ctx *Context) OnCancel(f func()) { ctx.cleanupFuncs = append(ctx.cleanupFuncs, f) } // FileSystems returns a ref to the FilesystemMap. // EXPERIMENTAL: This API is subject to change. 
func (ctx *Context) FileSystems() FileSystems {
	// if no config is loaded, we use a default filesystemmap, which includes the osfs
	if ctx.cfg == nil {
		return &filesystems.FileSystemMap{}
	}
	return ctx.cfg.fileSystems
}

// GetMetricsRegistry returns the active metrics registry for the context.
// EXPERIMENTAL: This API is subject to change.
func (ctx *Context) GetMetricsRegistry() *prometheus.Registry {
	return ctx.metricsRegistry
}

// initMetrics registers the standard build/process/Go collectors plus
// Caddy's own admin and config metrics with this context's registry.
func (ctx *Context) initMetrics() {
	ctx.metricsRegistry.MustRegister(
		collectors.NewBuildInfoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
		adminMetrics.requestCount,
		adminMetrics.requestErrors,
		globalMetrics.configSuccess,
		globalMetrics.configSuccessTime,
	)
}

// OnExit executes f when the process exits gracefully.
// The function is only executed if the process is gracefully
// shut down while this context is active.
//
// EXPERIMENTAL API: subject to change or removal.
func (ctx *Context) OnExit(f func(context.Context)) {
	ctx.exitFuncs = append(ctx.exitFuncs, f)
}

// LoadModule loads the Caddy module(s) from the specified field of the parent struct
// pointer and returns the loaded module(s). The struct pointer and its field name as
// a string are necessary so that reflection can be used to read the struct tag on the
// field to get the module namespace and inline module name key (if specified).
//
// The field can be any one of the supported raw module types: json.RawMessage,
// []json.RawMessage, [][]json.RawMessage, map[string]json.RawMessage, or
// []map[string]json.RawMessage. ModuleMap may be used in place of
// map[string]json.RawMessage. The return value's underlying type mirrors
// the input field's type:
//
//	json.RawMessage              => any
//	[]json.RawMessage            => []any
//	[][]json.RawMessage          => [][]any
//	map[string]json.RawMessage   => map[string]any
//	[]map[string]json.RawMessage => []map[string]any
//
// The field must have a "caddy" struct tag in this format:
//
//	caddy:"key1=val1 key2=val2"
//
// To load modules, a "namespace" key is required. For example, to load modules
// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
// Caddy struct tag.
//
// The module name must also be available. If the field type is a map or slice of maps,
// then key is assumed to be the module name if an "inline_key" is NOT specified in the
// caddy struct tag. In this case, the module name does NOT need to be specified in-line
// with the module itself.
//
// If not a map, or if inline_key is non-empty, then the module name must be embedded
// into the values, which must be objects; then there must be a key in those objects
// where its associated value is the module name. This is called the "inline key",
// meaning the key containing the module's name that is defined inline with the module
// itself. You must specify the inline key in a struct tag, along with the namespace:
//
//	caddy:"namespace=http.handlers inline_key=handler"
//
// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
// in order to know the module name.
//
// To make use of the loaded module(s) (the return value), you will probably want
// to type-assert each 'any' value(s) to the types that are useful to you
// and store them on the same struct. Storing them on the same struct makes for
// easy garbage collection when your host module is no longer needed.
//
// Loaded modules have already been provisioned and validated. Upon returning
// successfully, this method clears the json.RawMessage(s) in the field since
// the raw JSON is no longer needed, and this allows the GC to free up memory.
func (ctx Context) LoadModule(structPointer any, fieldName string) (any, error) {
	val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
	typ := val.Type()

	field, ok := reflect.TypeOf(structPointer).Elem().FieldByName(fieldName)
	if !ok {
		panic(fmt.Sprintf("field %s does not exist in %#v", fieldName, structPointer))
	}

	opts, err := ParseStructTag(field.Tag.Get("caddy"))
	if err != nil {
		panic(fmt.Sprintf("malformed tag on field %s: %v", fieldName, err))
	}

	moduleNamespace, ok := opts["namespace"]
	if !ok {
		panic(fmt.Sprintf("missing 'namespace' key in struct tag on field %s", fieldName))
	}
	inlineModuleKey := opts["inline_key"]

	var result any

	switch val.Kind() {
	case reflect.Slice:
		if isJSONRawMessage(typ) {
			// val is `json.RawMessage` ([]uint8 under the hood)

			if inlineModuleKey == "" {
				panic("unable to determine module name without inline_key when type is not a ModuleMap")
			}
			val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Interface().(json.RawMessage))
			if err != nil {
				return nil, err
			}
			result = val
		} else if isJSONRawMessage(typ.Elem()) {
			// val is `[]json.RawMessage`

			if inlineModuleKey == "" {
				panic("unable to determine module name without inline_key because type is not a ModuleMap")
			}
			var all []any
			for i := 0; i < val.Len(); i++ {
				val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
				if err != nil {
					return nil, fmt.Errorf("position %d: %v", i, err)
				}
				all = append(all, val)
			}
			result = all
		} else if typ.Elem().Kind() == reflect.Slice && isJSONRawMessage(typ.Elem().Elem()) {
			// val is `[][]json.RawMessage`

			if inlineModuleKey == "" {
				panic("unable to determine module name without inline_key because type is not a ModuleMap")
			}
			var all [][]any
			for i := 0; i < val.Len(); i++ {
				innerVal := val.Index(i)
				var allInner []any
				for j := 0; j < innerVal.Len(); j++ {
					innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
					if err != nil {
						return nil, fmt.Errorf("position %d: %v", j, err)
					}
					allInner = append(allInner, innerInnerVal)
				}
				all = append(all, allInner)
			}
			result = all
		} else if isModuleMapType(typ.Elem()) {
			// val is `[]map[string]json.RawMessage`

			var all []map[string]any
			for i := 0; i < val.Len(); i++ {
				thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
				if err != nil {
					return nil, err
				}
				all = append(all, thisSet)
			}
			result = all
		}

	case reflect.Map:
		// val is a ModuleMap or some other kind of map
		result, err = ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val)
		if err != nil {
			return nil, err
		}

	default:
		return nil, fmt.Errorf("unrecognized type for module: %s", typ)
	}

	// we're done with the raw bytes; allow GC to deallocate
	val.Set(reflect.Zero(typ))

	return result, nil
}

// emitEvent is a small convenience method so the caddy core can emit events, if the event app is configured.
func (ctx Context) emitEvent(name string, data map[string]any) Event {
	if ctx.cfg == nil || ctx.cfg.eventEmitter == nil {
		return Event{}
	}
	return ctx.cfg.eventEmitter.Emit(ctx, name, data)
}

// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]any.
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
// name) or as a regular map (key is not the module name, and module name is defined inline).
func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
	// if no inline_key is specified, then val must be a ModuleMap,
	// where the key is the module name
	if inlineModuleKey == "" {
		if !isModuleMapType(val.Type()) {
			panic(fmt.Sprintf("expected ModuleMap because inline_key is empty; but we do not recognize this type: %s", val.Type()))
		}
		return ctx.loadModuleMap(namespace, val)
	}

	// otherwise, val is a map with modules, but the module name is
	// inline with each value (the key means something else)
	return ctx.loadModulesFromRegularMap(namespace, inlineModuleKey, val)
}

// loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage.
// Map keys are NOT interpreted as module names, so module names are still expected to appear
// inline with the objects.
func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
	mods := make(map[string]any)
	iter := val.MapRange()
	for iter.Next() {
		k := iter.Key()
		v := iter.Value()
		mod, err := ctx.loadModuleInline(inlineModuleKey, namespace, v.Interface().(json.RawMessage))
		if err != nil {
			return nil, fmt.Errorf("key %s: %v", k, err)
		}
		mods[k.String()] = mod
	}
	return mods, nil
}

// loadModuleMap loads modules from a ModuleMap, i.e. map[string]any, where the key is the
// module name. With a module map, module names do not need to be defined inline with their values.
func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]any, error) {
	all := make(map[string]any)
	iter := val.MapRange()
	for iter.Next() {
		k := iter.Key().Interface().(string)
		v := iter.Value().Interface().(json.RawMessage)
		// an empty namespace means the map key is already the full module ID
		moduleName := namespace + "." + k
		if namespace == "" {
			moduleName = k
		}
		val, err := ctx.LoadModuleByID(moduleName, v)
		if err != nil {
			return nil, fmt.Errorf("module name '%s': %v", k, err)
		}
		all[k] = val
	}
	return all, nil
}

// LoadModuleByID decodes rawMsg into a new instance of mod and
// returns the value. If mod.New is nil, an error is returned.
// If the module implements Validator or Provisioner interfaces,
// those methods are invoked to ensure the module is fully
// configured and valid before being used.
//
// This is a lower-level method and will usually not be called
// directly by most modules. However, this method is useful when
// dynamically loading/unloading modules in their own context,
// like from embedded scripts, etc.
func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (any, error) {
	modulesMu.RLock()
	modInfo, ok := modules[id]
	modulesMu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("unknown module: %s", id)
	}

	if modInfo.New == nil {
		return nil, fmt.Errorf("module '%s' has no constructor", modInfo.ID)
	}

	val := modInfo.New()

	// value must be a pointer for unmarshaling into concrete type, even if
	// the module's concrete type is a slice or map; New() *should* return
	// a pointer, otherwise unmarshaling errors or panics will occur
	if rv := reflect.ValueOf(val); rv.Kind() != reflect.Ptr {
		log.Printf("[WARNING] ModuleInfo.New() for module '%s' did not return a pointer,"+
			" so we are using reflection to make a pointer instead; please fix this by"+
			" using new(Type) or &Type notation in your module's New() function.", id)
		val = reflect.New(rv.Type()).Elem().Addr().Interface().(Module)
	}

	// fill in its config only if there is a config to fill in
	if len(rawMsg) > 0 {
		err := StrictUnmarshalJSON(rawMsg, &val)
		if err != nil {
			return nil, fmt.Errorf("decoding module config: %s: %v", modInfo, err)
		}
	}

	if val == nil {
		// returned module values are almost always type-asserted
		// before being used, so a nil value would panic; and there
		// is no good reason to explicitly declare null modules in
		// a config; it might be because the user is trying to achieve
		// a result the developer isn't expecting, which is a smell
		return nil, fmt.Errorf("module value cannot be null")
	}

	// err is deliberately shared by the deferred closure below so that
	// a Provision/Validate failure is recorded in cfg.failedApps
	var err error

	// if this is an app module, keep a reference to it,
	// since submodules may need to reference it during
	// provisioning (even though the parent app module
	// may not be fully provisioned yet; this is the case
	// with the tls app's automation policies, which may
	// refer to the tls app to check if a global DNS
	// module has been configured for DNS challenges)
	if appModule, ok := val.(App); ok {
		ctx.cfg.apps[id] = appModule
		defer func() {
			if err != nil {
				ctx.cfg.failedApps[id] = err
			}
		}()
	}

	ctx.ancestry = append(ctx.ancestry, val)

	if prov, ok := val.(Provisioner); ok {
		err = prov.Provision(ctx)
		if err != nil {
			// incomplete provisioning could have left state
			// dangling, so make sure it gets cleaned up
			if cleanerUpper, ok := val.(CleanerUpper); ok {
				err2 := cleanerUpper.Cleanup()
				if err2 != nil {
					err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
				}
			}
			return nil, fmt.Errorf("provision %s: %v", modInfo, err)
		}
	}

	if validator, ok := val.(Validator); ok {
		err = validator.Validate()
		if err != nil {
			// since the module was already provisioned, make sure we clean up
			if cleanerUpper, ok := val.(CleanerUpper); ok {
				err2 := cleanerUpper.Cleanup()
				if err2 != nil {
					err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
				}
			}
			return nil, fmt.Errorf("%s: invalid configuration: %v", modInfo, err)
		}
	}

	ctx.moduleInstances[id] = append(ctx.moduleInstances[id], val)

	// if the loaded module happens to be an app that can emit events, store it so the
	// core can have access to emit events without an import cycle
	if ee, ok := val.(eventEmitter); ok {
		if _, ok := ee.(App); ok {
			ctx.cfg.eventEmitter = ee
		}
	}

	return val, nil
}

// loadModuleInline loads a module from a JSON raw message which decodes to
// a map[string]any, where one of the object keys is moduleNameKey
// and the corresponding value is the module name (as a string) which can
// be found in the given scope. In other words, the module name is declared
// in-line with the module itself.
//
// This allows modules to be decoded into their concrete types and used when
// their names cannot be the unique key in a map, such as when there are
// multiple instances in the map or it appears in an array (where there are
// no custom keys). In other words, the key containing the module name is
// treated special/separate from all the other keys in the object.
func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (any, error) {
	moduleName, raw, err := getModuleNameInline(moduleNameKey, raw)
	if err != nil {
		return nil, err
	}

	val, err := ctx.LoadModuleByID(moduleScope+"."+moduleName, raw)
	if err != nil {
		return nil, fmt.Errorf("loading module '%s': %v", moduleName, err)
	}

	return val, nil
}

// App returns the configured app named name. If that app has
// not yet been loaded and provisioned, it will be immediately
// loaded and provisioned. If no app with that name is
// configured, a new empty one will be instantiated instead.
// (The app module must still be registered.) This must not be
// called during the Provision/Validate phase to reference a
// module's own host app (since the parent app module is still
// in the process of being provisioned, it is not yet ready).
//
// We return any type instead of the App type because it is NOT
// intended for the caller of this method to be the one to start
// or stop App modules. The caller is expected to assert to the
// concrete type.
func (ctx Context) App(name string) (any, error) { // if the app failed to load before, return the cached error if err, ok := ctx.cfg.failedApps[name]; ok { return nil, fmt.Errorf("loading %s app module: %v", name, err) } if app, ok := ctx.cfg.apps[name]; ok { return app, nil } appRaw := ctx.cfg.AppsRaw[name] modVal, err := ctx.LoadModuleByID(name, appRaw) if err != nil { return nil, fmt.Errorf("loading %s app module: %v", name, err) } if appRaw != nil { ctx.cfg.AppsRaw[name] = nil // allow GC to deallocate } return modVal, nil } // AppIfConfigured is like App, but it returns an error if the // app has not been configured. This is useful when the app is // required and its absence is a configuration error; or when // the app is optional and you don't want to instantiate a // new one that hasn't been explicitly configured. If the app // is not in the configuration, the error wraps ErrNotConfigured. func (ctx Context) AppIfConfigured(name string) (any, error) { if ctx.cfg == nil { return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured) } // if the app failed to load before, return the cached error if err, ok := ctx.cfg.failedApps[name]; ok { return nil, fmt.Errorf("loading %s app module: %v", name, err) } if app, ok := ctx.cfg.apps[name]; ok { return app, nil } appRaw := ctx.cfg.AppsRaw[name] if appRaw == nil { return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured) } return ctx.App(name) } // ErrNotConfigured indicates a module is not configured. var ErrNotConfigured = fmt.Errorf("module not configured") // Storage returns the configured Caddy storage implementation. func (ctx Context) Storage() certmagic.Storage { return ctx.cfg.storage } // Logger returns a logger that is intended for use by the most // recent module associated with the context. Callers should not // pass in any arguments unless they want to associate with a // different module; it panics if more than 1 value is passed in. 
// // Originally, this method's signature was `Logger(mod Module)`, // requiring that an instance of a Caddy module be passed in. // However, that is no longer necessary, as the closest module // most recently associated with the context will be automatically // assumed. To prevent a sudden breaking change, this method's // signature has been changed to be variadic, but we may remove // the parameter altogether in the future. Callers should not // pass in any argument. If there is valid need to specify a // different module, please open an issue to discuss. // // PARTIALLY DEPRECATED: The Logger(module) form is deprecated and // may be removed in the future. Do not pass in any arguments. func (ctx Context) Logger(module ...Module) *zap.Logger { if len(module) > 1 { panic("more than 1 module passed in") } if ctx.cfg == nil { // often the case in tests; just use a dev logger l, err := zap.NewDevelopment() if err != nil { panic("config missing, unable to create dev logger: " + err.Error()) } return l } mod := ctx.Module() if len(module) > 0 { mod = module[0] } if mod == nil { return Log() } return ctx.cfg.Logging.Logger(mod) } type slogHandlerFactory func(handler slog.Handler, core zapcore.Core, moduleID string) slog.Handler var ( slogHandlerFactories []slogHandlerFactory slogHandlerFactoriesMu sync.RWMutex ) // RegisterSlogHandlerFactory allows modules to register custom log/slog.Handler, // for instance, to add contextual data to the logs. func RegisterSlogHandlerFactory(factory slogHandlerFactory) { slogHandlerFactoriesMu.Lock() slogHandlerFactories = append(slogHandlerFactories, factory) slogHandlerFactoriesMu.Unlock() } // Slogger returns a slog logger that is intended for use by // the most recent module associated with the context. 
func (ctx Context) Slogger() *slog.Logger { var ( handler slog.Handler core zapcore.Core moduleID string ) // the default enables traces at ERROR level, this disables // them by setting it to a level higher than any other level tracesOpt := zapslog.AddStacktraceAt(slog.Level(127)) if ctx.cfg == nil { // often the case in tests; just use a dev logger l, err := zap.NewDevelopment() if err != nil { panic("config missing, unable to create dev logger: " + err.Error()) } core = l.Core() handler = zapslog.NewHandler(core, tracesOpt) } else { mod := ctx.Module() if mod == nil { core = Log().Core() handler = zapslog.NewHandler(core, tracesOpt) } else { moduleID = string(mod.CaddyModule().ID) core = ctx.cfg.Logging.Logger(mod).Core() handler = zapslog.NewHandler(core, zapslog.WithName(moduleID), tracesOpt) } } slogHandlerFactoriesMu.RLock() for _, f := range slogHandlerFactories { handler = f(handler, core, moduleID) } slogHandlerFactoriesMu.RUnlock() return slog.New(handler) } // Modules returns the lineage of modules that this context provisioned, // with the most recent/current module being last in the list. func (ctx Context) Modules() []Module { mods := make([]Module, len(ctx.ancestry)) copy(mods, ctx.ancestry) return mods } // Module returns the current module, or the most recent one // provisioned by the context. func (ctx Context) Module() Module { if len(ctx.ancestry) == 0 { return nil } return ctx.ancestry[len(ctx.ancestry)-1] } // WithValue returns a new context with the given key-value pair. func (ctx *Context) WithValue(key, value any) Context { return Context{ Context: context.WithValue(ctx.Context, key, value), moduleInstances: ctx.moduleInstances, cfg: ctx.cfg, ancestry: ctx.ancestry, cleanupFuncs: ctx.cleanupFuncs, exitFuncs: ctx.exitFuncs, } } // eventEmitter is a small interface that inverts dependencies for // the caddyevents package, so the core can emit events without an // import cycle (i.e. 
the caddy package doesn't have to import
// the caddyevents package, which imports the caddy package).
type eventEmitter interface {
	// Emit dispatches the named event with the given data and
	// returns the Event value that was emitted.
	Emit(ctx Context, eventName string, data map[string]any) Event
}

================================================
FILE: context_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"encoding/json"
	"io"
)

func ExampleContext_LoadModule() {
	// this whole first part is just setting up for the example;
	// note the struct tags - very important; we specify inline_key
	// because that is the only way to know the module name
	var ctx Context
	myStruct := &struct {
		// This godoc comment will appear in module documentation.
		GuestModuleRaw json.RawMessage `json:"guest_module,omitempty" caddy:"namespace=example inline_key=name"`

		// this is where the decoded module will be stored; in this
		// example, we pretend we need an io.Writer but it can be
		// any interface type that is useful to you
		guestModule io.Writer
	}{
		GuestModuleRaw: json.RawMessage(`{"name":"module_name","foo":"bar"}`),
	}

	// if a guest module is provided, we can load it easily
	if myStruct.GuestModuleRaw != nil {
		mod, err := ctx.LoadModule(myStruct, "GuestModuleRaw")
		if err != nil {
			// you'd want to actually handle the error here
			// return fmt.Errorf("loading guest module: %v", err)
		}
		// mod contains the loaded and provisioned module,
		// it is now ready for us to use
		myStruct.guestModule = mod.(io.Writer)
	}

	// use myStruct.guestModule from now on
}

func ExampleContext_LoadModule_array() {
	// this whole first part is just setting up for the example;
	// note the struct tags - very important; we specify inline_key
	// because that is the only way to know the module name
	var ctx Context
	myStruct := &struct {
		// This godoc comment will appear in module documentation.
		GuestModulesRaw []json.RawMessage `json:"guest_modules,omitempty" caddy:"namespace=example inline_key=name"`

		// this is where the decoded module will be stored; in this
		// example, we pretend we need an io.Writer but it can be
		// any interface type that is useful to you
		guestModules []io.Writer
	}{
		GuestModulesRaw: []json.RawMessage{
			json.RawMessage(`{"name":"module1_name","foo":"bar1"}`),
			json.RawMessage(`{"name":"module2_name","foo":"bar2"}`),
		},
	}

	// since our input is []json.RawMessage, the output will be []any
	mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
	if err != nil {
		// you'd want to actually handle the error here
		// return fmt.Errorf("loading guest modules: %v", err)
	}
	for _, mod := range mods.([]any) {
		myStruct.guestModules = append(myStruct.guestModules, mod.(io.Writer))
	}

	// use myStruct.guestModules from now on
}

func ExampleContext_LoadModule_map() {
	// this whole first part is just setting up for the example;
	// note the struct tags - very important; we don't specify
	// inline_key because the map key is the module name
	var ctx Context
	myStruct := &struct {
		// This godoc comment will appear in module documentation.
		GuestModulesRaw ModuleMap `json:"guest_modules,omitempty" caddy:"namespace=example"`

		// this is where the decoded module will be stored; in this
		// example, we pretend we need an io.Writer but it can be
		// any interface type that is useful to you
		guestModules map[string]io.Writer
	}{
		GuestModulesRaw: ModuleMap{
			"module1_name": json.RawMessage(`{"foo":"bar1"}`),
			"module2_name": json.RawMessage(`{"foo":"bar2"}`),
		},
	}

	// since our input is map[string]json.RawMessage, the output will be map[string]any
	mods, err := ctx.LoadModule(myStruct, "GuestModulesRaw")
	if err != nil {
		// you'd want to actually handle the error here
		// return fmt.Errorf("loading guest modules: %v", err)
	}
	// NOTE(review): guestModules is never initialized with make(), so
	// this assignment would panic on a nil map if the example were
	// executed; examples without an "Output:" comment are compiled
	// but not run, which is why this does not fail in tests.
	for modName, mod := range mods.(map[string]any) {
		myStruct.guestModules[modName] = mod.(io.Writer)
	}

	// use myStruct.guestModules from now on
}

================================================
FILE: duration_fuzz.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz package caddy func FuzzParseDuration(data []byte) int { _, err := ParseDuration(string(data)) if err != nil { return 0 } return 1 } ================================================ FILE: filepath.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !windows package caddy import ( "os" "path/filepath" ) // FastAbs is an optimized version of filepath.Abs for Unix systems, // since we don't expect the working directory to ever change once // Caddy is running. Avoid the os.Getwd() syscall overhead. // It's overall the same as stdlib's implementation, the difference // being cached working directory. func FastAbs(path string) (string, error) { if filepath.IsAbs(path) { return filepath.Clean(path), nil } if wderr != nil { return "", wderr } return filepath.Join(wd, path), nil } var wd, wderr = os.Getwd() ================================================ FILE: filepath_windows.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"path/filepath"
)

// FastAbs can't be optimized on Windows because there
// are special file paths that require the use of syscall.FullPath
// to handle correctly.
// Just call stdlib's implementation which uses that function.
//
// (The Unix build of this function, in filepath.go, instead uses a
// working directory cached at startup to avoid the syscall.)
func FastAbs(path string) (string, error) {
	return filepath.Abs(path)
}

================================================
FILE: filesystem.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy

import "io/fs"

// FileSystems is a registry of file systems keyed by name.
type FileSystems interface {
	// Register associates the file system v with key k.
	Register(k string, v fs.FS)
	// Unregister removes the file system registered under key k.
	Unregister(k string)
	// Get returns the file system registered under key k;
	// ok reports whether one was found.
	Get(k string) (v fs.FS, ok bool)
	// Default returns the default file system.
	Default() fs.FS
}

================================================
FILE: go.mod
================================================

module github.com/caddyserver/caddy/v2

go 1.25.0

require (
	github.com/BurntSushi/toml v1.6.0
	github.com/DeRuina/timberjack v1.3.9
	github.com/KimMachineGun/automemlimit v0.7.5
	github.com/Masterminds/sprig/v3 v3.3.0
	github.com/alecthomas/chroma/v2 v2.23.1
	github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b
	github.com/caddyserver/certmagic v0.25.2
	github.com/caddyserver/zerossl v0.1.5
	github.com/cloudflare/circl v1.6.3
	github.com/dustin/go-humanize v1.0.1
	github.com/go-chi/chi/v5 v5.2.5
	github.com/google/cel-go v0.27.0
	github.com/google/uuid v1.6.0
	github.com/klauspost/compress v1.18.4
	github.com/klauspost/cpuid/v2 v2.3.0
	github.com/mholt/acmez/v3 v3.1.6
	github.com/prometheus/client_golang v1.23.2
	github.com/quic-go/quic-go v0.59.0
	github.com/smallstep/certificates v0.30.0-rc3
	github.com/smallstep/nosql v0.7.0
	github.com/smallstep/truststore v0.13.0
	github.com/spf13/cobra v1.10.2
	github.com/spf13/pflag v1.0.10
	github.com/stretchr/testify v1.11.1
	github.com/tailscale/tscert v0.0.0-20251216020129-aea342f6d747
	github.com/yuin/goldmark v1.7.16
	github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
	go.opentelemetry.io/contrib/exporters/autoexport v0.65.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0
	go.opentelemetry.io/contrib/propagators/autoprop v0.65.0
	go.opentelemetry.io/otel v1.40.0
	go.opentelemetry.io/otel/sdk v1.40.0
	go.step.sm/crypto v0.76.2
	go.uber.org/automaxprocs v1.6.0
	go.uber.org/zap v1.27.1
	go.uber.org/zap/exp v0.3.0
	golang.org/x/crypto v0.48.0
	golang.org/x/crypto/x509roots/fallback v0.0.0-20260213171211-a408498e5541
	golang.org/x/net v0.51.0
	golang.org/x/sync v0.19.0
	golang.org/x/term v0.40.0
	golang.org/x/time v0.14.0
	gopkg.in/yaml.v3 v3.0.1
) require ( cel.dev/expr v0.25.1 // indirect cloud.google.com/go/auth v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect dario.cat/mergo v1.0.2 // indirect filippo.io/bigmod v0.1.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/ccoveille/go-safecast/v2 v2.0.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/coreos/go-oidc/v3 v3.17.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-jose/go-jose/v3 v3.0.4 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745 // indirect github.com/google/go-tpm v0.9.8 // indirect github.com/google/go-tspi v0.3.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/googleapis/gax-go/v2 v2.17.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect github.com/jackc/pgx/v5 v5.6.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/otlptranslator v1.0.0 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/smallstep/cli-utils v0.12.2 // indirect github.com/smallstep/go-attestation v0.4.4-0.20241119153605-2306d5b464ca // indirect github.com/smallstep/linkedca v0.25.0 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect github.com/smallstep/scep v0.0.0-20250318231241-a25cabb69492 // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/blake3 v0.2.4 // indirect 
go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 // indirect go.opentelemetry.io/contrib/propagators/aws v1.40.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.40.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.40.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.62.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 // indirect go.opentelemetry.io/otel/log v0.16.0 // indirect go.opentelemetry.io/otel/sdk/log v0.16.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/oauth2 v0.35.0 // indirect google.golang.org/api v0.266.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 // indirect ) require ( filippo.io/edwards25519 v1.2.0 // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 
v3.4.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 github.com/chzyer/readline v1.5.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.2.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/dlclark/regexp2 v1.11.5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/libdns/libdns v1.1.1 github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/miekg/dns v1.1.72 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pires/go-proxyproto v0.11.0 github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.67.5 // indirect github.com/prometheus/procfs v0.19.2 // indirect github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/slackhq/nebula v1.10.3 // indirect github.com/spf13/cast v1.7.0 // 
indirect github.com/urfave/cli v1.22.17 // indirect go.etcd.io/bbolt v1.3.10 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.33.0 // indirect golang.org/x/sys v0.41.0 golang.org/x/text v0.34.0 golang.org/x/tools v0.42.0 // indirect google.golang.org/grpc v1.79.1 // indirect google.golang.org/protobuf v1.36.11 // indirect howett.net/plist v1.0.0 // indirect ) ================================================ FILE: go.sum ================================================ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= 
cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= code.pfad.fr/check v1.1.0 h1:GWvjdzhSEgHvEHe2uJujDcpmZoySKuHQNrZMfzfO0bE= code.pfad.fr/check v1.1.0/go.mod h1:NiUH13DtYsb7xp5wll0U4SXx7KhXQVCtRgdC96IPfoM= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= filippo.io/bigmod v0.1.0 h1:UNzDk7y9ADKST+axd9skUpBQeW7fG2KrTZyOE4uGQy8= filippo.io/bigmod v0.1.0/go.mod h1:OjOXDNlClLblvXdwgFFOQFJEocLhhtai8vGLy0JCZlI= filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo= filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DeRuina/timberjack v1.3.9 h1:6UXZ1I7ExPGTX/1UNYawR58LlOJUHKBPiYC7WQ91eBo= github.com/DeRuina/timberjack v1.3.9/go.mod h1:RLoeQrwrCGIEF8gO5nV5b/gMD0QIy7bzQhBUgpp1EqE= github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk= github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod 
h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs= github.com/alecthomas/chroma/v2 v2.23.1 h1:nv2AVZdTyClGbVQkIzlDm/rnhk1E9bU9nXwmZ/Vk/iY= github.com/alecthomas/chroma/v2 v2.23.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw= github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY= github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod 
h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY= github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8= github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 h1:DKibav4XF66XSeaXcrn9GlWGHos6D/vJ4r7jsK7z5CE= github.com/aws/aws-sdk-go-v2/service/kms v1.49.5/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= 
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/caddyserver/certmagic v0.25.2 h1:D7xcS7ggX/WEY54x0czj7ioTkmDWKIgxtIi2OcQclUc= github.com/caddyserver/certmagic v0.25.2/go.mod h1:llW/CvsNmza8S6hmsuggsZeiX+uS27dkqY27wDIuBWg= github.com/caddyserver/zerossl v0.1.5 h1:dkvOjBAEEtY6LIGAHei7sw2UgqSD6TrWweXpV7lvEvE= github.com/caddyserver/zerossl v0.1.5/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/ccoveille/go-safecast/v2 v2.0.0 h1:+5eyITXAUj3wMjad6cRVJKGnC7vDS55zk0INzJagub0= github.com/ccoveille/go-safecast/v2 v2.0.0/go.mod h1:JIYA4CAR33blIDuE6fSwCp2sz1oOBahXnvmdBhOAABs= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745 h1:heyoXNxkRT155x4jTAiSv5BVSVkueifPUm+Q8LUXMRo= github.com/google/certificate-transparency-go v1.1.8-0.20240110162603-74a5dd331745/go.mod h1:zN0wUQgV9LjwLZeFHnrAbQi8hzMVvEWePyk+MhPOk7k= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo= github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/go-tpm-tools v0.4.7 h1:J3ycC8umYxM9A4eF73EofRZu4BxY0jjQnUnkhIBbvws= github.com/google/go-tpm-tools v0.4.7/go.mod h1:gSyXTZHe3fgbzb6WEGd90QucmsnT1SRdlye82gH8QjQ= github.com/google/go-tspi v0.3.0 h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus= github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod 
h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc= github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.6.0 
h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/letsencrypt/challtestsrv v1.4.2 h1:0ON3ldMhZyWlfVNYYpFuWRTmZNnyfiL9Hh5YzC3JVwU= github.com/letsencrypt/challtestsrv v1.4.2/go.mod h1:GhqMqcSoeGpYd5zX5TgwA6er/1MbWzx/o7yuuVya+Wk= github.com/letsencrypt/pebble/v2 v2.10.0 h1:Wq6gYXlsY6ubqI3hhxsTzdyotvfdjFBxuwYqCLCnj/U= github.com/letsencrypt/pebble/v2 v2.10.0/go.mod 
h1:Sk8cmUIPcIdv2nINo+9PB4L+ZBhzY+F9A1a/h/xmWiQ= github.com/libdns/libdns v1.1.1 h1:wPrHrXILoSHKWJKGd0EiAVmiJbFShguILTg9leS/P/U= github.com/libdns/libdns v1.1.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/acmez/v3 v3.1.6 h1:eGVQNObP0pBN4sxqrXeg7MYqTOWyoiYpQqITVWlrevk= github.com/mholt/acmez/v3 v3.1.6/go.mod h1:5nTPosTGosLxF3+LU4ygbgMRFDhbAVpqMI4+a4aHLBY= github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI= github.com/miekg/dns v1.1.72/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU= github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o= github.com/pires/go-proxyproto v0.11.0 h1:gUQpS85X/VJMdUsYyEgyn59uLJvGqPhJV5YvG68wXH4= github.com/pires/go-proxyproto v0.11.0/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod 
h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos= github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E= github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/slackhq/nebula v1.10.3 h1:EstYj8ODEcv6T0R9X5BVq1zgWZnyU5gtPzk99QF1PMU= github.com/slackhq/nebula v1.10.3/go.mod h1:IL5TUQm4x9IFx2kCKPYm1gP47pwd5b8QGnnBH2RHnvs= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smallstep/certificates v0.30.0-rc3 h1:Lx/NNJ4n+L3Pyx5NtVRGXeqviPPXTFFGLRiC1fCwU50= github.com/smallstep/certificates v0.30.0-rc3/go.mod h1:e5/ylYYpvnjCVZz6RpyOkpTe73EGPYoL+8TZZ5EtLjI= github.com/smallstep/cli-utils v0.12.2 h1:lGzM9PJrH/qawbzMC/s2SvgLdJPKDWKwKzx9doCVO+k= github.com/smallstep/cli-utils v0.12.2/go.mod h1:uCPqefO29goHLGqFnwk0i8W7XJu18X3WHQFRtOm/00Y= github.com/smallstep/go-attestation v0.4.4-0.20241119153605-2306d5b464ca h1:VX8L0r8vybH0bPeaIxh4NQzafKQiqvlOn8pmOXbFLO4= github.com/smallstep/go-attestation v0.4.4-0.20241119153605-2306d5b464ca/go.mod h1:vNAduivU014fubg6ewygkAvQC0IQVXqdc8vaGl/0er4= github.com/smallstep/linkedca v0.25.0 h1:txT9QHGbCsJq0MhAghBq7qhurGY727tQuqUi+n4BVBo= github.com/smallstep/linkedca v0.25.0/go.mod h1:Q3jVAauFKNlF86W5/RFtgQeyDKz98GL/KN3KG4mJOvc= github.com/smallstep/nosql v0.7.0 h1:YiWC9ZAHcrLCrayfaF+QJUv16I2bZ7KdLC3RpJcnAnE= github.com/smallstep/nosql v0.7.0/go.mod h1:H5VnKMCbeq9QA6SRY5iqPylfxLfYcLwvUff3onQ8+HU= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/smallstep/scep v0.0.0-20250318231241-a25cabb69492 h1:k23+s51sgYix4Zgbvpmy+1ZgXLjr4ZTkBTqXmpnImwA= github.com/smallstep/scep v0.0.0-20250318231241-a25cabb69492/go.mod 
h1:QQhwLqCS13nhv8L5ov7NgusowENUtXdEzdytjmJHdZQ= github.com/smallstep/truststore v0.13.0 h1:90if9htAOblavbMeWlqNLnO9bsjjgVv2hQeQJCi/py4= github.com/smallstep/truststore v0.13.0/go.mod h1:3tmMp2aLKZ/OA/jnFUB0cYPcho402UG2knuJoPh4j7A= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify 
v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/tscert v0.0.0-20251216020129-aea342f6d747 h1:RnBbFMmodYzhC6adOjTbtUQXyzV8dcvKYbolzs6Qch0= github.com/tailscale/tscert v0.0.0-20251216020129-aea342f6d747/go.mod h1:ejPAJui3kVK4u5TgMtqtXlWf5HnKh9fLy5kvpaeuas0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ= github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.7.16 h1:n+CJdUxaFMiDUNnWC3dMWCIQJSkxH4uz3ZwQBkAlVNE= github.com/yuin/goldmark v1.7.16/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 h1:I/7S/yWobR3QHFLqHsJ8QOndoiFsj1VgHpQiq43KlUI= go.opentelemetry.io/contrib/bridges/prometheus v0.65.0/go.mod h1:jPF6gn3y1E+nozCAEQj3c6NZ8KY+tvAgSVfvoOJUFac= go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 h1:2gApdml7SznX9szEKFjKjM4qGcGSvAybYLBY319XG3g= go.opentelemetry.io/contrib/exporters/autoexport v0.65.0/go.mod h1:0QqAGlbHXhmPYACG3n5hNzO5DnEqqtg4VcK5pr22RI0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod 
h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= go.opentelemetry.io/contrib/propagators/autoprop v0.65.0 h1:kTaCycF9Xkm8VBBvH0rJ4wFeRjtIV55Erk3uuVsIs5s= go.opentelemetry.io/contrib/propagators/autoprop v0.65.0/go.mod h1:rooPzAbXfxMX9fsPJjmOBg2SN4RhFEV8D7cfGK+N3tE= go.opentelemetry.io/contrib/propagators/aws v1.40.0 h1:4VIrh75jW4RTimUNx1DSk+6H9/nDr1FvmKoOVDh3K04= go.opentelemetry.io/contrib/propagators/aws v1.40.0/go.mod h1:B0dCov9KNQGlut3T8wZZjDnLXEXdBroM7bFsHh/gRos= go.opentelemetry.io/contrib/propagators/b3 v1.40.0 h1:xariChe8OOVF3rNlfzGFgQc61npQmXhzZj/i82mxMfg= go.opentelemetry.io/contrib/propagators/b3 v1.40.0/go.mod h1:72WvbdxbOfXaELEQfonFfOL6osvcVjI7uJEE8C2nkrs= go.opentelemetry.io/contrib/propagators/jaeger v1.40.0 h1:aXl9uobjJs5vquMLt9ZkI/3zIuz8XQ3TqOKSWx0/xdU= go.opentelemetry.io/contrib/propagators/jaeger v1.40.0/go.mod h1:ioMePqe6k6c/ovXSkmkMr1mbN5qRBGJxNTVop7/2XO0= go.opentelemetry.io/contrib/propagators/ot v1.40.0 h1:Lon8J5SPmWaL1Ko2TIlCNHJ42/J1b5XbJlgJaE/9m7I= go.opentelemetry.io/contrib/propagators/ot v1.40.0/go.mod h1:dKWtJTlp1Yj+8Cneye5idO46eRPIbi23qVuJYKjNnvY= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 h1:ZVg+kCXxd9LtAaQNKBxAvJ5NpMf7LpvEr4MIZqb0TMQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0/go.mod h1:hh0tMeZ75CCXrHd9OXRYxTlCAdxcXioWHFIpYw2rZu8= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 h1:djrxvDxAe44mJUrKataUbOhCKhR3F8QCyWucO16hTQs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0/go.mod h1:dt3nxpQEiSoKvfTVxp3TUg5fHPLhKtbcnN3Z1I1ePD0= 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 h1:9y5sHvAxWzft1WQ4BwqcvA+IFVUJ1Ya75mSAUnFEVwE= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0/go.mod h1:eQqT90eR3X5Dbs1g9YSM30RavwLF725Ris5/XSXWvqE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= go.opentelemetry.io/otel/exporters/prometheus v0.62.0 h1:krvC4JMfIOVdEuNPTtQ0ZjCiXrybhv+uOHMfHRmnvVo= go.opentelemetry.io/otel/exporters/prometheus v0.62.0/go.mod h1:fgOE6FM/swEnsVQCqCnbOfRV4tOnWPg7bVeo4izBuhQ= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 h1:ivlbaajBWJqhcCPniDqDJmRwj4lc6sRT+dCAVKNmxlQ= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0/go.mod h1:u/G56dEKDDwXNCVLsbSrllB2o8pbtFLUC4HpR66r2dc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4= go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= go.opentelemetry.io/otel/sdk/log v0.16.0 h1:e/b4bdlQwC5fnGtG3dlXUrNOnP7c8YLVSpSfEBIkTnI= go.opentelemetry.io/otel/sdk/log v0.16.0/go.mod h1:JKfP3T6ycy7QEuv3Hj8oKDy7KItrEkus8XJE6EoSzw4= go.opentelemetry.io/otel/sdk/log/logtest v0.16.0 h1:/XVkpZ41rVRTP4DfMgYv1nEtNmf65XPPyAdqV90TMy4= go.opentelemetry.io/otel/sdk/log/logtest v0.16.0/go.mod h1:iOOPgQr5MY9oac/F5W86mXdeyWZGleIx3uXO98X2R6Y= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.step.sm/crypto v0.76.2 h1:JJ/yMcs/rmcCAwlo+afrHjq74XBFRTJw5B2y4Q4Z4c4= go.step.sm/crypto v0.76.2/go.mod h1:m6KlB/HzIuGFep0UWI5e0SYi38UxpoKeCg6qUaHV6/Q= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U= go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/crypto/x509roots/fallback v0.0.0-20260213171211-a408498e5541 h1:FmKxj9ocLKn45jiR2jQMwCVhDvaK7fKQFzfuT9GvyK8= golang.org/x/crypto/x509roots/fallback 
v0.0.0-20260213171211-a408498e5541/go.mod h1:+UoQFNBq2p2wO+Q6ddVtYc25GZ6VNdOMyyrd4nrqrKs= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk= google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= google.golang.org/genproto/googleapis/api 
v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= 
================================================ FILE: internal/filesystems/map.go ================================================ package filesystems import ( "io/fs" "strings" "sync" ) const ( DefaultFileSystemKey = "default" ) var DefaultFileSystem = &wrapperFs{key: DefaultFileSystemKey, FS: OsFS{}} // wrapperFs exists so can easily add to wrapperFs down the line type wrapperFs struct { key string fs.FS } // FileSystemMap stores a map of filesystems // the empty key will be overwritten to be the default key // it includes a default filesystem, based off the os fs type FileSystemMap struct { m sync.Map } // note that the first invocation of key cannot be called in a racy context. func (f *FileSystemMap) key(k string) string { if k == "" { k = DefaultFileSystemKey } return k } // Register will add the filesystem with key to later be retrieved // A call with a nil fs will call unregister, ensuring that a call to Default() will never be nil func (f *FileSystemMap) Register(k string, v fs.FS) { k = f.key(k) if v == nil { f.Unregister(k) return } f.m.Store(k, &wrapperFs{key: k, FS: v}) } // Unregister will remove the filesystem with key from the filesystem map // if the key is the default key, it will set the default to the osFS instead of deleting it // modules should call this on cleanup to be safe func (f *FileSystemMap) Unregister(k string) { k = f.key(k) if k == DefaultFileSystemKey { f.m.Store(k, DefaultFileSystem) } else { f.m.Delete(k) } } // Get will get a filesystem with a given key func (f *FileSystemMap) Get(k string) (v fs.FS, ok bool) { k = f.key(k) c, ok := f.m.Load(strings.TrimSpace(k)) if !ok { if k == DefaultFileSystemKey { f.m.Store(k, DefaultFileSystem) return DefaultFileSystem, true } return nil, ok } return c.(fs.FS), true } // Default will get the default filesystem in the filesystem map func (f *FileSystemMap) Default() fs.FS { val, _ := f.Get(DefaultFileSystemKey) return val } ================================================ FILE: 
internal/filesystems/os.go ================================================ package filesystems import ( "io/fs" "os" "path/filepath" ) // OsFS is a simple fs.FS implementation that uses the local // file system. (We do not use os.DirFS because we do our own // rooting or path prefixing without being constrained to a single // root folder. The standard os.DirFS implementation is problematic // since roots can be dynamic in our application.) // // OsFS also implements fs.StatFS, fs.GlobFS, fs.ReadDirFS, and fs.ReadFileFS. type OsFS struct{} func (OsFS) Open(name string) (fs.File, error) { return os.Open(name) } func (OsFS) Stat(name string) (fs.FileInfo, error) { return os.Stat(name) } func (OsFS) Glob(pattern string) ([]string, error) { return filepath.Glob(pattern) } func (OsFS) ReadDir(name string) ([]fs.DirEntry, error) { return os.ReadDir(name) } func (OsFS) ReadFile(name string) ([]byte, error) { return os.ReadFile(name) } var ( _ fs.StatFS = (*OsFS)(nil) _ fs.GlobFS = (*OsFS)(nil) _ fs.ReadDirFS = (*OsFS)(nil) _ fs.ReadFileFS = (*OsFS)(nil) ) ================================================ FILE: internal/logbuffer.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "sync" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // LogBufferCore is a zapcore.Core that buffers log entries in memory. 
type LogBufferCore struct { mu sync.Mutex entries []zapcore.Entry fields [][]zapcore.Field level zapcore.LevelEnabler } type LogBufferCoreInterface interface { zapcore.Core FlushTo(*zap.Logger) } func NewLogBufferCore(level zapcore.LevelEnabler) *LogBufferCore { return &LogBufferCore{ level: level, } } func (c *LogBufferCore) Enabled(lvl zapcore.Level) bool { return c.level.Enabled(lvl) } func (c *LogBufferCore) With(fields []zapcore.Field) zapcore.Core { return c } func (c *LogBufferCore) Check(entry zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { if c.Enabled(entry.Level) { return ce.AddCore(entry, c) } return ce } func (c *LogBufferCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { c.mu.Lock() defer c.mu.Unlock() c.entries = append(c.entries, entry) c.fields = append(c.fields, fields) return nil } func (c *LogBufferCore) Sync() error { return nil } // FlushTo flushes buffered logs to the given zap.Logger. func (c *LogBufferCore) FlushTo(logger *zap.Logger) { c.mu.Lock() defer c.mu.Unlock() for idx, entry := range c.entries { logger.WithOptions().Check(entry.Level, entry.Message).Write(c.fields[idx]...) } c.entries = nil c.fields = nil } var ( _ zapcore.Core = (*LogBufferCore)(nil) _ LogBufferCoreInterface = (*LogBufferCore)(nil) ) ================================================ FILE: internal/logs.go ================================================ package internal import "fmt" // MaxSizeSubjectsListForLog returns the keys in the map as a slice of maximum length // maxToDisplay. It is useful for logging domains being managed, for example, since a // map is typically needed for quick lookup, but a slice is needed for logging, and this // can be quite a doozy since there may be a huge amount (hundreds of thousands). 
func MaxSizeSubjectsListForLog(subjects map[string]struct{}, maxToDisplay int) []string { numberOfNamesToDisplay := min(len(subjects), maxToDisplay) domainsToDisplay := make([]string, 0, numberOfNamesToDisplay) for domain := range subjects { domainsToDisplay = append(domainsToDisplay, domain) if len(domainsToDisplay) >= numberOfNamesToDisplay { break } } if len(subjects) > maxToDisplay { domainsToDisplay = append(domainsToDisplay, fmt.Sprintf("(and %d more...)", len(subjects)-maxToDisplay)) } return domainsToDisplay } ================================================ FILE: internal/metrics/metrics.go ================================================ package metrics import ( "net/http" "strconv" ) func SanitizeCode(s int) string { switch s { case 0, 200: return "200" default: return strconv.Itoa(s) } } // Only support the list of "regular" HTTP methods, see // https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods var methodMap = map[string]string{ "GET": http.MethodGet, "get": http.MethodGet, "HEAD": http.MethodHead, "head": http.MethodHead, "PUT": http.MethodPut, "put": http.MethodPut, "POST": http.MethodPost, "post": http.MethodPost, "DELETE": http.MethodDelete, "delete": http.MethodDelete, "CONNECT": http.MethodConnect, "connect": http.MethodConnect, "OPTIONS": http.MethodOptions, "options": http.MethodOptions, "TRACE": http.MethodTrace, "trace": http.MethodTrace, "PATCH": http.MethodPatch, "patch": http.MethodPatch, } // SanitizeMethod sanitizes the method for use as a metric label. This helps // prevent high cardinality on the method label. The name is always upper case. 
func SanitizeMethod(m string) string { if m, ok := methodMap[m]; ok { return m } return "OTHER" } ================================================ FILE: internal/metrics/metrics_test.go ================================================ package metrics import ( "strings" "testing" ) func TestSanitizeMethod(t *testing.T) { tests := []struct { method string expected string }{ {method: "get", expected: "GET"}, {method: "POST", expected: "POST"}, {method: "OPTIONS", expected: "OPTIONS"}, {method: "connect", expected: "CONNECT"}, {method: "trace", expected: "TRACE"}, {method: "UNKNOWN", expected: "OTHER"}, {method: strings.Repeat("ohno", 9999), expected: "OTHER"}, } for _, d := range tests { actual := SanitizeMethod(d.method) if actual != d.expected { t.Errorf("Not same: expected %#v, but got %#v", d.expected, actual) } } } ================================================ FILE: internal/ranges.go ================================================ package internal // PrivateRangesCIDR returns a list of private CIDR range // strings, which can be used as a configuration shortcut. func PrivateRangesCIDR() []string { return []string{ "192.168.0.0/16", "172.16.0.0/12", "10.0.0.0/8", "127.0.0.1/8", "fd00::/8", "::1", } } ================================================ FILE: internal/sockets.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal

import (
	"fmt"
	"io/fs"
	"strconv"
	"strings"
)

// SplitUnixSocketPermissionsBits takes a unix socket address in the
// unusual "path|bits" format (e.g. /run/caddy.sock|0222) and tries
// to split it into socket path (host) and permissions bits (port).
// Colons (":") can't be used as separator, as socket paths on Windows
// may include a drive letter (e.g. `unix/c:\absolute\path.sock`).
// Permission bits will default to 0200 if none are specified.
// Throws an error, if the first carrying bit does not
// include write perms (e.g. `0422` or `022`).
// Symbolic permission representation (e.g. `u=w,g=w,o=w`)
// is not supported and will throw an error for now!
func SplitUnixSocketPermissionsBits(addr string) (path string, fileMode fs.FileMode, err error) {
	sockPath, bits, found := strings.Cut(addr, "|")
	if !found {
		// default to 0200 (symbolic: `u=w,g=,o=`)
		// if no permission bits are specified
		return addr, 0o200, nil
	}

	// parse octal permission bit string as uint32
	parsed, err := strconv.ParseUint(bits, 8, 32)
	if err != nil {
		return "", 0, fmt.Errorf("could not parse octal permission bits in %s: %v", addr, err)
	}
	fileMode = fs.FileMode(parsed)

	// FileMode.String() returns a string like `-rwxr-xr--` for `u=rwx,g=rx,o=r` (`0754`);
	// index 2 is the owner write bit
	if fileMode.String()[2] != 'w' {
		return "", 0, fmt.Errorf("owner of the socket requires '-w-' (write, octal: '2') permissions at least; got '%s' in %s", fileMode.String()[1:4], addr)
	}

	return sockPath, fileMode, nil
}

================================================ FILE: internal/testmocks/dummyverifier.go ================================================
package testmocks

import (
	"crypto/x509"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func init() {
	caddy.RegisterModule(new(dummyVerifier))
}

type dummyVerifier struct{}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (dummyVerifier) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil }

// CaddyModule implements caddy.Module.
func (dummyVerifier) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.client_auth.verifier.dummy",
		New: func() caddy.Module { return new(dummyVerifier) },
	}
}

// VerifyClientCertificate implements ClientCertificateVerifier.
// It accepts any client certificate chain without inspecting it
// (this is a test mock).
func (dummyVerifier) VerifyClientCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
	return nil
}

// Interface guards.
var (
	_ caddy.Module                       = dummyVerifier{}
	_ caddytls.ClientCertificateVerifier = dummyVerifier{}
	_ caddyfile.Unmarshaler              = dummyVerifier{}
)

================================================ FILE: listen.go ================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !unix || solaris

package caddy

import (
	"context"
	"fmt"
	"net"
	"os"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
)

// reuseUnixSocket is a no-op on platforms without unix FD-reuse support.
func reuseUnixSocket(_, _ string) (any, error) {
	return nil, nil
}

// listenReusable returns a shared listener or packet conn for the given
// network and address, tracked in listenerPool so reloads can reuse the
// same socket. For the "fd"/"fdgram" networks, address is interpreted as
// an inherited file descriptor number.
func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
	var socketFile *os.File

	fd := slices.Contains([]string{"fd", "fdgram"}, network)
	if fd {
		socketFd, err := strconv.ParseUint(address, 0, strconv.IntSize)
		if err != nil {
			return nil, fmt.Errorf("invalid file descriptor: %v", err)
		}

		// look up (or create and cache) the *os.File for this inherited fd;
		// done in a closure so the mutex is released before continuing
		func() {
			socketFilesMu.Lock()
			defer socketFilesMu.Unlock()

			socketFdWide := uintptr(socketFd)
			var ok bool

			socketFile, ok = socketFiles[socketFdWide]

			if !ok {
				socketFile = os.NewFile(socketFdWide, lnKey)
				if socketFile != nil {
					socketFiles[socketFdWide] = socketFile
				}
			}
		}()

		if socketFile == nil {
			return nil, fmt.Errorf("invalid socket file descriptor: %d", socketFd)
		}
	}

	datagram := slices.Contains([]string{"udp", "udp4", "udp6", "unixgram", "fdgram"}, network)
	if datagram {
		// share one underlying PacketConn per lnKey; each caller gets its
		// own fakeClosePacketConn wrapper with independent "closed" state
		sharedPc, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
			var (
				pc  net.PacketConn
				err error
			)
			if fd {
				pc, err = net.FilePacketConn(socketFile)
			} else {
				pc, err = config.ListenPacket(ctx, network, address)
			}
			if err != nil {
				return nil, err
			}
			return &sharedPacketConn{PacketConn: pc, key: lnKey}, nil
		})
		if err != nil {
			return nil, err
		}
		return &fakeClosePacketConn{sharedPacketConn: sharedPc.(*sharedPacketConn)}, nil
	}

	// stream networks: share one underlying Listener per lnKey, wrapped
	// per-caller in a fakeCloseListener
	sharedLn, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
		var (
			ln  net.Listener
			err error
		)
		if fd {
			ln, err = net.FileListener(socketFile)
		} else {
			ln, err = config.Listen(ctx, network, address)
		}
		if err != nil {
			return nil, err
		}
		return &sharedListener{Listener: ln, key: lnKey}, nil
	})
	if err != nil {
		return nil, err
	}
	return &fakeCloseListener{sharedListener: sharedLn.(*sharedListener), keepAliveConfig: config.KeepAliveConfig}, nil
}

// fakeCloseListener is a private wrapper over a listener
// that is shared. The state of fakeCloseListener is not shared.
// This allows one user of a socket to "close" the listener
// while in reality the socket stays open for other users of
// the listener. In this way, servers become hot-swappable
// while the listener remains running. Listeners should be
// re-wrapped in a new fakeCloseListener each time the listener
// is reused. This type is atomic and values must not be copied.
type fakeCloseListener struct {
	closed          int32 // accessed atomically; belongs to this struct only
	*sharedListener       // embedded, so we also become a net.Listener
	keepAliveConfig net.KeepAliveConfig
}

// canSetKeepAliveConfig is implemented by connections (e.g. *net.TCPConn)
// whose keep-alive behavior can be tuned after Accept.
type canSetKeepAliveConfig interface {
	SetKeepAliveConfig(config net.KeepAliveConfig) error
}

// Accept accepts a connection from the shared listener, unless this
// wrapper has been "closed", in which case it reports a fake-closed error.
func (fcl *fakeCloseListener) Accept() (net.Conn, error) {
	// if the listener is already "closed", return error
	if atomic.LoadInt32(&fcl.closed) == 1 {
		return nil, fakeClosedErr(fcl)
	}

	// call underlying accept
	conn, err := fcl.sharedListener.Accept()
	if err == nil {
		// if 0, do nothing, Go's default is already set
		// and if the connection allows setting KeepAlive, set it
		if tconn, ok := conn.(canSetKeepAliveConfig); ok && fcl.keepAliveConfig.Enable {
			err = tconn.SetKeepAliveConfig(fcl.keepAliveConfig)
			if err != nil {
				Log().With(zap.String("server", fcl.sharedListener.key)).Warn("unable to set keepalive for new connection:", zap.Error(err))
			}
		}
		return conn, nil
	}

	// since Accept() returned an error, it may be because our reference to
	// the listener (this fakeCloseListener) may have been closed, i.e. the
	// server is shutting down; in that case, we need to clear the deadline
	// that we set when Close() was called, and return a non-temporary and
	// non-timeout error value to the caller, masking the "true" error, so
	// that server loops / goroutines won't retry, linger, and leak
	if atomic.LoadInt32(&fcl.closed) == 1 {
		// we dereference the sharedListener explicitly even though it's embedded
		// so that it's clear in the code that side-effects are shared with other
		// users of this listener, not just our own reference to it; we also don't
		// do anything with the error because all we could do is log it, but we
		// explicitly assign it to nothing so we don't forget it's there if needed
		_ = fcl.sharedListener.clearDeadline()

		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			return nil, fakeClosedErr(fcl)
		}
	}

	return nil, err
}

// Close stops accepting new connections without closing the
// underlying listener. The underlying listener is only closed
// if the caller is the last known user of the socket.
func (fcl *fakeCloseListener) Close() error {
	if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) {
		// There are two ways I know of to get an Accept()
		// function to return to the server loop that called
		// it: close the listener, or set a deadline in the
		// past. Obviously, we can't close the socket yet
		// since others may be using it (hence this whole
		// file). But we can set the deadline in the past,
		// and this is kind of cheating, but it works, and
		// it apparently even works on Windows.
		_ = fcl.sharedListener.setDeadline()
		_, _ = listenerPool.Delete(fcl.sharedListener.key)
	}
	return nil
}

// sharedListener is a wrapper over an underlying listener. The listener
// and the other fields on the struct are shared state that is synchronized,
// so sharedListener structs must never be copied (always use a pointer).
type sharedListener struct {
	net.Listener
	key        string // uniquely identifies this listener
	deadline   bool   // whether a deadline is currently set
	deadlineMu sync.Mutex
}

// clearDeadline resets any deadline previously set by setDeadline so
// that new servers can accept on this listener again. Only TCP
// listeners support deadlines here; other types are left untouched.
func (sl *sharedListener) clearDeadline() error {
	var err error
	sl.deadlineMu.Lock()
	if sl.deadline {
		switch ln := sl.Listener.(type) {
		case *net.TCPListener:
			err = ln.SetDeadline(time.Time{})
		}
		sl.deadline = false
	}
	sl.deadlineMu.Unlock()
	return err
}

// setDeadline sets a deadline in the past on the listener, which causes
// pending and future Accept() calls to return (with a timeout error) so
// old server loops can exit without closing the shared socket.
func (sl *sharedListener) setDeadline() error {
	timeInPast := time.Now().Add(-1 * time.Minute)
	var err error
	sl.deadlineMu.Lock()
	if !sl.deadline {
		switch ln := sl.Listener.(type) {
		case *net.TCPListener:
			err = ln.SetDeadline(timeInPast)
		}
		sl.deadline = true
	}
	sl.deadlineMu.Unlock()
	return err
}

// Destruct is called by the UsagePool when the listener is
// finally not being used anymore. It closes the socket.
func (sl *sharedListener) Destruct() error {
	return sl.Listener.Close()
}

// fakeClosePacketConn is like fakeCloseListener, but for PacketConns,
// or more specifically, *net.UDPConn
type fakeClosePacketConn struct {
	closed            int32 // accessed atomically; belongs to this struct only
	*sharedPacketConn       // embedded, so we also become a net.PacketConn; its key is used in Close
}

// ReadFrom reads from the shared packet conn, unless this wrapper has
// been "closed", in which case it reports a fake-closed *net.OpError.
func (fcpc *fakeClosePacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	// if the listener is already "closed", return error
	if atomic.LoadInt32(&fcpc.closed) == 1 {
		return 0, nil, &net.OpError{
			Op:   "readfrom",
			Net:  fcpc.LocalAddr().Network(),
			Addr: fcpc.LocalAddr(),
			Err:  errFakeClosed,
		}
	}

	// call underlying readfrom
	n, addr, err = fcpc.sharedPacketConn.ReadFrom(p)
	if err != nil {
		// this server was stopped, so clear the deadline and let
		// any new server continue reading; but we will exit
		if atomic.LoadInt32(&fcpc.closed) == 1 {
			if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
				if err = fcpc.SetReadDeadline(time.Time{}); err != nil {
					return n, addr, err
				}
			}
		}
		return n, addr, err
	}

	return n, addr, err
}

// Close won't close the underlying socket unless
// there is no more reference, then listenerPool will close it.
func (fcpc *fakeClosePacketConn) Close() error {
	if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) {
		_ = fcpc.SetReadDeadline(time.Now()) // unblock ReadFrom() calls to kick old servers out of their loops
		_, _ = listenerPool.Delete(fcpc.sharedPacketConn.key)
	}
	return nil
}

// Unwrap returns the underlying net.PacketConn shared by all wrappers.
func (fcpc *fakeClosePacketConn) Unwrap() net.PacketConn {
	return fcpc.sharedPacketConn.PacketConn
}

// sharedPacketConn is like sharedListener, but for net.PacketConns.
type sharedPacketConn struct {
	net.PacketConn
	key string
}

// Destruct closes the underlying socket.
func (spc *sharedPacketConn) Destruct() error {
	return spc.PacketConn.Close()
}

// Unwrap returns the underlying socket
func (spc *sharedPacketConn) Unwrap() net.PacketConn {
	return spc.PacketConn
}

// Interface guards (see https://github.com/caddyserver/caddy/issues/3998)
var (
	_ (interface {
		Unwrap() net.PacketConn
	}) = (*fakeClosePacketConn)(nil)
)

// socketFiles is a fd -> *os.File map used to make a FileListener/FilePacketConn from a socket file descriptor.
var socketFiles = map[uintptr]*os.File{}

// socketFilesMu synchronizes socketFiles insertions
var socketFilesMu sync.Mutex

================================================ FILE: listen_unix.go ================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Even though the filename ends in _unix.go, we still have to specify the
// build constraint here, because the filename convention only works for
// literal GOOS values, and "unix" is a shortcut unique to build tags.
//go:build unix && !solaris

package caddy

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net"
	"os"
	"slices"
	"strconv"
	"sync"
	"sync/atomic"
	"syscall"

	"go.uber.org/zap"
	"golang.org/x/sys/unix"
)

// reuseUnixSocket copies and reuses the unix domain socket (UDS) if we already
// have it open; if not, unlink it so we can have it.
// No-op if not a unix network.
func reuseUnixSocket(network, addr string) (any, error) {
	socketKey := listenerKey(network, addr)

	socket, exists := unixSockets[socketKey]
	if exists {
		// make copy of file descriptor
		socketFile, err := socket.File() // does dup() deep down
		if err != nil {
			return nil, err
		}

		// use copied fd to make new Listener or PacketConn, then replace
		// it in the map so that future copies always come from the most
		// recent fd (as the previous ones will be closed, and we'd get
		// "use of closed network connection" errors) -- note that we
		// preserve the *pointer* to the counter (not just the value) so
		// that all socket wrappers will refer to the same value
		switch unixSocket := socket.(type) {
		case *unixListener:
			ln, err := net.FileListener(socketFile)
			if err != nil {
				return nil, err
			}
			atomic.AddInt32(unixSocket.count, 1)
			unixSockets[socketKey] = &unixListener{ln.(*net.UnixListener), socketKey, unixSocket.count}

		case *unixConn:
			pc, err := net.FilePacketConn(socketFile)
			if err != nil {
				return nil, err
			}
			atomic.AddInt32(unixSocket.count, 1)
			unixSockets[socketKey] = &unixConn{pc.(*net.UnixConn), socketKey, unixSocket.count}
		}

		return unixSockets[socketKey], nil
	}

	// from what I can tell after some quick research, it's quite common for programs to
	// leave their socket file behind after they close, so the typical pattern is to
	// unlink it before you bind to it -- this is often crucial if the last program using
	// it was killed forcefully without a chance to clean up the socket, but there is a
	// race, as the comment in net.UnixListener.close() explains... oh well, I guess?
	if err := syscall.Unlink(addr); err != nil && !errors.Is(err, fs.ErrNotExist) {
		return nil, err
	}

	return nil, nil
}

// listenReusable creates a new listener for the given network and address, and adds it to listenerPool.
func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
	// even though SO_REUSEPORT lets us bind the socket multiple times,
	// we still put it in the listenerPool so we can count how many
	// configs are using this socket; necessary to ensure we can know
	// whether to enforce shutdown delays, for example (see #5393).
	var (
		ln         io.Closer
		err        error
		socketFile *os.File
	)

	fd := slices.Contains([]string{"fd", "fdgram"}, network)
	if fd {
		socketFd, err := strconv.ParseUint(address, 0, strconv.IntSize)
		if err != nil {
			return nil, fmt.Errorf("invalid file descriptor: %v", err)
		}

		// look up (or create and cache) the *os.File for this inherited fd;
		// done in a closure so the mutex is released before continuing
		func() {
			socketFilesMu.Lock()
			defer socketFilesMu.Unlock()

			socketFdWide := uintptr(socketFd)
			var ok bool

			socketFile, ok = socketFiles[socketFdWide]

			if !ok {
				socketFile = os.NewFile(socketFdWide, lnKey)
				if socketFile != nil {
					socketFiles[socketFdWide] = socketFile
				}
			}
		}()

		if socketFile == nil {
			return nil, fmt.Errorf("invalid socket file descriptor: %d", socketFd)
		}
	} else {
		// wrap any Control function set by the user so we can also add our reusePort control without clobbering theirs
		oldControl := config.Control
		config.Control = func(network, address string, c syscall.RawConn) error {
			if oldControl != nil {
				if err := oldControl(network, address, c); err != nil {
					return err
				}
			}
			return reusePort(network, address, c)
		}
	}

	datagram := slices.Contains([]string{"udp", "udp4", "udp6", "unixgram", "fdgram"}, network)
	if datagram {
		if fd {
			ln, err = net.FilePacketConn(socketFile)
		} else {
			ln, err = config.ListenPacket(ctx, network, address)
		}
	} else {
		if fd {
			ln, err = net.FileListener(socketFile)
		} else {
			ln, err = config.Listen(ctx, network, address)
		}
	}

	if err == nil {
		listenerPool.LoadOrStore(lnKey, nil)
	}

	if datagram {
		if !fd {
			// TODO: Not 100% sure this is necessary, but we do this for net.UnixListener, so...
			if unix, ok := ln.(*net.UnixConn); ok {
				one := int32(1)
				ln = &unixConn{unix, lnKey, &one}
				unixSockets[lnKey] = ln.(*unixConn)
			}
		}
		// lightly wrap the connection so that when it is closed,
		// we can decrement the usage pool counter
		if specificLn, ok := ln.(net.PacketConn); ok {
			ln = deletePacketConn{specificLn, lnKey}
		}
	} else {
		if !fd {
			// if new listener is a unix socket, make sure we can reuse it later
			// (we do our own "unlink on close" -- not required, but more tidy)
			if unix, ok := ln.(*net.UnixListener); ok {
				unix.SetUnlinkOnClose(false)
				one := int32(1)
				ln = &unixListener{unix, lnKey, &one}
				unixSockets[lnKey] = ln.(*unixListener)
			}
		}
		// lightly wrap the listener so that when it is closed,
		// we can decrement the usage pool counter
		if specificLn, ok := ln.(net.Listener); ok {
			ln = deleteListener{specificLn, lnKey}
		}
	}

	// other types, I guess we just return them directly
	return ln, err
}

// reusePort sets SO_REUSEPORT. Ineffective for unix sockets.
func reusePort(network, address string, conn syscall.RawConn) error { if IsUnixNetwork(network) { return nil } return conn.Control(func(descriptor uintptr) { if err := unix.SetsockoptInt(int(descriptor), unix.SOL_SOCKET, unixSOREUSEPORT, 1); err != nil { Log().Error("setting SO_REUSEPORT", zap.String("network", network), zap.String("address", address), zap.Uintptr("descriptor", descriptor), zap.Error(err)) } }) } type unixListener struct { *net.UnixListener mapKey string count *int32 // accessed atomically } func (uln *unixListener) Close() error { newCount := atomic.AddInt32(uln.count, -1) if newCount == 0 { file, err := uln.File() var name string if err == nil { name = file.Name() } defer func() { unixSocketsMu.Lock() delete(unixSockets, uln.mapKey) unixSocketsMu.Unlock() if err == nil { _ = syscall.Unlink(name) } }() } return uln.UnixListener.Close() } type unixConn struct { *net.UnixConn mapKey string count *int32 // accessed atomically } func (uc *unixConn) Close() error { newCount := atomic.AddInt32(uc.count, -1) if newCount == 0 { file, err := uc.File() var name string if err == nil { name = file.Name() } defer func() { unixSocketsMu.Lock() delete(unixSockets, uc.mapKey) unixSocketsMu.Unlock() if err == nil { _ = syscall.Unlink(name) } }() } return uc.UnixConn.Close() } func (uc *unixConn) Unwrap() net.PacketConn { return uc.UnixConn } // unixSockets keeps track of the currently-active unix sockets // so we can transfer their FDs gracefully during reloads. var unixSockets = make(map[string]interface { File() (*os.File, error) }) // socketFiles is a fd -> *os.File map used to make a FileListener/FilePacketConn from a socket file descriptor. var socketFiles = map[uintptr]*os.File{} // socketFilesMu synchronizes socketFiles insertions var socketFilesMu sync.Mutex // deleteListener is a type that simply deletes itself // from the listenerPool when it closes. It is used // solely for the purpose of reference counting (i.e. 
// counting how many configs are using a given socket). type deleteListener struct { net.Listener lnKey string } func (dl deleteListener) Close() error { _, _ = listenerPool.Delete(dl.lnKey) return dl.Listener.Close() } // deletePacketConn is like deleteListener, but // for net.PacketConns. type deletePacketConn struct { net.PacketConn lnKey string } func (dl deletePacketConn) Close() error { _, _ = listenerPool.Delete(dl.lnKey) return dl.PacketConn.Close() } func (dl deletePacketConn) Unwrap() net.PacketConn { return dl.PacketConn } ================================================ FILE: listen_unix_setopt.go ================================================ //go:build unix && !freebsd && !solaris package caddy import "golang.org/x/sys/unix" const unixSOREUSEPORT = unix.SO_REUSEPORT ================================================ FILE: listen_unix_setopt_freebsd.go ================================================ //go:build freebsd package caddy import "golang.org/x/sys/unix" const unixSOREUSEPORT = unix.SO_REUSEPORT_LB ================================================ FILE: listeners.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddy import ( "context" "crypto/tls" "errors" "fmt" "io" "io/fs" "net" "net/netip" "os" "strconv" "strings" "sync" "sync/atomic" "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/http3" h3qlog "github.com/quic-go/quic-go/http3/qlog" "go.uber.org/zap" "golang.org/x/time/rate" "github.com/caddyserver/caddy/v2/internal" ) // NetworkAddress represents one or more network addresses. // It contains the individual components for a parsed network // address of the form accepted by ParseNetworkAddress(). type NetworkAddress struct { // Should be a network value accepted by Go's net package or // by a plugin providing a listener for that network type. Network string // The "main" part of the network address is the host, which // often takes the form of a hostname, DNS name, IP address, // or socket path. Host string // For addresses that contain a port, ranges are given by // [StartPort, EndPort]; i.e. for a single port, StartPort // and EndPort are the same. For no port, they are 0. StartPort uint EndPort uint } // ListenAll calls Listen for all addresses represented by this struct, i.e. all ports in the range. // (If the address doesn't use ports or has 1 port only, then only 1 listener will be created.) // It returns an error if any listener failed to bind, and closes any listeners opened up to that point. 
func (na NetworkAddress) ListenAll(ctx context.Context, config net.ListenConfig) ([]any, error) { var listeners []any var err error // if one of the addresses has a failure, we need to close // any that did open a socket to avoid leaking resources defer func() { if err == nil { return } for _, ln := range listeners { if cl, ok := ln.(io.Closer); ok { cl.Close() } } }() // an address can contain a port range, which represents multiple addresses; // some addresses don't use ports at all and have a port range size of 1; // whatever the case, iterate each address represented and bind a socket for portOffset := uint(0); portOffset < na.PortRangeSize(); portOffset++ { select { case <-ctx.Done(): return nil, ctx.Err() default: } // create (or reuse) the listener ourselves var ln any ln, err = na.Listen(ctx, portOffset, config) if err != nil { return nil, err } listeners = append(listeners, ln) } return listeners, nil } // Listen is similar to net.Listen, with a few differences: // // Listen announces on the network address using the port calculated by adding // portOffset to the start port. (For network types that do not use ports, the // portOffset is ignored.) // // First Listen checks if a plugin can provide a listener from this address. Otherwise, // the provided ListenConfig is used to create the listener. Its Control function, // if set, may be wrapped by an internally-used Control function. The provided // context may be used to cancel long operations early. The context is not used // to close the listener after it has been created. // // Caddy's listeners can overlap each other: multiple listeners may be created on // the same socket at the same time. This is useful because during config changes, // the new config is started while the old config is still running. How this is // accomplished varies by platform and network type. 
For example, on Unix, SO_REUSEPORT // is set except on Unix sockets, for which the file descriptor is duplicated and // reused; on Windows, the close logic is virtualized using timeouts. Like normal // listeners, be sure to Close() them when you are done. // // This method returns any type, as the implementations of listeners for various // network types are not interchangeable. The type of listener returned is switched // on the network type. Stream-based networks ("tcp", "unix", "unixpacket", etc.) // return a net.Listener; datagram-based networks ("udp", "unixgram", etc.) return // a net.PacketConn; and so forth. The actual concrete types are not guaranteed to // be standard, exported types (wrapping is necessary to provide graceful reloads). // // Unix sockets will be unlinked before being created, to ensure we can bind to // it even if the previous program using it exited uncleanly; it will also be // unlinked upon a graceful exit (or when a new config does not use that socket). // Listen synchronizes binds to unix domain sockets to avoid race conditions // while an existing socket is unlinked. 
func (na NetworkAddress) Listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) { if na.IsUnixNetwork() { unixSocketsMu.Lock() defer unixSocketsMu.Unlock() } // check to see if plugin provides listener if ln, err := getListenerFromPlugin(ctx, na.Network, na.Host, na.port(), portOffset, config); ln != nil || err != nil { return ln, err } // create (or reuse) the listener ourselves return na.listen(ctx, portOffset, config) } func (na NetworkAddress) listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) { var ( ln any err error address string unixFileMode fs.FileMode ) // split unix socket addr early so lnKey // is independent of permissions bits if na.IsUnixNetwork() { address, unixFileMode, err = internal.SplitUnixSocketPermissionsBits(na.Host) if err != nil { return nil, err } } else if na.IsFdNetwork() { address = na.Host } else { address = na.JoinHostPort(portOffset) } if strings.HasPrefix(na.Network, "ip") { ln, err = config.ListenPacket(ctx, na.Network, address) } else { if na.IsUnixNetwork() { // if this is a unix socket, see if we already have it open ln, err = reuseUnixSocket(na.Network, address) } if ln == nil && err == nil { // otherwise, create a new listener lnKey := listenerKey(na.Network, address) ln, err = listenReusable(ctx, lnKey, na.Network, address, config) } } if err != nil { return nil, err } if ln == nil { return nil, fmt.Errorf("unsupported network type: %s", na.Network) } if IsUnixNetwork(na.Network) { isAbstractUnixSocket := strings.HasPrefix(address, "@") if !isAbstractUnixSocket { err = os.Chmod(address, unixFileMode) if err != nil { return nil, fmt.Errorf("unable to set permissions (%s) on %s: %v", unixFileMode, address, err) } } } return ln, nil } // IsUnixNetwork returns true if na.Network is // unix, unixgram, or unixpacket. func (na NetworkAddress) IsUnixNetwork() bool { return IsUnixNetwork(na.Network) } // IsFdNetwork returns true if na.Network is // fd or fdgram. 
func (na NetworkAddress) IsFdNetwork() bool { return IsFdNetwork(na.Network) } // JoinHostPort is like net.JoinHostPort, but where the port // is StartPort + offset. func (na NetworkAddress) JoinHostPort(offset uint) string { if na.IsUnixNetwork() || na.IsFdNetwork() { return na.Host } return net.JoinHostPort(na.Host, strconv.FormatUint(uint64(na.StartPort+offset), 10)) } // Expand returns one NetworkAddress for each port in the port range. func (na NetworkAddress) Expand() []NetworkAddress { size := na.PortRangeSize() addrs := make([]NetworkAddress, size) for portOffset := range size { addrs[portOffset] = na.At(portOffset) } return addrs } // At returns a NetworkAddress with a port range of just 1 // at the given port offset; i.e. a NetworkAddress that // represents precisely 1 address only. func (na NetworkAddress) At(portOffset uint) NetworkAddress { na2 := na na2.StartPort, na2.EndPort = na.StartPort+portOffset, na.StartPort+portOffset return na2 } // PortRangeSize returns how many ports are in // pa's port range. Port ranges are inclusive, // so the size is the difference of start and // end ports plus one. func (na NetworkAddress) PortRangeSize() uint { if na.EndPort < na.StartPort { return 0 } return (na.EndPort - na.StartPort) + 1 } func (na NetworkAddress) isLoopback() bool { if na.IsUnixNetwork() || na.IsFdNetwork() { return true } if na.Host == "localhost" { return true } if ip, err := netip.ParseAddr(na.Host); err == nil { return ip.IsLoopback() } return false } func (na NetworkAddress) isWildcardInterface() bool { if na.Host == "" { return true } if ip, err := netip.ParseAddr(na.Host); err == nil { return ip.IsUnspecified() } return false } func (na NetworkAddress) port() string { if na.StartPort == na.EndPort { return strconv.FormatUint(uint64(na.StartPort), 10) } return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort) } // String reconstructs the address string for human display. // The output can be parsed by ParseNetworkAddress(). 
If the // address is a unix socket, any non-zero port will be dropped. func (na NetworkAddress) String() string { if na.Network == "tcp" && (na.Host != "" || na.port() != "") { na.Network = "" // omit default network value for brevity } return JoinNetworkAddress(na.Network, na.Host, na.port()) } // IsUnixNetwork returns true if the netw is a unix network. func IsUnixNetwork(netw string) bool { return strings.HasPrefix(netw, "unix") } // IsFdNetwork returns true if the netw is a fd network. func IsFdNetwork(netw string) bool { return strings.HasPrefix(netw, "fd") } // ParseNetworkAddress parses addr into its individual // components. The input string is expected to be of // the form "network/host:port-range" where any part is // optional. The default network, if unspecified, is tcp. // Port ranges are inclusive. // // Network addresses are distinct from URLs and do not // use URL syntax. func ParseNetworkAddress(addr string) (NetworkAddress, error) { return ParseNetworkAddressWithDefaults(addr, "tcp", 0) } // ParseNetworkAddressWithDefaults is like ParseNetworkAddress but allows // the default network and port to be specified. 
func ParseNetworkAddressWithDefaults(addr, defaultNetwork string, defaultPort uint) (NetworkAddress, error) { var host, port string network, host, port, err := SplitNetworkAddress(addr) if err != nil { return NetworkAddress{}, err } if network == "" { network = defaultNetwork } if IsUnixNetwork(network) { _, _, err := internal.SplitUnixSocketPermissionsBits(host) return NetworkAddress{ Network: network, Host: host, }, err } if IsFdNetwork(network) { return NetworkAddress{ Network: network, Host: host, }, nil } var start, end uint64 if port == "" { start = uint64(defaultPort) end = uint64(defaultPort) } else { before, after, found := strings.Cut(port, "-") if !found { after = before } start, err = strconv.ParseUint(before, 10, 16) if err != nil { return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err) } end, err = strconv.ParseUint(after, 10, 16) if err != nil { return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err) } if end < start { return NetworkAddress{}, fmt.Errorf("end port must not be less than start port") } if (end - start) > maxPortSpan { return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan) } } return NetworkAddress{ Network: network, Host: host, StartPort: uint(start), EndPort: uint(end), }, nil } // SplitNetworkAddress splits a into its network, host, and port components. // Note that port may be a port range (:X-Y), or omitted for unix sockets. 
func SplitNetworkAddress(a string) (network, host, port string, err error) { beforeSlash, afterSlash, slashFound := strings.Cut(a, "/") if slashFound { network = strings.ToLower(strings.TrimSpace(beforeSlash)) a = afterSlash if IsUnixNetwork(network) || IsFdNetwork(network) { host = a return network, host, port, err } } host, port, err = net.SplitHostPort(a) firstErr := err if err != nil { // in general, if there was an error, it was likely "missing port", // so try removing square brackets around an IPv6 host, adding a bogus // port to take advantage of standard library's robust parser, then // strip the artificial port. host, _, err = net.SplitHostPort(net.JoinHostPort(strings.Trim(a, "[]"), "0")) port = "" } if err != nil { err = errors.Join(firstErr, err) } return network, host, port, err } // JoinNetworkAddress combines network, host, and port into a single // address string of the form accepted by ParseNetworkAddress(). For // unix sockets, the network should be "unix" (or "unixgram" or // "unixpacket") and the path to the socket should be given as the // host parameter. func JoinNetworkAddress(network, host, port string) string { var a string if network != "" { a = network + "/" } if (host != "" && port == "") || IsUnixNetwork(network) || IsFdNetwork(network) { a += host } else if port != "" { a += net.JoinHostPort(host, port) } return a } // ListenQUIC returns a http3.QUICEarlyListener suitable for use in a Caddy module. // // The network will be transformed into a QUIC-compatible type if the same address can be used with // different networks. Currently this just means that for tcp, udp will be used with the same // address instead. // // NOTE: This API is EXPERIMENTAL and may be changed or removed. // NOTE: user should close the returned listener twice, once to stop accepting new connections, the second time to free up the packet conn. 
func (na NetworkAddress) ListenQUIC(ctx context.Context, portOffset uint, config net.ListenConfig, tlsConf *tls.Config, pcWrappers []PacketConnWrapper, allow0rttconf *bool) (http3.QUICListener, error) {
	// key the shared listener on the quic-prefixed network so it does not
	// collide with a plain listener on the same host:port
	lnKey := listenerKey("quic"+na.Network, na.JoinHostPort(portOffset))

	// LoadOrNew ensures only one QUIC transport exists per lnKey; the
	// constructor below runs only when the listener is not already pooled
	sharedEarlyListener, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
		lnAny, err := na.Listen(ctx, portOffset, config)
		if err != nil {
			return nil, err
		}

		ln := lnAny.(net.PacketConn)

		h3ln := ln
		if len(pcWrappers) == 0 {
			for {
				// retrieve the underlying socket, so quic-go can optimize.
				if unwrapper, ok := h3ln.(interface{ Unwrap() net.PacketConn }); ok {
					h3ln = unwrapper.Unwrap()
				} else {
					break
				}
			}
		} else {
			// wrap packet conn before QUIC
			for _, pcWrapper := range pcWrappers {
				h3ln = pcWrapper.WrapPacketConn(h3ln)
			}
		}

		sqs := newSharedQUICState(tlsConf)
		// http3.ConfigureTLSConfig only uses this field and tls App sets this field as well
		//nolint:gosec
		quicTlsConfig := &tls.Config{GetConfigForClient: sqs.getConfigForClient}
		// Require clients to verify their source address when we're handling more than 1000 handshakes per second.
		// TODO: make tunable?
		limiter := rate.NewLimiter(1000, 1000)
		tr := &quic.Transport{
			Conn:                h3ln,
			VerifySourceAddress: func(addr net.Addr) bool { return !limiter.Allow() },
		}
		// 0-RTT is on by default unless the config explicitly disables it
		allow0rtt := true
		if allow0rttconf != nil {
			allow0rtt = *allow0rttconf
		}
		earlyLn, err := tr.ListenEarly(
			http3.ConfigureTLSConfig(quicTlsConfig),
			&quic.Config{
				Allow0RTT: allow0rtt,
				Tracer:    h3qlog.DefaultConnectionTracer,
			},
		)
		if err != nil {
			return nil, err
		}
		// TODO: figure out when to close the listener and the transport
		// using the original net.PacketConn to close them properly
		return &sharedQuicListener{EarlyListener: earlyLn, packetConn: ln, sqs: sqs, key: lnKey}, nil
	})
	if err != nil {
		return nil, err
	}

	sql := sharedEarlyListener.(*sharedQuicListener)
	// add current tls.Config to sqs, so GetConfigForClient will always return the latest tls.Config in case of context cancellation
	ctx, cancel := sql.sqs.addState(tlsConf)

	return &fakeCloseQuicListener{
		sharedQuicListener: sql,
		context:            ctx,
		contextCancel:      cancel,
	}, nil
}

// ListenerUsage returns the current usage count of the given listener address.
func ListenerUsage(network, addr string) int {
	count, _ := listenerPool.References(listenerKey(network, addr))
	return count
}

// contextAndCancelFunc groups context and its cancelFunc
type contextAndCancelFunc struct {
	context.Context
	context.CancelCauseFunc
}

// sharedQUICState manages GetConfigForClient
// see issue: https://github.com/caddyserver/caddy/pull/4849
type sharedQUICState struct {
	rmu           sync.RWMutex
	tlsConfs      map[*tls.Config]contextAndCancelFunc
	activeTlsConf *tls.Config
}

// newSharedQUICState creates a new sharedQUICState
func newSharedQUICState(tlsConfig *tls.Config) *sharedQUICState {
	sqtc := &sharedQUICState{
		tlsConfs:      make(map[*tls.Config]contextAndCancelFunc),
		activeTlsConf: tlsConfig,
	}
	sqtc.addState(tlsConfig)
	return sqtc
}

// getConfigForClient is used as tls.Config's GetConfigForClient field
func (sqs *sharedQUICState) getConfigForClient(ch *tls.ClientHelloInfo) (*tls.Config, error) {
	sqs.rmu.RLock()
	defer sqs.rmu.RUnlock()
	return sqs.activeTlsConf.GetConfigForClient(ch)
}

// addState adds tls.Config and activeRequests to the map if not present and returns the corresponding context and its cancelFunc
// so that when cancelled, the active tls.Config will change
func (sqs *sharedQUICState) addState(tlsConfig *tls.Config) (context.Context, context.CancelCauseFunc) {
	sqs.rmu.Lock()
	defer sqs.rmu.Unlock()

	// already tracked: hand back the existing context/cancel pair
	if cacc, ok := sqs.tlsConfs[tlsConfig]; ok {
		return cacc.Context, cacc.CancelCauseFunc
	}

	ctx, cancel := context.WithCancelCause(context.Background())
	// the wrapped cancel also evicts this config and, if it was active,
	// promotes any remaining config to active
	wrappedCancel := func(cause error) {
		cancel(cause)

		sqs.rmu.Lock()
		defer sqs.rmu.Unlock()

		delete(sqs.tlsConfs, tlsConfig)
		if sqs.activeTlsConf == tlsConfig {
			// select another tls.Config, if there is none,
			// related sharedQuicListener will be destroyed anyway
			for tc := range sqs.tlsConfs {
				sqs.activeTlsConf = tc
				break
			}
		}
	}
	sqs.tlsConfs[tlsConfig] = contextAndCancelFunc{ctx, wrappedCancel}
	// there should be at most 2 tls.Configs
	if len(sqs.tlsConfs) > 2 {
		Log().Warn("quic listener tls configs are more than 2", zap.Int("number of configs", len(sqs.tlsConfs)))
	}
	return ctx, wrappedCancel
}

// sharedQuicListener is like sharedListener, but for quic.EarlyListeners.
type sharedQuicListener struct {
	*quic.EarlyListener
	packetConn net.PacketConn // we have to hold these because quic-go won't close listeners it didn't create
	sqs        *sharedQUICState
	key        string
}

// Destruct closes the underlying QUIC listener and its associated net.PacketConn.
func (sql *sharedQuicListener) Destruct() error {
	// close EarlyListener first to stop any operations being done to the net.PacketConn
	_ = sql.EarlyListener.Close()
	// then close the net.PacketConn
	return sql.packetConn.Close()
}

// fakeClosedErr returns an error value that is not temporary
// nor a timeout, suitable for making the caller think the
// listener is actually closed
func fakeClosedErr(l interface{ Addr() net.Addr }) error {
	return &net.OpError{
		Op:   "accept",
		Net:  l.Addr().Network(),
		Addr: l.Addr(),
		Err:  errFakeClosed,
	}
}

// errFakeClosed is the underlying error value returned by
// fakeCloseListener.Accept() after Close() has been called,
// indicating that it is pretending to be closed so that the
// server using it can terminate, while the underlying
// socket is actually left open.
var errFakeClosed = fmt.Errorf("QUIC listener 'closed' 😉")

type fakeCloseQuicListener struct {
	closed              int32 // accessed atomically; belongs to this struct only
	*sharedQuicListener       // embedded, so we also become a quic.EarlyListener
	context             context.Context
	contextCancel       context.CancelCauseFunc
}

// Currently Accept ignores the passed context, however a situation where
// someone would need a hotswappable QUIC-only (not http3, since it uses context.Background here)
// server on which Accept would be called with non-empty contexts
// (mind that the default net listeners' Accept doesn't take a context argument)
// sounds way too rare for us to sacrifice efficiency here.
func (fcql *fakeCloseQuicListener) Accept(_ context.Context) (*quic.Conn, error) { conn, err := fcql.sharedQuicListener.Accept(fcql.context) if err == nil { return conn, nil } // if the listener is "closed", return a fake closed error instead if atomic.LoadInt32(&fcql.closed) == 1 && errors.Is(err, context.Canceled) { return nil, fakeClosedErr(fcql) } return nil, err } func (fcql *fakeCloseQuicListener) Close() error { if atomic.CompareAndSwapInt32(&fcql.closed, 0, 1) { fcql.contextCancel(errFakeClosed) } else if atomic.CompareAndSwapInt32(&fcql.closed, 1, 2) { _, _ = listenerPool.Delete(fcql.sharedQuicListener.key) } return nil } // RegisterNetwork registers a network type with Caddy so that if a listener is // created for that network type, getListener will be invoked to get the listener. // This should be called during init() and will panic if the network type is standard // or reserved, or if it is already registered. EXPERIMENTAL and subject to change. func RegisterNetwork(network string, getListener ListenerFunc) { network = strings.TrimSpace(strings.ToLower(network)) if network == "tcp" || network == "tcp4" || network == "tcp6" || network == "udp" || network == "udp4" || network == "udp6" || network == "unix" || network == "unixpacket" || network == "unixgram" || strings.HasPrefix(network, "ip:") || strings.HasPrefix(network, "ip4:") || strings.HasPrefix(network, "ip6:") || network == "fd" || network == "fdgram" { panic("network type " + network + " is reserved") } if _, ok := networkTypes[strings.ToLower(network)]; ok { panic("network type " + network + " is already registered") } networkTypes[network] = getListener } var unixSocketsMu sync.Mutex // getListenerFromPlugin returns a listener on the given network and address // if a plugin has registered the network name. It may return (nil, nil) if // no plugin can provide a listener. 
func getListenerFromPlugin(ctx context.Context, network, host, port string, portOffset uint, config net.ListenConfig) (any, error) { // get listener from plugin if network type is registered if getListener, ok := networkTypes[network]; ok { Log().Debug("getting listener from plugin", zap.String("network", network)) return getListener(ctx, network, host, port, portOffset, config) } return nil, nil } func listenerKey(network, addr string) string { return network + "/" + addr } // ListenerFunc is a function that can return a listener given a network and address. // The listeners must be capable of overlapping: with Caddy, new configs are loaded // before old ones are unloaded, so listeners may overlap briefly if the configs // both need the same listener. EXPERIMENTAL and subject to change. type ListenerFunc func(ctx context.Context, network, host, portRange string, portOffset uint, cfg net.ListenConfig) (any, error) var networkTypes = map[string]ListenerFunc{} // ListenerWrapper is a type that wraps a listener // so it can modify the input listener's methods. // Modules that implement this interface are found // in the caddy.listeners namespace. Usually, to // wrap a listener, you will define your own struct // type that embeds the input listener, then // implement your own methods that you want to wrap, // calling the underlying listener's methods where // appropriate. type ListenerWrapper interface { WrapListener(net.Listener) net.Listener } // PacketConnWrapper is a type that wraps a packet conn // so it can modify the input packet conn methods. // Modules that implement this interface are found // in the caddy.packetconns namespace. Usually, to // wrap a packet conn, you will define your own struct // type that embeds the input packet conn, then // implement your own methods that you want to wrap, // calling the underlying packet conn methods where // appropriate. 
type PacketConnWrapper interface { WrapPacketConn(net.PacketConn) net.PacketConn } // listenerPool stores and allows reuse of active listeners. var listenerPool = NewUsagePool() const maxPortSpan = 65535 ================================================ FILE: listeners_fuzz.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build gofuzz package caddy func FuzzParseNetworkAddress(data []byte) int { _, err := ParseNetworkAddress(string(data)) if err != nil { return 0 } return 1 } ================================================ FILE: listeners_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddy

import (
	"reflect"
	"testing"

	"github.com/caddyserver/caddy/v2/internal"
)

// TestSplitNetworkAddress verifies that SplitNetworkAddress separates
// an address string into its network, host, and port (or port range)
// components, including the network/host form with a "/" separator
// and unix socket paths.
func TestSplitNetworkAddress(t *testing.T) {
	for i, tc := range []struct {
		input         string
		expectNetwork string
		expectHost    string
		expectPort    string
		expectErr     bool
	}{
		{
			input:      "",
			expectHost: "",
		},
		{
			input:      "foo",
			expectHost: "foo",
		},
		{
			input: ":", // empty host & empty port
		},
		{
			input:      "::",
			expectHost: "::",
		},
		{
			input:      "[::]",
			expectHost: "::",
		},
		{
			input:      ":1234",
			expectPort: "1234",
		},
		{
			input:      "foo:1234",
			expectHost: "foo",
			expectPort: "1234",
		},
		{
			input:      "foo:1234-5678",
			expectHost: "foo",
			expectPort: "1234-5678",
		},
		{
			input:         "udp/foo:1234",
			expectNetwork: "udp",
			expectHost:    "foo",
			expectPort:    "1234",
		},
		{
			input:         "tcp6/foo:1234-5678",
			expectNetwork: "tcp6",
			expectHost:    "foo",
			expectPort:    "1234-5678",
		},
		{
			input:         "udp/",
			expectNetwork: "udp",
			expectHost:    "",
		},
		{
			input:         "unix//foo/bar",
			expectNetwork: "unix",
			expectHost:    "/foo/bar",
		},
		{
			input:         "unixgram//foo/bar",
			expectNetwork: "unixgram",
			expectHost:    "/foo/bar",
		},
		{
			input:         "unixpacket//foo/bar",
			expectNetwork: "unixpacket",
			expectHost:    "/foo/bar",
		},
	} {
		actualNetwork, actualHost, actualPort, err := SplitNetworkAddress(tc.input)
		if tc.expectErr && err == nil {
			t.Errorf("Test %d: Expected error but got %v", i, err)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("Test %d: Expected no error but got %v", i, err)
		}
		if actualNetwork != tc.expectNetwork {
			t.Errorf("Test %d: Expected network '%s' but got '%s'", i, tc.expectNetwork, actualNetwork)
		}
		if actualHost != tc.expectHost {
			t.Errorf("Test %d: Expected host '%s' but got '%s'", i, tc.expectHost, actualHost)
		}
		if actualPort != tc.expectPort {
			t.Errorf("Test %d: Expected port '%s' but got '%s'", i, tc.expectPort, actualPort)
		}
	}
}

// TestJoinNetworkAddress verifies that JoinNetworkAddress is the inverse
// of SplitNetworkAddress: it recombines network, host, and port into a
// single address string, bracketing IPv6 hosts and omitting the port
// for unix sockets.
func TestJoinNetworkAddress(t *testing.T) {
	for i, tc := range []struct {
		network, host, port string
		expect              string
	}{
		{
			network: "", host: "", port: "",
			expect: "",
		},
		{
			network: "tcp", host: "", port: "",
			expect: "tcp/",
		},
		{
			network: "", host: "foo", port: "",
			expect: "foo",
		},
		{
			network: "", host: "", port: "1234",
			expect: ":1234",
		},
		{
			network: "", host: "", port: "1234-5678",
			expect: ":1234-5678",
		},
		{
			network: "", host: "foo", port: "1234",
			expect: "foo:1234",
		},
		{
			network: "udp", host: "foo", port: "1234",
			expect: "udp/foo:1234",
		},
		{
			network: "udp", host: "", port: "1234",
			expect: "udp/:1234",
		},
		{
			network: "unix", host: "/foo/bar", port: "",
			expect: "unix//foo/bar",
		},
		{
			network: "unix", host: "/foo/bar", port: "0",
			expect: "unix//foo/bar",
		},
		{
			network: "unix", host: "/foo/bar", port: "1234",
			expect: "unix//foo/bar",
		},
		{
			network: "", host: "::1", port: "1234",
			expect: "[::1]:1234",
		},
	} {
		actual := JoinNetworkAddress(tc.network, tc.host, tc.port)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected '%s' but got '%s'", i, tc.expect, actual)
		}
	}
}

// TestParseNetworkAddress verifies parsing of network addresses into
// NetworkAddress values, including port ranges and error cases.
//
// NOTE(review): this test is nearly identical to
// TestParseNetworkAddressWithDefaults below and calls
// ParseNetworkAddressWithDefaults rather than ParseNetworkAddress;
// consider exercising ParseNetworkAddress directly (or merging the
// two tests) so both entry points are covered — confirm intent.
func TestParseNetworkAddress(t *testing.T) {
	for i, tc := range []struct {
		input          string
		defaultNetwork string
		defaultPort    uint
		expectAddr     NetworkAddress
		expectErr      bool
	}{
		{
			input:      "",
			expectAddr: NetworkAddress{},
		},
		{
			input:          ":",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network: "udp",
			},
		},
		{
			input:          "[::]",
			defaultNetwork: "udp",
			defaultPort:    53,
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "::",
				StartPort: 53,
				EndPort:   53,
			},
		},
		{
			input:          ":1234",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "udp/:1234",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			// explicit network in the input overrides the default
			input:          "tcp6/:1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp6",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "tcp4/localhost:1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp4",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "unix//foo/bar",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network: "unix",
				Host:    "/foo/bar",
			},
		},
		{
			input:          "localhost:1234-1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			// inverted port range is invalid
			input:          "localhost:2-1",
			defaultNetwork: "tcp",
			expectErr:      true,
		},
		{
			input:          "localhost:0",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 0,
				EndPort:   0,
			},
		},
		{
			// port out of 16-bit range is invalid
			input:          "localhost:1-999999999999",
			defaultNetwork: "tcp",
			expectErr:      true,
		},
	} {
		actualAddr, err := ParseNetworkAddressWithDefaults(tc.input, tc.defaultNetwork, tc.defaultPort)
		if tc.expectErr && err == nil {
			t.Errorf("Test %d: Expected error but got: %v", i, err)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("Test %d: Expected no error but got: %v", i, err)
		}
		if actualAddr.Network != tc.expectAddr.Network {
			t.Errorf("Test %d: Expected network '%v' but got '%v'", i, tc.expectAddr, actualAddr)
		}
		if !reflect.DeepEqual(tc.expectAddr, actualAddr) {
			t.Errorf("Test %d: Expected addresses %v but got %v", i, tc.expectAddr, actualAddr)
		}
	}
}

// TestParseNetworkAddressWithDefaults verifies that
// ParseNetworkAddressWithDefaults applies the given default network
// and default port when the input omits them, and that explicit
// values in the input take precedence over the defaults.
func TestParseNetworkAddressWithDefaults(t *testing.T) {
	for i, tc := range []struct {
		input          string
		defaultNetwork string
		defaultPort    uint
		expectAddr     NetworkAddress
		expectErr      bool
	}{
		{
			input:      "",
			expectAddr: NetworkAddress{},
		},
		{
			input:          ":",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network: "udp",
			},
		},
		{
			input:          "[::]",
			defaultNetwork: "udp",
			defaultPort:    53,
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "::",
				StartPort: 53,
				EndPort:   53,
			},
		},
		{
			input:          ":1234",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "udp/:1234",
			defaultNetwork: "udp",
			expectAddr: NetworkAddress{
				Network:   "udp",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			// explicit network in the input overrides the default
			input:          "tcp6/:1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp6",
				Host:      "",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "tcp4/localhost:1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp4",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			input:          "unix//foo/bar",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network: "unix",
				Host:    "/foo/bar",
			},
		},
		{
			input:          "localhost:1234-1234",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1234,
			},
		},
		{
			// inverted port range is invalid
			input:          "localhost:2-1",
			defaultNetwork: "tcp",
			expectErr:      true,
		},
		{
			input:          "localhost:0",
			defaultNetwork: "tcp",
			expectAddr: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 0,
				EndPort:   0,
			},
		},
		{
			// port out of 16-bit range is invalid
			input:          "localhost:1-999999999999",
			defaultNetwork: "tcp",
			expectErr:      true,
		},
	} {
		actualAddr, err := ParseNetworkAddressWithDefaults(tc.input, tc.defaultNetwork, tc.defaultPort)
		if tc.expectErr && err == nil {
			t.Errorf("Test %d: Expected error but got: %v", i, err)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("Test %d: Expected no error but got: %v", i, err)
		}
		if actualAddr.Network != tc.expectAddr.Network {
			t.Errorf("Test %d: Expected network '%v' but got '%v'", i, tc.expectAddr, actualAddr)
		}
		if !reflect.DeepEqual(tc.expectAddr, actualAddr) {
			t.Errorf("Test %d: Expected addresses %v but got %v", i, tc.expectAddr, actualAddr)
		}
	}
}

// TestJoinHostPort verifies that NetworkAddress.JoinHostPort joins
// the host with the start port plus the given offset, and returns
// just the host (socket path) for unix networks.
func TestJoinHostPort(t *testing.T) {
	for i, tc := range []struct {
		pa     NetworkAddress
		offset uint
		expect string
	}{
		{
			pa: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1234,
			},
			expect: "localhost:1234",
		},
		{
			pa: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1235,
			},
			expect: "localhost:1234",
		},
		{
			// offset selects a later port within the range
			pa: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 1234,
				EndPort:   1235,
			},
			offset: 1,
			expect: "localhost:1235",
		},
		{
			pa: NetworkAddress{
				Network: "unix",
				Host:    "/run/php/php7.3-fpm.sock",
			},
			expect: "/run/php/php7.3-fpm.sock",
		},
	} {
		actual := tc.pa.JoinHostPort(tc.offset)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected '%s' but got '%s'", i, tc.expect, actual)
		}
	}
}

// TestExpand verifies that NetworkAddress.Expand enumerates a port
// range into one NetworkAddress per port, yields an empty slice for
// an inverted range, and passes unix addresses through unchanged.
func TestExpand(t *testing.T) {
	for i, tc := range []struct {
		input  NetworkAddress
		expect []NetworkAddress
	}{
		{
			input: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 2000,
				EndPort:   2000,
			},
			expect: []NetworkAddress{
				{
					Network:   "tcp",
					Host:      "localhost",
					StartPort: 2000,
					EndPort:   2000,
				},
			},
		},
		{
			input: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 2000,
				EndPort:   2002,
			},
			expect: []NetworkAddress{
				{
					Network:   "tcp",
					Host:      "localhost",
					StartPort: 2000,
					EndPort:   2000,
				},
				{
					Network:   "tcp",
					Host:      "localhost",
					StartPort: 2001,
					EndPort:   2001,
				},
				{
					Network:   "tcp",
					Host:      "localhost",
					StartPort: 2002,
					EndPort:   2002,
				},
			},
		},
		{
			// inverted range expands to nothing
			input: NetworkAddress{
				Network:   "tcp",
				Host:      "localhost",
				StartPort: 2000,
				EndPort:   1999,
			},
			expect: []NetworkAddress{},
		},
		{
			input: NetworkAddress{
				Network:   "unix",
				Host:      "/foo/bar",
				StartPort: 0,
				EndPort:   0,
			},
			expect: []NetworkAddress{
				{
					Network:   "unix",
					Host:      "/foo/bar",
					StartPort: 0,
					EndPort:   0,
				},
			},
		},
	} {
		actual := tc.input.Expand()
		if !reflect.DeepEqual(actual, tc.expect) {
			t.Errorf("Test %d: Expected %+v but got %+v", i, tc.expect, actual)
		}
	}
}

// TestSplitUnixSocketPermissionsBits verifies that a unix socket
// address of the form "path|perms" is split into the path and an
// octal permission mode, defaulting to 0200 (owner write) when no
// permissions are given, and rejecting invalid permission strings.
func TestSplitUnixSocketPermissionsBits(t *testing.T) {
	for i, tc := range []struct {
		input          string
		expectNetwork  string
		expectPath     string
		expectFileMode string
		expectErr      bool
	}{
		{
			input:          "./foo.socket",
			expectPath:     "./foo.socket",
			expectFileMode: "--w-------",
		},
		{
			input:          `.\relative\path.socket`,
			expectPath:     `.\relative\path.socket`,
			expectFileMode: "--w-------",
		},
		{
			// literal colon in resulting address
			// and defaulting to 0200 bits
			input:          "./foo.socket:0666",
			expectPath:     "./foo.socket:0666",
			expectFileMode: "--w-------",
		},
		{
			input:          "./foo.socket|0220",
			expectPath:     "./foo.socket",
			expectFileMode: "--w--w----",
		},
		{
			input:          "/var/run/foo|222",
			expectPath:     "/var/run/foo",
			expectFileMode: "--w--w--w-",
		},
		{
			input:          "./foo.socket|0660",
			expectPath:     "./foo.socket",
			expectFileMode: "-rw-rw----",
		},
		{
			input:          "./foo.socket|0666",
			expectPath:     "./foo.socket",
			expectFileMode: "-rw-rw-rw-",
		},
		{
			input:          "/var/run/foo|666",
			expectPath:     "/var/run/foo",
			expectFileMode: "-rw-rw-rw-",
		},
		{
			input:          `c:\absolute\path.socket|220`,
			expectPath:     `c:\absolute\path.socket`,
			expectFileMode: "--w--w----",
		},
		{
			// symbolic permission representation is not supported for now
			input:     "./foo.socket|u=rw,g=rw,o=rw",
			expectErr: true,
		},
		{
			// octal (base-8) permission representation has to be between
			// `0` for no read, no write, no exec (`---`) and
			// `7` for read (4), write (2), exec (1) (`rwx` => `4+2+1 = 7`)
			input:     "./foo.socket|888",
			expectErr: true,
		},
		{
			// too many permission-bit separators in address
			input:     "./foo.socket|123456|0660",
			expectErr: true,
		},
		{
			// owner is missing write perms
			input:     "./foo.socket|0522",
			expectErr: true,
		},
	} {
		actualPath, actualFileMode, err := internal.SplitUnixSocketPermissionsBits(tc.input)
		if tc.expectErr && err == nil {
			t.Errorf("Test %d: Expected error but got: %v", i, err)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("Test %d: Expected no error but got: %v", i, err)
		}
		if actualPath != tc.expectPath {
			t.Errorf("Test %d: Expected path '%s' but got '%s'", i, tc.expectPath, actualPath)
		}
		// fileMode.Perm().String() parses 0 to "----------"
		if !tc.expectErr && actualFileMode.Perm().String() != tc.expectFileMode {
			t.Errorf("Test %d: Expected perms '%s' but got '%s'", i, tc.expectFileMode, actualFileMode.Perm().String())
		}
	}
}

================================================
FILE: logging.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"slices"
	"strings"
	"sync"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/term"

	"github.com/caddyserver/caddy/v2/internal"
)

func init() {
	RegisterModule(StdoutWriter{})
	RegisterModule(StderrWriter{})
	RegisterModule(DiscardWriter{})
}

// Logging facilitates logging within Caddy. The default log is
// called "default" and you can customize it. You can also define
// additional logs.
//
// By default, all logs at INFO level and higher are written to
// standard error ("stderr" writer) in a human-readable format
// ("console" encoder if stdout is an interactive terminal, "json"
// encoder otherwise).
//
// All defined logs accept all log entries by default, but you
// can filter by level and module/logger names. A logger's name
// is the same as the module's name, but a module may append to
// logger names for more specificity. For example, you can
// filter logs emitted only by HTTP handlers using the name
// "http.handlers", because all HTTP handler module names have
// that prefix.
//
// Caddy logs (except the sink) are zero-allocation, so they are
// very high-performing in terms of memory and CPU time. Enabling
// sampling can further increase throughput on extremely high-load
// servers.
type Logging struct {
	// Sink is the destination for all unstructured logs emitted
	// from Go's standard library logger. These logs are common
	// in dependencies that are not designed specifically for use
	// in Caddy. Because it is global and unstructured, the sink
	// lacks most advanced features and customizations.
	Sink *SinkLog `json:"sink,omitempty"`

	// Logs are your logs, keyed by an arbitrary name of your
	// choosing. The default log can be customized by defining
	// a log called "default". You can further define other logs
	// and filter what kinds of entries they accept.
	Logs map[string]*CustomLog `json:"logs,omitempty"`

	// a list of all keys for open writers; all writers
	// that are opened to provision this logging config
	// must have their keys added to this list so they
	// can be closed when cleaning up
	writerKeys []string
}

// openLogs sets up the config and opens all the configured writers.
// It closes its logs when ctx is canceled, so it should clean up
// after itself.
func (logging *Logging) openLogs(ctx Context) error {
	// make sure to deallocate resources when context is done
	ctx.OnCancel(func() {
		err := logging.closeLogs()
		if err != nil {
			Log().Error("closing logs", zap.Error(err))
		}
	})

	// set up the "sink" log first (std lib's default global logger)
	if logging.Sink != nil {
		err := logging.Sink.provision(ctx, logging)
		if err != nil {
			return fmt.Errorf("setting up sink log: %v", err)
		}
	}

	// as a special case, set up the default structured Caddy log next
	if err := logging.setupNewDefault(ctx); err != nil {
		return err
	}

	// then set up any other custom logs
	for name, l := range logging.Logs {
		// the default log is already set up
		if name == DefaultLoggerName {
			continue
		}

		err := l.provision(ctx, logging)
		if err != nil {
			return fmt.Errorf("setting up custom log '%s': %v", name, err)
		}

		// Any other logs that use the discard writer can be deleted
		// entirely. This avoids encoding and processing of each
		// log entry that would just be thrown away anyway. Notably,
		// we do not reach this point for the default log, which MUST
		// exist, otherwise core log emissions would panic because
		// they use the Log() function directly which expects a non-nil
		// logger. Even if we keep logs with a discard writer, they
		// have a nop core, and keeping them at all seems unnecessary.
		if _, ok := l.writerOpener.(*DiscardWriter); ok {
			delete(logging.Logs, name)
			continue
		}
	}

	return nil
}

// setupNewDefault provisions the "default" log (using the
// user-defined one if present, or built-in production defaults
// otherwise), swaps it in as the process-wide default logger,
// and flushes any buffered entries from the old default into it.
func (logging *Logging) setupNewDefault(ctx Context) error {
	if logging.Logs == nil {
		logging.Logs = make(map[string]*CustomLog)
	}

	// extract the user-defined default log, if any
	newDefault := new(defaultCustomLog)
	if userDefault, ok := logging.Logs[DefaultLoggerName]; ok {
		newDefault.CustomLog = userDefault
	} else {
		// if none, make one with our own default settings
		var err error
		newDefault, err = newDefaultProductionLog()
		if err != nil {
			return fmt.Errorf("setting up default Caddy log: %v", err)
		}
		logging.Logs[DefaultLoggerName] = newDefault.CustomLog
	}

	// options for the default logger
	options, err := newDefault.CustomLog.buildOptions()
	if err != nil {
		return fmt.Errorf("setting up default log: %v", err)
	}

	// set up this new log
	err = newDefault.CustomLog.provision(ctx, logging)
	if err != nil {
		return fmt.Errorf("setting up default log: %v", err)
	}
	filteringCore := &filteringCore{newDefault.CustomLog.core, newDefault.CustomLog}
	newDefault.logger = zap.New(filteringCore, options...)

	// redirect the default caddy logs
	defaultLoggerMu.Lock()
	oldDefault := defaultLogger
	defaultLogger = newDefault
	defaultLoggerMu.Unlock()

	// if the new writer is different, indicate it in the logs for convenience
	var newDefaultLogWriterKey, currentDefaultLogWriterKey string
	var newDefaultLogWriterStr, currentDefaultLogWriterStr string
	if newDefault.writerOpener != nil {
		newDefaultLogWriterKey = newDefault.writerOpener.WriterKey()
		newDefaultLogWriterStr = newDefault.writerOpener.String()
	}
	if oldDefault.writerOpener != nil {
		currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey()
		currentDefaultLogWriterStr = oldDefault.writerOpener.String()
	}
	if newDefaultLogWriterKey != currentDefaultLogWriterKey {
		oldDefault.logger.Info("redirected default logger",
			zap.String("from", currentDefaultLogWriterStr),
			zap.String("to", newDefaultLogWriterStr),
		)
	}

	// if we had a buffered core, flush its contents ASAP
	// before we try to log anything else, so the order of
	// logs is preserved
	if oldBufferCore, ok := oldDefault.logger.Core().(*internal.LogBufferCore); ok {
		oldBufferCore.FlushTo(newDefault.logger)
	}

	return nil
}

// closeLogs cleans up resources allocated during openLogs.
// A successful call to openLogs calls this automatically
// when the context is canceled.
func (logging *Logging) closeLogs() error {
	for _, key := range logging.writerKeys {
		_, err := writers.Delete(key)
		if err != nil {
			log.Printf("[ERROR] Closing log writer %v: %v", key, err)
		}
	}
	return nil
}

// Logger returns a logger that is ready for the module to use.
// Logger builds a named zap logger for the given module by teeing
// together the cores of every configured log whose include/exclude
// rules could match the module's ID. Logs with filtering rules are
// wrapped in a filteringCore so per-entry logger names are checked.
func (logging *Logging) Logger(mod Module) *zap.Logger {
	modID := string(mod.CaddyModule().ID)
	var cores []zapcore.Core
	var options []zap.Option

	if logging != nil {
		for _, l := range logging.Logs {
			if l.matchesModule(modID) {
				if len(l.Include) == 0 && len(l.Exclude) == 0 {
					cores = append(cores, l.core)
					continue
				}
				// options are built at most once, from the first
				// filtered log encountered
				if len(options) == 0 {
					newOptions, err := l.buildOptions()
					if err != nil {
						Log().Error("building options for logger",
							zap.String("module", modID),
							zap.Error(err))
					}
					options = newOptions
				}
				cores = append(cores, &filteringCore{Core: l.core, cl: l})
			}
		}
	}

	multiCore := zapcore.NewTee(cores...)

	return zap.New(multiCore, options...).Named(modID)
}

// openWriter opens a writer using opener, and returns true if
// the writer is new, or false if the writer already exists.
func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) {
	key := opener.WriterKey()
	writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) {
		w, err := opener.OpenWriter()
		return writerDestructor{w}, err
	})
	if err != nil {
		return nil, false, err
	}
	// remember the key so closeLogs can release this writer later
	logging.writerKeys = append(logging.writerKeys, key)
	return writer.(io.WriteCloser), !loaded, nil
}

// WriterOpener is a module that can open a log writer.
// It can return a human-readable string representation
// of itself so that operators can understand where
// the logs are going.
type WriterOpener interface {
	fmt.Stringer

	// WriterKey is a string that uniquely identifies this
	// writer configuration. It is not shown to humans.
	WriterKey() string

	// OpenWriter opens a log for writing. The writer
	// should be safe for concurrent use but need not
	// be synchronous.
	OpenWriter() (io.WriteCloser, error)
}

// IsWriterStandardStream returns true if the input is a
// writer-opener to a standard stream (stdout, stderr).
func IsWriterStandardStream(wo WriterOpener) bool {
	switch wo.(type) {
	case StdoutWriter, StderrWriter,
		*StdoutWriter, *StderrWriter:
		return true
	}
	return false
}

// writerDestructor adapts an io.WriteCloser to the Destructor
// interface used by the writers usage pool, closing the writer
// when the pool releases it.
type writerDestructor struct {
	io.WriteCloser
}

func (wdest writerDestructor) Destruct() error {
	return wdest.Close()
}

// BaseLog contains the common logging parameters for logging.
type BaseLog struct {
	// The module that writes out log entries for the sink.
	WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`

	// The encoder is how the log entries are formatted or encoded.
	EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`

	// Tees entries through a zap.Core module which can extract
	// log entry metadata and fields for further processing.
	CoreRaw json.RawMessage `json:"core,omitempty" caddy:"namespace=caddy.logging.cores inline_key=module"`

	// Level is the minimum level to emit, and is inclusive.
	// Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
	Level string `json:"level,omitempty"`

	// Sampling configures log entry sampling. If enabled,
	// only some log entries will be emitted. This is useful
	// for improving performance on extremely high-pressure
	// servers.
	Sampling *LogSampling `json:"sampling,omitempty"`

	// If true, the log entry will include the caller's
	// file name and line number. Default off.
	WithCaller bool `json:"with_caller,omitempty"`

	// If non-zero, and `with_caller` is true, this many
	// stack frames will be skipped when determining the
	// caller. Default 0.
	WithCallerSkip int `json:"with_caller_skip,omitempty"`

	// If not empty, the log entry will include a stack trace
	// for all logs at the given level or higher. See `level`
	// for possible values. Default off.
	WithStacktrace string `json:"with_stacktrace,omitempty"`

	writerOpener WriterOpener
	writer       io.WriteCloser
	encoder      zapcore.Encoder
	levelEnabler zapcore.LevelEnabler
	core         zapcore.Core
}

// provisionCommon loads the writer, level, encoder, and optional
// wrapping core modules for this log, and builds its zap core.
// The stderr writer and a production encoder are the defaults.
func (cl *BaseLog) provisionCommon(ctx Context, logging *Logging) error {
	if cl.WriterRaw != nil {
		mod, err := ctx.LoadModule(cl, "WriterRaw")
		if err != nil {
			return fmt.Errorf("loading log writer module: %v", err)
		}
		cl.writerOpener = mod.(WriterOpener)
	}
	if cl.writerOpener == nil {
		cl.writerOpener = StderrWriter{}
	}
	var err error
	cl.writer, _, err = logging.openWriter(cl.writerOpener)
	if err != nil {
		return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
	}

	// set up the log level
	cl.levelEnabler, err = parseLevel(cl.Level)
	if err != nil {
		return err
	}

	if cl.EncoderRaw != nil {
		mod, err := ctx.LoadModule(cl, "EncoderRaw")
		if err != nil {
			return fmt.Errorf("loading log encoder module: %v", err)
		}
		cl.encoder = mod.(zapcore.Encoder)

		// if the encoder module needs the writer to determine
		// the correct default to use for a nested encoder, we
		// pass it down as a secondary provisioning step
		if cfd, ok := mod.(ConfiguresFormatterDefault); ok {
			if err := cfd.ConfigureDefaultFormat(cl.writerOpener); err != nil {
				return fmt.Errorf("configuring default format for encoder module: %v", err)
			}
		}
	}
	if cl.encoder == nil {
		cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
	}
	cl.buildCore()
	if cl.CoreRaw != nil {
		mod, err := ctx.LoadModule(cl, "CoreRaw")
		if err != nil {
			return fmt.Errorf("loading log core module: %v", err)
		}
		core := mod.(zapcore.Core)
		// tee the user-supplied core alongside the one we built
		cl.core = zapcore.NewTee(cl.core, core)
	}
	return nil
}

// buildCore constructs the zap core from the provisioned encoder,
// writer, and level, optionally wrapped with a sampler.
func (cl *BaseLog) buildCore() {
	// logs which only discard their output don't need
	// to perform encoding or any other processing steps
	// at all, so just shortcut to a nop core instead
	if _, ok := cl.writerOpener.(*DiscardWriter); ok {
		cl.core = zapcore.NewNopCore()
		return
	}
	c := zapcore.NewCore(
		cl.encoder,
		zapcore.AddSync(cl.writer),
		cl.levelEnabler,
	)
	if cl.Sampling != nil {
		// fill in sampling defaults: 1s window, first 100
		// entries kept, then 1 in 100 thereafter
		if cl.Sampling.Interval == 0 {
			cl.Sampling.Interval = 1 * time.Second
		}
		if cl.Sampling.First == 0 {
			cl.Sampling.First = 100
		}
		if cl.Sampling.Thereafter == 0 {
			cl.Sampling.Thereafter = 100
		}
		c = zapcore.NewSamplerWithOptions(c, cl.Sampling.Interval,
			cl.Sampling.First, cl.Sampling.Thereafter)
	}
	cl.core = c
}

// buildOptions returns the zap options implied by the caller and
// stacktrace settings of this log.
func (cl *BaseLog) buildOptions() ([]zap.Option, error) {
	var options []zap.Option
	if cl.WithCaller {
		options = append(options, zap.AddCaller())
		if cl.WithCallerSkip != 0 {
			options = append(options, zap.AddCallerSkip(cl.WithCallerSkip))
		}
	}
	if cl.WithStacktrace != "" {
		levelEnabler, err := parseLevel(cl.WithStacktrace)
		if err != nil {
			return options, fmt.Errorf("setting up default Caddy log: %v", err)
		}
		options = append(options, zap.AddStacktrace(levelEnabler))
	}
	return options, nil
}

// SinkLog configures the default Go standard library
// global logger in the log package. This is necessary because
// module dependencies which are not built specifically for
// Caddy will use the standard logger. This is also known as
// the "sink" logger.
type SinkLog struct {
	BaseLog
}

// provision sets up the sink log and redirects the standard
// library's global logger into it; the redirect is undone by a
// cleanup func registered on the context.
func (sll *SinkLog) provision(ctx Context, logging *Logging) error {
	if err := sll.provisionCommon(ctx, logging); err != nil {
		return err
	}

	options, err := sll.buildOptions()
	if err != nil {
		return err
	}

	logger := zap.New(sll.core, options...)
	ctx.cleanupFuncs = append(ctx.cleanupFuncs, zap.RedirectStdLog(logger))
	return nil
}

// CustomLog represents a custom logger configuration.
//
// By default, a log will emit all log entries. Some entries
// will be skipped if sampling is enabled. Further, the Include
// and Exclude parameters define which loggers (by name) are
// allowed or rejected from emitting in this log. If both Include
// and Exclude are populated, their values must be mutually
// exclusive, and longer namespaces have priority. If neither
// are populated, all logs are emitted.
type CustomLog struct {
	BaseLog

	// Include defines the names of loggers to emit in this
	// log. For example, to include only logs emitted by the
	// admin API, you would include "admin.api".
	Include []string `json:"include,omitempty"`

	// Exclude defines the names of loggers that should be
	// skipped by this log. For example, to exclude only
	// HTTP access logs, you would exclude "http.log.access".
	Exclude []string `json:"exclude,omitempty"`
}

// provision sets up the custom log and validates that its
// Include and Exclude lists are mutually consistent.
func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
	if err := cl.provisionCommon(ctx, logging); err != nil {
		return err
	}

	// If both Include and Exclude lists are populated, then each item must
	// be a superspace or subspace of an item in the other list, because
	// populating both lists means that any given item is either a rule
	// or an exception to another rule. But if the item is not a super-
	// or sub-space of any item in the other list, it is neither a rule
	// nor an exception, and is a contradiction. Ensure, too, that the
	// sets do not intersect, which is also a contradiction.
	if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
		// prevent intersections
		for _, allow := range cl.Include {
			if slices.Contains(cl.Exclude, allow) {
				return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
			}
		}

		// ensure namespaces are nested
	outer:
		for _, allow := range cl.Include {
			for _, deny := range cl.Exclude {
				if strings.HasPrefix(allow+".", deny+".") ||
					strings.HasPrefix(deny+".", allow+".") {
					continue outer
				}
			}
			return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
		}
	}
	return nil
}

// matchesModule reports whether any logs emitted by the module
// with the given ID could be permitted by this log's rules.
func (cl *CustomLog) matchesModule(moduleID string) bool {
	return cl.loggerAllowed(moduleID, true)
}

// loggerAllowed returns true if name is allowed to emit
// to cl. isModule should be true if name is the name of
// a module and you want to see if ANY of that module's
// logs would be permitted.
func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
	// accept all loggers by default
	if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
		return true
	}

	// append a dot so that partial names don't match
	// (i.e. we don't want "foo.b" to match "foo.bar"); we
	// will also have to append a dot when we do HasPrefix
	// below to compensate for when namespaces are equal
	if name != "" && name != "*" && name != "." {
		name += "."
	}

	// the longest matching namespace on each side wins
	var longestAccept, longestReject int

	if len(cl.Include) > 0 {
		for _, namespace := range cl.Include {
			var hasPrefix bool
			if isModule {
				// for modules, a namespace nested under the module
				// name also counts, since ANY log the module emits
				// might match it
				hasPrefix = strings.HasPrefix(namespace+".", name)
			} else {
				hasPrefix = strings.HasPrefix(name, namespace+".")
			}
			if hasPrefix && len(namespace) > longestAccept {
				longestAccept = len(namespace)
			}
		}

		// the include list was populated, meaning that
		// a match in this list is absolutely required
		// if we are to accept the entry
		if longestAccept == 0 {
			return false
		}
	}

	if len(cl.Exclude) > 0 {
		for _, namespace := range cl.Exclude {
			// * == all logs emitted by modules
			// . == all logs emitted by core
			if (namespace == "*" && name != ".") ||
				(namespace == "." && name == ".") {
				return false
			}
			if strings.HasPrefix(name, namespace+".") &&
				len(namespace) > longestReject {
				longestReject = len(namespace)
			}
		}

		// the reject list is populated, so we have to
		// reject this entry if its match is better
		// than the best from the accept list
		if longestReject > longestAccept {
			return false
		}
	}

	return (longestAccept > longestReject) ||
		(len(cl.Include) == 0 && longestReject == 0)
}

// filteringCore filters log entries based on logger name,
// according to the rules of a CustomLog.
type filteringCore struct {
	zapcore.Core
	cl *CustomLog
}

// With properly wraps With.
func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core {
	return &filteringCore{
		Core: fc.Core.With(fields),
		cl:   fc.cl,
	}
}

// Check only allows the log entry if its logger name
// is allowed from the include/exclude rules of fc.cl.
func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { if fc.cl.loggerAllowed(e.LoggerName, false) { return fc.Core.Check(e, ce) } return ce } // LogSampling configures log entry sampling. type LogSampling struct { // The window over which to conduct sampling. Interval time.Duration `json:"interval,omitempty"` // Log this many entries within a given level and // message for each interval. First int `json:"first,omitempty"` // If more entries with the same level and message // are seen during the same interval, keep one in // this many entries until the end of the interval. Thereafter int `json:"thereafter,omitempty"` } type ( // StdoutWriter writes logs to standard out. StdoutWriter struct{} // StderrWriter writes logs to standard error. StderrWriter struct{} // DiscardWriter discards all writes. DiscardWriter struct{} ) // CaddyModule returns the Caddy module information. func (StdoutWriter) CaddyModule() ModuleInfo { return ModuleInfo{ ID: "caddy.logging.writers.stdout", New: func() Module { return new(StdoutWriter) }, } } // CaddyModule returns the Caddy module information. func (StderrWriter) CaddyModule() ModuleInfo { return ModuleInfo{ ID: "caddy.logging.writers.stderr", New: func() Module { return new(StderrWriter) }, } } // CaddyModule returns the Caddy module information. func (DiscardWriter) CaddyModule() ModuleInfo { return ModuleInfo{ ID: "caddy.logging.writers.discard", New: func() Module { return new(DiscardWriter) }, } } func (StdoutWriter) String() string { return "stdout" } func (StderrWriter) String() string { return "stderr" } func (DiscardWriter) String() string { return "discard" } // WriterKey returns a unique key representing stdout. func (StdoutWriter) WriterKey() string { return "std:out" } // WriterKey returns a unique key representing stderr. func (StderrWriter) WriterKey() string { return "std:err" } // WriterKey returns a unique key representing discard. 
func (DiscardWriter) WriterKey() string { return "discard" }

// OpenWriter returns os.Stdout that can't be closed.
func (StdoutWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{os.Stdout}, nil
}

// OpenWriter returns os.Stderr that can't be closed.
func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{os.Stderr}, nil
}

// OpenWriter returns io.Discard that can't be closed.
func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
	return notClosable{io.Discard}, nil
}

// notClosable is an io.WriteCloser that can't be closed.
type notClosable struct{ io.Writer }

// Close is a no-op so that shared writers such as os.Stdout
// and os.Stderr are never actually closed by log teardown.
func (fc notClosable) Close() error { return nil }

// defaultCustomLog pairs a CustomLog with the zap logger built from it.
type defaultCustomLog struct {
	*CustomLog
	logger *zap.Logger
}

// newDefaultProductionLog configures a custom log that is
// intended for use by default if no other log is specified
// in a config. It writes to stderr, uses the console encoder,
// and enables INFO-level logs and higher.
func newDefaultProductionLog() (*defaultCustomLog, error) {
	cl := new(CustomLog)
	cl.writerOpener = StderrWriter{}
	var err error
	cl.writer, err = cl.writerOpener.OpenWriter()
	if err != nil {
		return nil, err
	}
	cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
	cl.levelEnabler = zapcore.InfoLevel

	cl.buildCore()
	logger := zap.New(cl.core)

	// capture logs from other libraries which
	// may not be using zap logging directly
	_ = zap.RedirectStdLog(logger)

	return &defaultCustomLog{
		CustomLog: cl,
		logger:    logger,
	}, nil
}

// newDefaultProductionLogEncoder chooses between a human-friendly
// console encoder (when writing to an interactive terminal on a
// standard stream) and a JSON encoder otherwise.
func newDefaultProductionLogEncoder(wo WriterOpener) zapcore.Encoder {
	encCfg := zap.NewProductionEncoderConfig()
	// NOTE(review): the terminal check is against os.Stderr regardless
	// of which standard stream wo targets — presumably because the
	// default log writes to stderr; confirm before relying on it for stdout.
	if IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
		// if interactive terminal, make output more human-readable by default
		encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
			encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000"))
		}
		if coloringEnabled {
			encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
		}
		return zapcore.NewConsoleEncoder(encCfg)
	}
	return zapcore.NewJSONEncoder(encCfg)
}

// parseLevel maps a (placeholder-expandable) level string to a zap
// level. An empty string defaults to INFO; unknown names are an error.
func parseLevel(levelInput string) (zapcore.LevelEnabler, error) {
	repl := NewReplacer()
	level, err := repl.ReplaceOrErr(levelInput, true, true)
	if err != nil {
		return nil, fmt.Errorf("invalid log level: %v", err)
	}
	level = strings.ToLower(level)

	// set up the log level
	switch level {
	case "debug":
		return zapcore.DebugLevel, nil
	case "", "info":
		return zapcore.InfoLevel, nil
	case "warn":
		return zapcore.WarnLevel, nil
	case "error":
		return zapcore.ErrorLevel, nil
	case "panic":
		return zapcore.PanicLevel, nil
	case "fatal":
		return zapcore.FatalLevel, nil
	default:
		return nil, fmt.Errorf("unrecognized log level: %s", level)
	}
}

// Log returns the current default logger.
func Log() *zap.Logger {
	defaultLoggerMu.RLock()
	defer defaultLoggerMu.RUnlock()
	return defaultLogger.logger
}

// BufferedLog sets the default logger to one that buffers
// logs before a config is loaded.
// Returns the buffered logger, the original default logger
// (for flushing on errors), and the buffer core so that the
// caller can flush the logs after the config is loaded or
// fails to load.
func BufferedLog() (*zap.Logger, *zap.Logger, *internal.LogBufferCore) {
	defaultLoggerMu.Lock()
	defer defaultLoggerMu.Unlock()
	origLogger := defaultLogger.logger
	bufferCore := internal.NewLogBufferCore(zap.InfoLevel)
	defaultLogger.logger = zap.New(bufferCore)
	return defaultLogger.logger, origLogger, bufferCore
}

var (
	// coloringEnabled honors the NO_COLOR convention and disables
	// color for monochrome terminals
	coloringEnabled = os.Getenv("NO_COLOR") == "" && os.Getenv("TERM") != "xterm-mono"

	// defaultLogger is the process-wide fallback logger; its
	// construction error is deliberately discarded since a logger
	// is needed unconditionally
	defaultLogger, _ = newDefaultProductionLog()
	defaultLoggerMu  sync.RWMutex
)

// writers pools log writers so equivalent outputs are shared.
var writers = NewUsagePool()

// ConfiguresFormatterDefault is an optional interface that
// encoder modules can implement to configure the default
// format of their encoder. This is useful for encoders
// which nest an encoder, that needs to know the writer
// in order to determine the correct default.
type ConfiguresFormatterDefault interface {
	ConfigureDefaultFormat(WriterOpener) error
}

// DefaultLoggerName is the key under which the default log is configured.
const DefaultLoggerName = "default"

// Interface guards
var (
	_ io.WriteCloser = (*notClosable)(nil)
	_ WriterOpener   = (*StdoutWriter)(nil)
	_ WriterOpener   = (*StderrWriter)(nil)
)



================================================
FILE: logging_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import "testing"

// TestCustomLog_loggerAllowed exercises the include/exclude matching
// rules of CustomLog.loggerAllowed via a table of representative cases.
func TestCustomLog_loggerAllowed(t *testing.T) {
	type fields struct {
		BaseLog BaseLog
		Include []string
		Exclude []string
	}
	type args struct {
		name     string
		isModule bool
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "include",
			fields: fields{
				Include: []string{"foo"},
			},
			args: args{
				name:     "foo",
				isModule: true,
			},
			want: true,
		},
		{
			name: "exclude",
			fields: fields{
				Exclude: []string{"foo"},
			},
			args: args{
				name:     "foo",
				isModule: true,
			},
			want: false,
		},
		{
			// an exact name appearing in both lists is rejected
			name: "include and exclude",
			fields: fields{
				Include: []string{"foo"},
				Exclude: []string{"foo"},
			},
			args: args{
				name:     "foo",
				isModule: true,
			},
			want: false,
		},
		{
			// a more specific include overrides a broader exclude
			name: "include and exclude (longer namespace)",
			fields: fields{
				Include: []string{"foo.bar"},
				Exclude: []string{"foo"},
			},
			args: args{
				name:     "foo.bar",
				isModule: true,
			},
			want: true,
		},
		{
			name: "excluded module is not printed",
			fields: fields{
				Include: []string{"admin.api.load"},
				Exclude: []string{"admin.api"},
			},
			args: args{
				name:     "admin.api",
				isModule: false,
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cl := &CustomLog{
				BaseLog: tt.fields.BaseLog,
				Include: tt.fields.Include,
				Exclude: tt.fields.Exclude,
			}
			if got := cl.loggerAllowed(tt.args.name, tt.args.isModule); got != tt.want {
				t.Errorf("CustomLog.loggerAllowed() = %v, want %v", got, tt.want)
			}
		})
	}
}



================================================
FILE: metrics.go
================================================
package caddy

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/caddyserver/caddy/v2/internal/metrics"
)

// define and register the metrics used in this package.
func init() {
	const ns, sub = "caddy", "admin"
	adminMetrics.requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: ns,
		Subsystem: sub,
		Name:      "http_requests_total",
		Help:      "Counter of requests made to the Admin API's HTTP endpoints.",
	}, []string{"handler", "path", "code", "method"})
	adminMetrics.requestErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: ns,
		Subsystem: sub,
		Name:      "http_request_errors_total",
		Help:      "Number of requests resulting in middleware errors.",
	}, []string{"handler", "path", "method"})
	globalMetrics.configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "caddy_config_last_reload_successful",
		Help: "Whether the last configuration reload attempt was successful.",
	})
	globalMetrics.configSuccessTime = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "caddy_config_last_reload_success_timestamp_seconds",
		Help: "Timestamp of the last successful configuration reload.",
	})
}

// adminMetrics is a collection of metrics that can be tracked for the admin API.
var adminMetrics = struct {
	requestCount  *prometheus.CounterVec
	requestErrors *prometheus.CounterVec
}{}

// globalMetrics is a collection of metrics that can be tracked for Caddy global state
var globalMetrics = struct {
	configSuccess     prometheus.Gauge
	configSuccessTime prometheus.Gauge
}{}

// Similar to promhttp.InstrumentHandlerCounter, but upper-cases method names
// instead of lower-casing them.
//
// Unlike promhttp.InstrumentHandlerCounter, this assumes a "code" and "method"
// label is present, and will panic otherwise.
func instrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// wrap the ResponseWriter so the status code can be observed
		d := newDelegator(w)
		next.ServeHTTP(d, r)
		counter.With(prometheus.Labels{
			"code":   metrics.SanitizeCode(d.status),
			"method": metrics.SanitizeMethod(r.Method),
		}).Inc()
	})
}

// newDelegator wraps w so that the response status code is recorded.
func newDelegator(w http.ResponseWriter) *delegator {
	return &delegator{
		ResponseWriter: w,
	}
}

// delegator records the status code written to the wrapped ResponseWriter.
type delegator struct {
	http.ResponseWriter
	status int // zero if WriteHeader was never called
}

func (d *delegator) WriteHeader(code int) {
	d.status = code
	d.ResponseWriter.WriteHeader(code)
}

// Unwrap returns the underlying ResponseWriter, necessary for
// http.ResponseController to work correctly.
func (d *delegator) Unwrap() http.ResponseWriter {
	return d.ResponseWriter
}



================================================
FILE: modules/caddyevents/app.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyevents

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(App{})
}

// App implements a global eventing system within Caddy.
// Modules can emit and subscribe to events, providing
// hooks into deep parts of the code base that aren't
// otherwise accessible. Events provide information about
// what and when things are happening, and this facility
// allows handlers to take action when events occur,
// add information to the event's metadata, and even
// control program flow in some cases.
//
// Events are propagated in a DOM-like fashion. An event
// emitted from module `a.b.c` (the "origin") will first
// invoke handlers listening to `a.b.c`, then `a.b`,
// then `a`, then those listening regardless of origin.
// If a handler returns the special error Aborted, then
// propagation immediately stops and the event is marked
// as aborted. Emitters may optionally choose to adjust
// program flow based on an abort.
//
// Modules can subscribe to events by origin and/or name.
// A handler is invoked only if it is subscribed to the
// event by name and origin. Subscriptions should be
// registered during the provisioning phase, before apps
// are started.
//
// Event handlers are fired synchronously as part of the
// regular flow of the program. This allows event handlers
// to control the flow of the program if the origin permits
// it and also allows handlers to convey new information
// back into the origin module before it continues.
// In essence, event handlers are similar to HTTP
// middleware handlers.
//
// Event bindings/subscribers are unordered; i.e.
// event handlers are invoked in an arbitrary order.
// Event handlers should not rely on the logic of other
// handlers to succeed.
//
// The entirety of this app module is EXPERIMENTAL and
// subject to change. Pay attention to release notes.
type App struct {
	// Subscriptions bind handlers to one or more events
	// either globally or scoped to specific modules or module
	// namespaces.
	Subscriptions []*Subscription `json:"subscriptions,omitempty"`

	// Map of event name to map of module ID/namespace to handlers
	subscriptions map[string]map[caddy.ModuleID][]Handler

	// logger for this app, set during Provision
	logger *zap.Logger

	// started is set by Start; once true, Subscribe refuses new bindings
	started bool
}

// Subscription represents binding of one or more handlers to
// one or more events.
type Subscription struct {
	// The name(s) of the event(s) to bind to. Default: all events.
	Events []string `json:"events,omitempty"`

	// The ID or namespace of the module(s) from which events
	// originate to listen to for events. Default: all modules.
	//
	// Events propagate up, so events emitted by module "a.b.c"
	// will also trigger the event for "a.b" and "a". Thus, to
	// receive all events from "a.b.c" and "a.b.d", for example,
	// one can subscribe to either "a.b" or all of "a" entirely.
	Modules []caddy.ModuleID `json:"modules,omitempty"`

	// The event handler modules. These implement the actual
	// behavior to invoke when an event occurs. At least one
	// handler is required.
	HandlersRaw []json.RawMessage `json:"handlers,omitempty" caddy:"namespace=events.handlers inline_key=handler"`

	// The decoded handlers; Go code that is subscribing to
	// an event should set this field directly; HandlersRaw
	// is meant for JSON configuration to fill out this field.
	Handlers []Handler `json:"-"`
}

// CaddyModule returns the Caddy module information.
func (App) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "events",
		New: func() caddy.Module { return new(App) },
	}
}

// Provision sets up the app.
func (app *App) Provision(ctx caddy.Context) error { app.logger = ctx.Logger() app.subscriptions = make(map[string]map[caddy.ModuleID][]Handler) for _, sub := range app.Subscriptions { if sub.HandlersRaw == nil { continue } handlersIface, err := ctx.LoadModule(sub, "HandlersRaw") if err != nil { return fmt.Errorf("loading event subscriber modules: %v", err) } for _, h := range handlersIface.([]any) { sub.Handlers = append(sub.Handlers, h.(Handler)) } if len(sub.Handlers) == 0 { // pointless to bind without any handlers return fmt.Errorf("no handlers defined") } } return nil } // Start runs the app. func (app *App) Start() error { for _, sub := range app.Subscriptions { if err := app.Subscribe(sub); err != nil { return err } } app.started = true return nil } // Stop gracefully shuts down the app. func (app *App) Stop() error { return nil } // Subscribe binds one or more event handlers to one or more events // according to the subscription s. For now, subscriptions can only // be created during the provision phase; new bindings cannot be // created after the events app has started. func (app *App) Subscribe(s *Subscription) error { if app.started { return fmt.Errorf("events already started; new subscriptions closed") } // handle special case of catch-alls (omission of event name or module space implies all) if len(s.Events) == 0 { s.Events = []string{""} } if len(s.Modules) == 0 { s.Modules = []caddy.ModuleID{""} } for _, eventName := range s.Events { if app.subscriptions[eventName] == nil { app.subscriptions[eventName] = make(map[caddy.ModuleID][]Handler) } for _, originModule := range s.Modules { app.subscriptions[eventName][originModule] = append(app.subscriptions[eventName][originModule], s.Handlers...) } } return nil } // On is syntactic sugar for Subscribe() that binds a single handler // to a single event from any module. If the eventName is empty string, // it counts for all events. 
func (app *App) On(eventName string, handler Handler) error {
	return app.Subscribe(&Subscription{
		Events:   []string{eventName},
		Handlers: []Handler{handler},
	})
}

// Emit creates and dispatches an event named eventName to all relevant handlers with
// the metadata data. Events are emitted and propagated synchronously. The returned Event
// value will have any additional information from the invoked handlers.
//
// Note that the data map is not copied, for efficiency. After Emit() is called, the
// data passed in should not be changed in other goroutines.
func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) caddy.Event {
	logger := app.logger.With(zap.String("name", eventName))

	// a creation error is logged but not fatal; dispatch proceeds
	// with whatever event value was returned
	e, err := caddy.NewEvent(ctx, eventName, data)
	if err != nil {
		logger.Error("failed to create event", zap.Error(err))
	}

	var originModule caddy.ModuleInfo
	var originModuleID caddy.ModuleID
	var originModuleName string
	if origin := e.Origin(); origin != nil {
		originModule = origin.CaddyModule()
		originModuleID = originModule.ID
		originModuleName = originModule.String()
	}

	logger = logger.With(
		zap.String("id", e.ID().String()),
		zap.String("origin", originModuleName))

	// add event info to replacer, make sure it's in the context
	repl, ok := ctx.Context.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	if !ok {
		repl = caddy.NewReplacer()
		ctx.Context = context.WithValue(ctx.Context, caddy.ReplacerCtxKey, repl)
	}
	repl.Map(func(key string) (any, bool) {
		switch key {
		case "event":
			return e, true
		case "event.id":
			return e.ID(), true
		case "event.name":
			return e.Name(), true
		case "event.time":
			return e.Timestamp(), true
		case "event.time_unix":
			return e.Timestamp().UnixMilli(), true
		case "event.module":
			return originModuleID, true
		case "event.data":
			return e.Data, true
		}

		// dynamic lookup of individual data keys: {event.data.<key>}
		if after, ok0 := strings.CutPrefix(key, "event.data."); ok0 {
			key = after
			if val, ok := e.Data[key]; ok {
				return val, true
			}
		}

		return nil, false
	})

	logger = logger.WithLazy(zap.Any("data", e.Data))

	logger.Debug("event")

	// invoke handlers bound to the event by name and also all events; this for loop
	// iterates twice at most: once for the event name, once for "" (all events)
	for {
		moduleID := originModuleID

		// implement propagation up the module tree (i.e. start with "a.b.c" then "a.b" then "a" then "")
		for {
			if app.subscriptions[eventName] == nil {
				break // shortcut if event not bound at all
			}

			for _, handler := range app.subscriptions[eventName][moduleID] {
				// honor context cancellation between handler invocations
				select {
				case <-ctx.Done():
					logger.Error("context canceled; event handling stopped")
					return e
				default:
				}

				// this log can be a useful sanity check to ensure your handlers are in fact being invoked
				// (see https://github.com/mholt/caddy-events-exec/issues/6)
				logger.Debug("invoking subscribed handler",
					zap.String("subscribed_to", eventName),
					zap.Any("handler", handler))

				if err := handler.Handle(ctx, e); err != nil {
					aborted := errors.Is(err, caddy.ErrEventAborted)

					logger.Error("handler error",
						zap.Error(err),
						zap.Bool("aborted", aborted))

					// an abort stops all further propagation
					if aborted {
						e.Aborted = err
						return e
					}
				}
			}

			if moduleID == "" {
				break
			}
			// strip the last dotted label to propagate up one level
			lastDot := strings.LastIndex(string(moduleID), ".")
			if lastDot < 0 {
				moduleID = "" // include handlers bound to events regardless of module
			} else {
				moduleID = moduleID[:lastDot]
			}
		}

		// include handlers listening to all events
		if eventName == "" {
			break
		}
		eventName = ""
	}

	return e
}

// Handler is a type that can handle events.
type Handler interface {
	Handle(context.Context, caddy.Event) error
}

// Interface guards
var (
	_ caddy.App         = (*App)(nil)
	_ caddy.Provisioner = (*App)(nil)
)



================================================
FILE: modules/caddyevents/eventsconfig/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package eventsconfig is for configuring caddyevents.App with the
// Caddyfile. This code can't be in the caddyevents package because
// the httpcaddyfile package imports caddyhttp, which imports
// caddyevents: hence, it creates an import cycle.
package eventsconfig

import (
	"encoding/json"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyevents"
)

func init() {
	httpcaddyfile.RegisterGlobalOption("events", parseApp)
}

// parseApp configures the "events" global option from Caddyfile to set up the events app.
// Syntax:
//
//	events {
//		on <event> <handler...>
//	}
//
// If <event> is *, then it will bind to all events.
func parseApp(d *caddyfile.Dispenser, _ any) (any, error) {
	d.Next() // consume option name
	app := new(caddyevents.App)

	for d.NextBlock(0) {
		switch d.Val() {
		case "on":
			if !d.NextArg() {
				return nil, d.ArgErr()
			}
			eventName := d.Val()
			// "*" is the Caddyfile spelling of the catch-all,
			// which the events app represents as ""
			if eventName == "*" {
				eventName = ""
			}

			if !d.NextArg() {
				return nil, d.ArgErr()
			}
			handlerName := d.Val()
			modID := "events.handlers." + handlerName
			unm, err := caddyfile.UnmarshalModule(d, modID)
			if err != nil {
				return nil, err
			}

			app.Subscriptions = append(app.Subscriptions, &caddyevents.Subscription{
				Events: []string{eventName},
				HandlersRaw: []json.RawMessage{
					caddyconfig.JSONModuleObject(unm, "handler", handlerName, nil),
				},
			})

		default:
			return nil, d.ArgErr()
		}
	}

	return httpcaddyfile.App{
		Name:  "events",
		Value: caddyconfig.JSON(app, nil),
	}, nil
}



================================================
FILE: modules/caddyfs/filesystem.go
================================================
package caddyfs

import (
	"encoding/json"
	"fmt"
	"io/fs"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func init() {
	caddy.RegisterModule(Filesystems{})
	httpcaddyfile.RegisterGlobalOption("filesystem", parseFilesystems)
}

// moduleEntry pairs a filesystem name with the caddy.fs module that backs it.
type moduleEntry struct {
	// Key is the name the filesystem is registered under.
	Key string `json:"name,omitempty"`
	// FileSystemRaw is the raw JSON of the caddy.fs module config.
	FileSystemRaw json.RawMessage `json:"file_system,omitempty" caddy:"namespace=caddy.fs inline_key=backend"`
	// fileSystem is the decoded module, set during Provision.
	fileSystem fs.FS
}

// Filesystems loads caddy.fs modules into the global filesystem map
type Filesystems struct {
	Filesystems []*moduleEntry `json:"filesystems"`

	// defers holds the unregister callbacks to run at Cleanup
	defers []func()
}

// parseFilesystems appends one filesystem entry per occurrence of the
// "filesystem" global option, reusing the accumulated value if present.
func parseFilesystems(d *caddyfile.Dispenser, existingVal any) (any, error) {
	p := &Filesystems{}
	current, ok := existingVal.(*Filesystems)
	if ok {
		p = current
	}
	x := &moduleEntry{}
	err := x.UnmarshalCaddyfile(d)
	if err != nil {
		return nil, err
	}
	p.Filesystems = append(p.Filesystems, x)
	return p, nil
}

// CaddyModule returns the Caddy module information.
func (Filesystems) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.filesystems", New: func() caddy.Module { return new(Filesystems) }, } } func (xs *Filesystems) Start() error { return nil } func (xs *Filesystems) Stop() error { return nil } func (xs *Filesystems) Provision(ctx caddy.Context) error { // load the filesystem module for _, f := range xs.Filesystems { if len(f.FileSystemRaw) > 0 { mod, err := ctx.LoadModule(f, "FileSystemRaw") if err != nil { return fmt.Errorf("loading file system module: %v", err) } f.fileSystem = mod.(fs.FS) } // register that module ctx.Logger().Debug("registering fs", zap.String("fs", f.Key)) ctx.FileSystems().Register(f.Key, f.fileSystem) // remember to unregister the module when we are done xs.defers = append(xs.defers, func() { ctx.Logger().Debug("unregistering fs", zap.String("fs", f.Key)) ctx.FileSystems().Unregister(f.Key) }) } return nil } func (f *Filesystems) Cleanup() error { for _, v := range f.defers { v() } return nil } func (f *moduleEntry) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { for d.Next() { // key required for now if !d.Args(&f.Key) { return d.ArgErr() } // get the module json if !d.NextArg() { return d.ArgErr() } name := d.Val() modID := "caddy.fs." + name unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } fsys, ok := unm.(fs.FS) if !ok { return d.Errf("module %s (%T) is not a supported file system implementation (requires fs.FS)", modID, unm) } f.FileSystemRaw = caddyconfig.JSONModuleObject(fsys, "backend", name, nil) } return nil } ================================================ FILE: modules/caddyhttp/app.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "cmp" "context" "crypto/tls" "errors" "fmt" "maps" "net" "net/http" "strconv" "sync" "time" "go.uber.org/zap" "golang.org/x/net/http2" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyevents" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { caddy.RegisterModule(App{}) } // App is a robust, production-ready HTTP server. // // HTTPS is enabled by default if host matchers with qualifying names are used // in any of routes; certificates are automatically provisioned and renewed. // Additionally, automatic HTTPS will also enable HTTPS for servers that listen // only on the HTTPS port but which do not have any TLS connection policies // defined by adding a good, default TLS connection policy. // // In HTTP routes, additional placeholders are available (replace any `*`): // // Placeholder | Description // ------------|--------------- // `{http.request.body}` | The request body (⚠️ inefficient; use only for debugging) // `{http.request.body_base64}` | The request body, base64-encoded (⚠️ for debugging) // `{http.request.cookie.*}` | HTTP request cookie // `{http.request.duration}` | Time up to now spent handling the request (after decoding headers from client) // `{http.request.duration_ms}` | Same as 'duration', but in milliseconds. 
// `{http.request.uuid}` | The request unique identifier // `{http.request.header.*}` | Specific request header field // `{http.request.host}` | The host part of the request's Host header // `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. for foo.example.com: 0=com, 1=example, 2=foo // `{http.request.hostport}` | The host and port from the request's Host header // `{http.request.method}` | The request method // `{http.request.orig_method}` | The request's original method // `{http.request.orig_uri}` | The request's original URI // `{http.request.orig_uri.path}` | The request's original path // `{http.request.orig_uri.path.*}` | Parts of the original path, split by `/` (0-based from left) // `{http.request.orig_uri.path.dir}` | The request's original directory // `{http.request.orig_uri.path.file}` | The request's original filename // `{http.request.orig_uri.query}` | The request's original query string (without `?`) // `{http.request.port}` | The port part of the request's Host header // `{http.request.proto}` | The protocol of the request // `{http.request.local.host}` | The host (IP) part of the local address the connection arrived on // `{http.request.local.port}` | The port part of the local address the connection arrived on // `{http.request.local}` | The local address the connection arrived on // `{http.request.remote.host}` | The host (IP) part of the remote client's address, if available (not known with HTTP/3 early data) // `{http.request.remote.port}` | The port part of the remote client's address // `{http.request.remote}` | The address of the remote client // `{http.request.scheme}` | The request scheme, typically `http` or `https` // `{http.request.tls.version}` | The TLS version name // `{http.request.tls.cipher_suite}` | The TLS cipher suite // `{http.request.tls.resumed}` | The TLS connection resumed a previous connection // `{http.request.tls.proto}` | The negotiated next protocol // `{http.request.tls.proto_mutual}` 
| The negotiated next protocol was advertised by the server // `{http.request.tls.server_name}` | The server name requested by the client, if any // `{http.request.tls.ech}` | Whether ECH was offered by the client and accepted by the server // `{http.request.tls.client.fingerprint}` | The SHA256 checksum of the client certificate // `{http.request.tls.client.public_key}` | The public key of the client certificate. // `{http.request.tls.client.public_key_sha256}` | The SHA256 checksum of the client's public key. // `{http.request.tls.client.certificate_pem}` | The PEM-encoded value of the certificate. // `{http.request.tls.client.certificate_der_base64}` | The base64-encoded value of the certificate. // `{http.request.tls.client.issuer}` | The issuer DN of the client certificate // `{http.request.tls.client.serial}` | The serial number of the client certificate // `{http.request.tls.client.subject}` | The subject DN of the client certificate // `{http.request.tls.client.san.dns_names.*}` | SAN DNS names(index optional) // `{http.request.tls.client.san.emails.*}` | SAN email addresses (index optional) // `{http.request.tls.client.san.ips.*}` | SAN IP addresses (index optional) // `{http.request.tls.client.san.uris.*}` | SAN URIs (index optional) // `{http.request.uri}` | The full request URI // `{http.request.uri.path}` | The path component of the request URI // `{http.request.uri.path.*}` | Parts of the path, split by `/` (0-based from left) // `{http.request.uri.path.dir}` | The directory, excluding leaf filename // `{http.request.uri.path.file}` | The filename of the path, excluding directory // `{http.request.uri.query}` | The query string (without `?`) // `{http.request.uri.query.*}` | Individual query string value // `{http.response.header.*}` | Specific response header field // `{http.vars.*}` | Custom variables in the HTTP handler chain // `{http.shutting_down}` | True if the HTTP app is shutting down // `{http.time_until_shutdown}` | Time until HTTP server 
shutdown, if scheduled type App struct { // HTTPPort specifies the port to use for HTTP (as opposed to HTTPS), // which is used when setting up HTTP->HTTPS redirects or ACME HTTP // challenge solvers. Default: 80. HTTPPort int `json:"http_port,omitempty"` // HTTPSPort specifies the port to use for HTTPS, which is used when // solving the ACME TLS-ALPN challenges, or whenever HTTPS is needed // but no specific port number is given. Default: 443. HTTPSPort int `json:"https_port,omitempty"` // GracePeriod is how long to wait for active connections when shutting // down the servers. During the grace period, no new connections are // accepted, idle connections are closed, and active connections will // be given the full length of time to become idle and close. // Once the grace period is over, connections will be forcefully closed. // If zero, the grace period is eternal. Default: 0. GracePeriod caddy.Duration `json:"grace_period,omitempty"` // ShutdownDelay is how long to wait before initiating the grace // period. When this app is stopping (e.g. during a config reload or // process exit), all servers will be shut down. Normally this immediately // initiates the grace period. However, if this delay is configured, servers // will not be shut down until the delay is over. During this time, servers // continue to function normally and allow new connections. At the end, the // grace period will begin. This can be useful to allow downstream load // balancers time to move this instance out of the rotation without hiccups. // // When shutdown has been scheduled, placeholders {http.shutting_down} (bool) // and {http.time_until_shutdown} (duration) may be useful for health checks. ShutdownDelay caddy.Duration `json:"shutdown_delay,omitempty"` // Servers is the list of servers, keyed by arbitrary names chosen // at your discretion for your own convenience; the keys do not // affect functionality. 
Servers map[string]*Server `json:"servers,omitempty"` // If set, metrics observations will be enabled. // This setting is EXPERIMENTAL and subject to change. Metrics *Metrics `json:"metrics,omitempty"` ctx caddy.Context logger *zap.Logger tlsApp *caddytls.TLS // stopped indicates whether the app has stopped // It can only happen if it has started successfully in the first place. // Otherwise, Cleanup will call Stop to clean up resources. stopped bool // used temporarily between phases 1 and 2 of auto HTTPS allCertDomains map[string]struct{} } // CaddyModule returns the Caddy module information. func (App) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http", New: func() caddy.Module { return new(App) }, } } // Provision sets up the app. func (app *App) Provision(ctx caddy.Context) error { // store some references app.logger = ctx.Logger() app.ctx = ctx // provision TLS and events apps tlsAppIface, err := ctx.App("tls") if err != nil { return fmt.Errorf("getting tls app: %v", err) } app.tlsApp = tlsAppIface.(*caddytls.TLS) eventsAppIface, err := ctx.App("events") if err != nil { return fmt.Errorf("getting events app: %v", err) } repl := caddy.NewReplacer() // this provisions the matchers for each route, // and prepares auto HTTP->HTTPS redirects, and // is required before we provision each server err = app.automaticHTTPSPhase1(ctx, repl) if err != nil { return err } if app.Metrics != nil { app.Metrics.init = sync.Once{} app.Metrics.httpMetrics = &httpMetrics{} // Scan config for allowed hosts to prevent cardinality explosion app.Metrics.scanConfigForHosts(app) } // prepare each server oldContext := ctx.Context for srvName, srv := range app.Servers { ctx.Context = context.WithValue(oldContext, ServerCtxKey, srv) srv.name = srvName srv.tlsApp = app.tlsApp srv.events = eventsAppIface.(*caddyevents.App) srv.ctx = ctx srv.logger = app.logger.Named("log") srv.errorLogger = app.logger.Named("log.error") srv.shutdownAtMu = new(sync.RWMutex) if srv.Metrics != 
nil { srv.logger.Warn("per-server 'metrics' is deprecated; use 'metrics' in the root 'http' app instead") app.Metrics = cmp.Or(app.Metrics, &Metrics{ init: sync.Once{}, httpMetrics: &httpMetrics{}, }) app.Metrics.PerHost = app.Metrics.PerHost || srv.Metrics.PerHost } // only enable access logs if configured if srv.Logs != nil { srv.accessLogger = app.logger.Named("log.access") if srv.Logs.Trace { srv.traceLogger = app.logger.Named("log.trace") } } // if no protocols configured explicitly, enable all except h2c if len(srv.Protocols) == 0 { srv.Protocols = []string{"h1", "h2", "h3"} } srvProtocolsUnique := map[string]struct{}{} for _, srvProtocol := range srv.Protocols { srvProtocolsUnique[srvProtocol] = struct{}{} } if srv.ListenProtocols != nil { if len(srv.ListenProtocols) != len(srv.Listen) { return fmt.Errorf("server %s: listener protocols count does not match address count: %d != %d", srvName, len(srv.ListenProtocols), len(srv.Listen)) } for i, lnProtocols := range srv.ListenProtocols { if lnProtocols != nil { // populate empty listen protocols with server protocols lnProtocolsDefault := false var lnProtocolsInclude []string srvProtocolsInclude := maps.Clone(srvProtocolsUnique) // keep existing listener protocols unless they are empty for _, lnProtocol := range lnProtocols { if lnProtocol == "" { lnProtocolsDefault = true } else { lnProtocolsInclude = append(lnProtocolsInclude, lnProtocol) delete(srvProtocolsInclude, lnProtocol) } } // append server protocols to listener protocols if any listener protocols were empty if lnProtocolsDefault { for _, srvProtocol := range srv.Protocols { if _, ok := srvProtocolsInclude[srvProtocol]; ok { lnProtocolsInclude = append(lnProtocolsInclude, srvProtocol) } } } srv.ListenProtocols[i] = lnProtocolsInclude } } } // if not explicitly configured by the user, disallow TLS // client auth bypass (domain fronting) which could // otherwise be exploited by sending an unprotected SNI // value during a TLS handshake, then putting a 
protected // domain in the Host header after establishing connection; // this is a safe default, but we allow users to override // it for example in the case of running a proxy where // domain fronting is desired and access is not restricted // based on hostname if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() { app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured", zap.String("server_id", srvName)) trueBool := true srv.StrictSNIHost = &trueBool } // set up the trusted proxies source for srv.TrustedProxiesRaw != nil { val, err := ctx.LoadModule(srv, "TrustedProxiesRaw") if err != nil { return fmt.Errorf("loading trusted proxies modules: %v", err) } srv.trustedProxies = val.(IPRangeSource) } // set the default client IP header to read from if srv.ClientIPHeaders == nil { srv.ClientIPHeaders = []string{"X-Forwarded-For"} } // process each listener address for i := range srv.Listen { lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true) if err != nil { return fmt.Errorf("server %s, listener %d: %v", srvName, i, err) } srv.Listen[i] = lnOut } // set up each listener modifier if srv.ListenerWrappersRaw != nil { vals, err := ctx.LoadModule(srv, "ListenerWrappersRaw") if err != nil { return fmt.Errorf("loading listener wrapper modules: %v", err) } var hasTLSPlaceholder bool for i, val := range vals.([]any) { if _, ok := val.(*tlsPlaceholderWrapper); ok { if i == 0 { // putting the tls placeholder wrapper first is nonsensical because // that is the default, implicit setting: without it, all wrappers // will go after the TLS listener anyway return fmt.Errorf("it is unnecessary to specify the TLS listener wrapper in the first position because that is the default") } if hasTLSPlaceholder { return fmt.Errorf("TLS listener wrapper can only be specified once") } hasTLSPlaceholder = true } srv.listenerWrappers = append(srv.listenerWrappers, val.(caddy.ListenerWrapper)) } // if any wrappers were configured but the TLS placeholder 
wrapper is // absent, prepend it so all defined wrappers come after the TLS // handshake; this simplifies logic when starting the server, since we // can simply assume the TLS placeholder will always be there if !hasTLSPlaceholder && len(srv.listenerWrappers) > 0 { srv.listenerWrappers = append([]caddy.ListenerWrapper{new(tlsPlaceholderWrapper)}, srv.listenerWrappers...) } } // set up each packet conn modifier if srv.PacketConnWrappersRaw != nil { vals, err := ctx.LoadModule(srv, "PacketConnWrappersRaw") if err != nil { return fmt.Errorf("loading packet conn wrapper modules: %v", err) } // if any wrappers were configured, they come before the QUIC handshake; // unlike TLS above, there is no QUIC placeholder for _, val := range vals.([]any) { srv.packetConnWrappers = append(srv.packetConnWrappers, val.(caddy.PacketConnWrapper)) } } // pre-compile the primary handler chain, and be sure to wrap it in our // route handler so that important security checks are done, etc. primaryRoute := emptyHandler if srv.Routes != nil { err := srv.Routes.ProvisionHandlers(ctx, app.Metrics) if err != nil { return fmt.Errorf("server %s: setting up route handlers: %v", srvName, err) } primaryRoute = srv.Routes.Compile(emptyHandler) } srv.primaryHandlerChain = srv.wrapPrimaryRoute(primaryRoute) // pre-compile the error handler chain if srv.Errors != nil { err := srv.Errors.Routes.Provision(ctx) if err != nil { return fmt.Errorf("server %s: setting up error handling routes: %v", srvName, err) } srv.errorHandlerChain = srv.Errors.Routes.Compile(errorEmptyHandler) } // provision the named routes (they get compiled at runtime) for name, route := range srv.NamedRoutes { err := route.Provision(ctx, app.Metrics) if err != nil { return fmt.Errorf("server %s: setting up named route '%s' handlers: %v", name, srvName, err) } } // prepare the TLS connection policies err = srv.TLSConnPolicies.Provision(ctx) if err != nil { return fmt.Errorf("server %s: setting up TLS connection policies: %v", srvName, 
err) } // if there is no idle timeout, set a sane default; users have complained // before that aggressive CDNs leave connections open until the server // closes them, so if we don't close them it leads to resource exhaustion if srv.IdleTimeout == 0 { srv.IdleTimeout = defaultIdleTimeout } if srv.ReadHeaderTimeout == 0 { srv.ReadHeaderTimeout = defaultReadHeaderTimeout // see #6663 } } ctx.Context = oldContext return nil } // Validate ensures the app's configuration is valid. func (app *App) Validate() error { lnAddrs := make(map[string]string) for srvName, srv := range app.Servers { // each server must use distinct listener addresses for _, addr := range srv.Listen { listenAddr, err := caddy.ParseNetworkAddress(addr) if err != nil { return fmt.Errorf("invalid listener address '%s': %v", addr, err) } // check that every address in the port range is unique to this server; // we do not use <= here because PortRangeSize() adds 1 to EndPort for us for i := uint(0); i < listenAddr.PortRangeSize(); i++ { addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.FormatUint(uint64(listenAddr.StartPort+i), 10)) if sn, ok := lnAddrs[addr]; ok { return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", srvName, addr, sn) } lnAddrs[addr] = srvName } } // logger names must not have ports if srv.Logs != nil { for host := range srv.Logs.LoggerNames { if _, _, err := net.SplitHostPort(host); err == nil { return fmt.Errorf("server %s: logger name must not have a port: %s", srvName, host) } } } } return nil } func removeTLSALPN(srv *Server, target string) { for _, cp := range srv.TLSConnPolicies { // the TLSConfig was already provisioned, so... manually remove it for i, np := range cp.TLSConfig.NextProtos { if np == target { cp.TLSConfig.NextProtos = append(cp.TLSConfig.NextProtos[:i], cp.TLSConfig.NextProtos[i+1:]...) 
break } } // remove it from the parent connection policy too, just to keep things tidy for i, alpn := range cp.ALPN { if alpn == target { cp.ALPN = append(cp.ALPN[:i], cp.ALPN[i+1:]...) break } } } } // Start runs the app. It finishes automatic HTTPS if enabled, // including management of certificates. func (app *App) Start() error { // get a logger compatible with http.Server serverLogger, err := zap.NewStdLogAt(app.logger.Named("stdlib"), zap.DebugLevel) if err != nil { return fmt.Errorf("failed to set up server logger: %v", err) } for srvName, srv := range app.Servers { srv.server = &http.Server{ ReadTimeout: time.Duration(srv.ReadTimeout), ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout), WriteTimeout: time.Duration(srv.WriteTimeout), IdleTimeout: time.Duration(srv.IdleTimeout), MaxHeaderBytes: srv.MaxHeaderBytes, Handler: srv, ErrorLog: serverLogger, Protocols: new(http.Protocols), ConnContext: func(ctx context.Context, c net.Conn) context.Context { if nc, ok := c.(interface{ tlsNetConn() net.Conn }); ok { getTlsConStateFunc := sync.OnceValue(func() *tls.ConnectionState { tlsConnState := nc.tlsNetConn().(connectionStater).ConnectionState() return &tlsConnState }) ctx = context.WithValue(ctx, tlsConnectionStateFuncCtxKey, getTlsConStateFunc) } return ctx }, } // disable HTTP/2, which we enabled by default during provisioning if !srv.protocol("h2") { srv.server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) removeTLSALPN(srv, "h2") } if !srv.protocol("h1") { removeTLSALPN(srv, "http/1.1") } // configure the http versions the server will serve if srv.protocol("h1") { srv.server.Protocols.SetHTTP1(true) } if srv.protocol("h2") || srv.protocol("h2c") { // skip setting h2 because if NextProtos is present, it's list of alpn versions will take precedence. 
// it will always be present because http2.ConfigureServer will populate that field // enabling h2c because some listener wrapper will wrap the connection that is no longer *tls.Conn // However, we need to handle the case that if the connection is h2c but h2c is not enabled. We identify // this type of connection by checking if it's behind a TLS listener wrapper or if it implements tls.ConnectionState. srv.server.Protocols.SetUnencryptedHTTP2(true) // when h2c is enabled but h2 disabled, we already removed h2 from NextProtos // the handshake will never succeed with h2 // http2.ConfigureServer will enable the server to handle both h2 and h2c h2server := new(http2.Server) //nolint:errcheck http2.ConfigureServer(srv.server, h2server) } // this TLS config is used by the std lib to choose the actual TLS config for connections // by looking through the connection policies to find the first one that matches tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx) srv.configureServer(srv.server) for lnIndex, lnAddr := range srv.Listen { listenAddr, err := caddy.ParseNetworkAddress(lnAddr) if err != nil { return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err) } srv.addresses = append(srv.addresses, listenAddr) protocols := srv.Protocols if srv.ListenProtocols != nil && srv.ListenProtocols[lnIndex] != nil { protocols = srv.ListenProtocols[lnIndex] } protocolsUnique := map[string]struct{}{} for _, protocol := range protocols { protocolsUnique[protocol] = struct{}{} } _, h1ok := protocolsUnique["h1"] _, h2ok := protocolsUnique["h2"] _, h2cok := protocolsUnique["h2c"] _, h3ok := protocolsUnique["h3"] for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ { hostport := listenAddr.JoinHostPort(portOffset) // enable TLS if there is a policy and if this is not the HTTP port useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort() if h1ok || h2ok && useTLS || h2cok { // create the listener for this 
socket lnAny, err := listenAddr.Listen(app.ctx, portOffset, net.ListenConfig{ KeepAliveConfig: net.KeepAliveConfig{ Enable: srv.KeepAliveInterval >= 0, Interval: time.Duration(srv.KeepAliveInterval), Idle: time.Duration(srv.KeepAliveIdle), Count: srv.KeepAliveCount, }, }) if err != nil { return fmt.Errorf("listening on %s: %v", listenAddr.At(portOffset), err) } ln, ok := lnAny.(net.Listener) if !ok { return fmt.Errorf("network '%s' cannot handle HTTP/1 or HTTP/2 connections", listenAddr.Network) } // wrap listener before TLS (up to the TLS placeholder wrapper) var lnWrapperIdx int for i, lnWrapper := range srv.listenerWrappers { if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok { lnWrapperIdx = i + 1 // mark the next wrapper's spot break } ln = lnWrapper.WrapListener(ln) } if useTLS { // create TLS listener - this enables and terminates TLS ln = tls.NewListener(ln, tlsCfg) } // finish wrapping listener where we left off before TLS for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ { ln = srv.listenerWrappers[i].WrapListener(ln) } // check if the connection is h2c ln = &http2Listener{ useTLS: useTLS, useH1: h1ok, useH2: h2ok || h2cok, Listener: ln, logger: app.logger, } // if binding to port 0, the OS chooses a port for us; // but the user won't know the port unless we print it if !listenAddr.IsUnixNetwork() && !listenAddr.IsFdNetwork() && listenAddr.StartPort == 0 && listenAddr.EndPort == 0 { app.logger.Info("port 0 listener", zap.String("input_address", lnAddr), zap.String("actual_address", ln.Addr().String())) } app.logger.Debug("starting server loop", zap.String("address", ln.Addr().String()), zap.Bool("tls", useTLS), zap.Bool("http3", srv.h3server != nil)) srv.listeners = append(srv.listeners, ln) //nolint:errcheck go srv.server.Serve(ln) } if h2ok && !useTLS { // Can only serve h2 with TLS enabled app.logger.Warn("HTTP/2 skipped because it requires TLS", zap.String("network", listenAddr.Network), zap.String("addr", hostport)) } if h3ok { // Can't serve 
HTTP/3 on the same socket as HTTP/1 and 2 because it uses // a different transport mechanism... which is fine, but the OS doesn't // differentiate between a SOCK_STREAM file and a SOCK_DGRAM file; they // are still one file on the system. So even though "unixpacket" and // "unixgram" are different network types just as "tcp" and "udp" are, // the OS will not let us use the same file as both STREAM and DGRAM. if listenAddr.IsUnixNetwork() { app.logger.Warn("HTTP/3 disabled because Unix can't multiplex STREAM and DGRAM on same socket", zap.String("file", hostport)) continue } if useTLS { // enable HTTP/3 if configured app.logger.Info("enabling HTTP/3 listener", zap.String("addr", hostport)) if err := srv.serveHTTP3(listenAddr.At(portOffset), tlsCfg); err != nil { return err } } else { // Can only serve h3 with TLS enabled app.logger.Warn("HTTP/3 skipped because it requires TLS", zap.String("network", listenAddr.Network), zap.String("addr", hostport)) } } } } srv.logger.Info("server running", zap.String("name", srvName), zap.Strings("protocols", srv.Protocols)) } // finish automatic HTTPS by finally beginning // certificate management err = app.automaticHTTPSPhase2() if err != nil { return fmt.Errorf("finalizing automatic HTTPS: %v", err) } return nil } // Stop gracefully shuts down the HTTP server. 
func (app *App) Stop() error {
	ctx := context.Background()

	// see if any listeners in our config will be closing or if they are continuing
	// through a reload; because if any are closing, we will enforce shutdown delay
	var delay bool
	scheduledTime := time.Now().Add(time.Duration(app.ShutdownDelay))
	if app.ShutdownDelay > 0 {
		for _, server := range app.Servers {
			for _, na := range server.addresses {
				for _, addr := range na.Expand() {
					// usage < 2 means no other config (i.e. an incoming reload)
					// holds this listener, so it will actually close
					if caddy.ListenerUsage(addr.Network, addr.JoinHostPort(0)) < 2 {
						app.logger.Debug("listener closing and shutdown delay is configured", zap.String("address", addr.String()))
						server.shutdownAtMu.Lock()
						server.shutdownAt = scheduledTime
						server.shutdownAtMu.Unlock()
						delay = true
					} else {
						app.logger.Debug("shutdown delay configured but listener will remain open", zap.String("address", addr.String()))
					}
				}
			}
		}
	}

	// honor scheduled/delayed shutdown time
	if delay {
		app.logger.Info("shutdown scheduled",
			zap.Duration("delay_duration", time.Duration(app.ShutdownDelay)),
			zap.Time("time", scheduledTime))
		time.Sleep(time.Duration(app.ShutdownDelay))
	}

	// enforce grace period if configured
	if app.GracePeriod > 0 {
		var cancel context.CancelFunc
		timeout := time.Duration(app.GracePeriod)
		// the cause error is surfaced below in place of the generic
		// context.DeadlineExceeded when the grace period elapses
		ctx, cancel = context.WithTimeoutCause(ctx, timeout, fmt.Errorf("server graceful shutdown %ds timeout", int(timeout.Seconds())))
		defer cancel()
		app.logger.Info("servers shutting down; grace period initiated", zap.Duration("duration", timeout))
	} else {
		app.logger.Info("servers shutting down with eternal grace period")
	}

	// goroutines aren't guaranteed to be scheduled right away,
	// so we'll use one WaitGroup to wait for all the goroutines
	// to start their server shutdowns, and another to wait for
	// them to finish; we'll always block for them to start so
	// that when we return the caller can be confident* that the
	// old servers are no longer accepting new connections
	// (* the scheduler might still pause them right before
	// calling Shutdown(), but it's unlikely)
	var startedShutdown, finishedShutdown sync.WaitGroup

	// these will run in goroutines
	stopServer := func(server *Server) {
		defer finishedShutdown.Done()
		startedShutdown.Done()

		// possible if server failed to Start
		if server.server == nil {
			return
		}

		if err := server.server.Shutdown(ctx); err != nil {
			if cause := context.Cause(ctx); cause != nil && errors.Is(err, context.DeadlineExceeded) {
				err = cause
			}
			app.logger.Error("server shutdown",
				zap.Error(err),
				zap.Strings("addresses", server.Listen))
		}
	}
	stopH3Server := func(server *Server) {
		defer finishedShutdown.Done()
		startedShutdown.Done()

		if server.h3server == nil {
			return
		}

		// closing quic listeners won't affect accepted connections now
		// so like stdlib, close listeners first, but keep the net.PacketConns open
		for _, h3ln := range server.quicListeners {
			if err := h3ln.Close(); err != nil {
				app.logger.Error("http3 listener close", zap.Error(err))
			}
		}

		if err := server.h3server.Shutdown(ctx); err != nil {
			if cause := context.Cause(ctx); cause != nil && errors.Is(err, context.DeadlineExceeded) {
				err = cause
			}
			app.logger.Error("HTTP/3 server shutdown",
				zap.Error(err),
				zap.Strings("addresses", server.Listen))
		}

		// close the underlying net.PacketConns now
		// see the comment for ListenQUIC
		// NOTE(review): this second pass calls Close() on the same listeners
		// already closed above, yet the log message says "close socket" —
		// verify whether closing the underlying net.PacketConn requires a
		// different method here, or whether the second Close is redundant.
		for _, h3ln := range server.quicListeners {
			if err := h3ln.Close(); err != nil {
				app.logger.Error("http3 listener close socket", zap.Error(err))
			}
		}
	}
	for _, server := range app.Servers {
		// 2 per server: one HTTP server goroutine, one HTTP/3 goroutine
		startedShutdown.Add(2)
		finishedShutdown.Add(2)
		go stopServer(server)
		go stopH3Server(server)
	}

	// block until all the goroutines have been run by the scheduler;
	// this means that they have likely called Shutdown() by now
	startedShutdown.Wait()

	// if the process is exiting, we need to block here and wait
	// for the grace periods to complete, otherwise the process will
	// terminate before the servers are finished shutting down; but
	// we don't really need to wait for the grace period to finish
	// if the process isn't exiting (but note that frequent config
	// reloads with long grace periods for a sustained length of time
	// may deplete resources)
	if caddy.Exiting() {
		finishedShutdown.Wait()
	}

	// run stop callbacks now that the server shutdowns are complete
	for name, s := range app.Servers {
		for _, stopHook := range s.onStopFuncs {
			if err := stopHook(ctx); err != nil {
				app.logger.Error("server stop hook",
					zap.String("server", name),
					zap.Error(err))
			}
		}
	}

	app.stopped = true
	return nil
}

// Cleanup will close remaining listeners if they still remain
// because some of the servers fail to start.
// It simply calls Stop because Stop won't be called when Start fails.
func (app *App) Cleanup() error {
	if app.stopped {
		return nil
	}
	return app.Stop()
}

// httpPort returns the configured HTTP port, or DefaultHTTPPort if unset.
func (app *App) httpPort() int {
	if app.HTTPPort == 0 {
		return DefaultHTTPPort
	}
	return app.HTTPPort
}

// httpsPort returns the configured HTTPS port, or DefaultHTTPSPort if unset.
func (app *App) httpsPort() int {
	if app.HTTPSPort == 0 {
		return DefaultHTTPSPort
	}
	return app.HTTPSPort
}

const (
	// defaultIdleTimeout is the default HTTP server timeout
	// for closing idle connections; useful to avoid resource
	// exhaustion behind hungry CDNs, for example (we've had
	// several complaints without this).
	defaultIdleTimeout = caddy.Duration(5 * time.Minute)

	// defaultReadHeaderTimeout is the default timeout for
	// reading HTTP headers from clients. Headers are generally
	// small, often less than 1 KB, so it shouldn't take a
	// long time even on legitimately slow connections or
	// busy servers to read it.
	defaultReadHeaderTimeout = caddy.Duration(time.Minute)
)

// Interface guards
var (
	_ caddy.App         = (*App)(nil)
	_ caddy.Provisioner = (*App)(nil)
	_ caddy.Validator   = (*App)(nil)
)



================================================
FILE: modules/caddyhttp/autohttps.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "fmt" "net/http" "slices" "strconv" "strings" "github.com/caddyserver/certmagic" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/internal" "github.com/caddyserver/caddy/v2/modules/caddytls" ) // AutoHTTPSConfig is used to disable automatic HTTPS // or certain aspects of it for a specific server. // HTTPS is enabled automatically and by default when // qualifying hostnames are available from the config. type AutoHTTPSConfig struct { // If true, automatic HTTPS will be entirely disabled, // including certificate management and redirects. Disabled bool `json:"disable,omitempty"` // If true, only automatic HTTP->HTTPS redirects will // be disabled, but other auto-HTTPS features will // remain enabled. DisableRedir bool `json:"disable_redirects,omitempty"` // If true, automatic certificate management will be // disabled, but other auto-HTTPS features will // remain enabled. DisableCerts bool `json:"disable_certificates,omitempty"` // Hosts/domain names listed here will not be included // in automatic HTTPS (they will not have certificates // loaded nor redirects applied). Skip []string `json:"skip,omitempty"` // Hosts/domain names listed here will still be enabled // for automatic HTTPS (unless in the Skip list), except // that certificates will not be provisioned and managed // for these names. SkipCerts []string `json:"skip_certificates,omitempty"` // By default, automatic HTTPS will obtain and renew // certificates for qualifying hostnames. 
However, if // a certificate with a matching SAN is already loaded // into the cache, certificate management will not be // enabled. To force automated certificate management // regardless of loaded certificates, set this to true. IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"` } // automaticHTTPSPhase1 provisions all route matchers, determines // which domain names found in the routes qualify for automatic // HTTPS, and sets up HTTP->HTTPS redirects. This phase must occur // at the beginning of provisioning, because it may add routes and // even servers to the app, which still need to be set up with the // rest of them during provisioning. func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) error { logger := app.logger.Named("auto_https") // this map acts as a set to store the domain names // for which we will manage certificates automatically uniqueDomainsForCerts := make(map[string]struct{}) // this maps domain names for automatic HTTP->HTTPS // redirects to their destination server addresses // (there might be more than 1 if bind is used; see // https://github.com/caddyserver/caddy/issues/3443) redirDomains := make(map[string][]caddy.NetworkAddress) // the log configuration for an HTTPS enabled server var logCfg *ServerLogConfig // Sort server names to ensure deterministic iteration. // This prevents race conditions where the order of server processing // could affect which server gets assigned the HTTP->HTTPS redirect listener. 
srvNames := make([]string, 0, len(app.Servers)) for name := range app.Servers { srvNames = append(srvNames, name) } slices.Sort(srvNames) for _, srvName := range srvNames { srv := app.Servers[srvName] // as a prerequisite, provision route matchers; this is // required for all routes on all servers, and must be // done before we attempt to do phase 1 of auto HTTPS, // since we have to access the decoded host matchers the // handlers will be provisioned later if srv.Routes != nil { err := srv.Routes.ProvisionMatchers(ctx) if err != nil { return fmt.Errorf("server %s: setting up route matchers: %v", srvName, err) } } // prepare for automatic HTTPS if srv.AutoHTTPS == nil { srv.AutoHTTPS = new(AutoHTTPSConfig) } if srv.AutoHTTPS.Disabled { logger.Info("automatic HTTPS is completely disabled for server", zap.String("server_name", srvName)) continue } // skip if all listeners use the HTTP port if !srv.listenersUseAnyPortOtherThan(app.httpPort()) { logger.Warn("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server", zap.String("server_name", srvName), zap.Int("http_port", app.httpPort()), ) srv.AutoHTTPS.Disabled = true continue } // if all listeners are on the HTTPS port, make sure // there is at least one TLS connection policy; it // should be obvious that they want to use TLS without // needing to specify one empty policy to enable it if srv.TLSConnPolicies == nil && !srv.listenersUseAnyPortOtherThan(app.httpsPort()) { logger.Info("server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS", zap.String("server_name", srvName), zap.Int("https_port", app.httpsPort()), ) srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)} } // find all qualifying domain names (deduplicated) in this server // (this is where we need the provisioned, decoded request matchers) serverDomainSet := make(map[string]struct{}) for routeIdx, route := range srv.Routes { for 
matcherSetIdx, matcherSet := range route.MatcherSets { for matcherIdx, m := range matcherSet { if hm, ok := m.(*MatchHost); ok { for hostMatcherIdx, d := range *hm { var err error d, err = repl.ReplaceOrErr(d, true, false) if err != nil { return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v", srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err) } if !slices.Contains(srv.AutoHTTPS.Skip, d) { serverDomainSet[d] = struct{}{} } } } } } } // build the list of domains that could be used with ECH (if enabled) // so the TLS app can know to publish ECH configs for them echDomains := make([]string, 0, len(serverDomainSet)) for d := range serverDomainSet { echDomains = append(echDomains, d) } app.tlsApp.RegisterServerNames(echDomains) // nothing more to do here if there are no domains that qualify for // automatic HTTPS and there are no explicit TLS connection policies: // if there is at least one domain but no TLS conn policy (F&&T), we'll // add one below; if there are no domains but at least one TLS conn // policy (meaning TLS is enabled) (T&&F), it could be a catch-all with // on-demand TLS -- and in that case we would still need HTTP->HTTPS // redirects, which we set up below; hence these two conditions if len(serverDomainSet) == 0 && len(srv.TLSConnPolicies) == 0 { continue } // clone the logger so we can apply it to the HTTP server // (not sure if necessary to clone it; but probably safer) // (we choose one log cfg arbitrarily; not sure which is best) if srv.Logs != nil { logCfg = srv.Logs.clone() } // for all the hostnames we found, filter them so we have // a deduplicated list of names for which to obtain certs // (only if cert management not disabled for this server) if srv.AutoHTTPS.DisableCerts { logger.Warn("skipping automated certificate management for server because it is disabled", zap.String("server_name", srvName)) } else { for d := range serverDomainSet { if certmagic.SubjectQualifiesForCert(d) && 
!slices.Contains(srv.AutoHTTPS.SkipCerts, d) { // if a certificate for this name is already loaded, // don't obtain another one for it, unless we are // supposed to ignore loaded certificates if !srv.AutoHTTPS.IgnoreLoadedCerts && app.tlsApp.HasCertificateForSubject(d) { logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded", zap.String("domain", d), zap.String("server_name", srvName), ) continue } // most clients don't accept wildcards like *.tld... we // can handle that, but as a courtesy, warn the user if strings.Contains(d, "*") && strings.Count(strings.Trim(d, "."), ".") == 1 { logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)", zap.String("domain", d)) } uniqueDomainsForCerts[d] = struct{}{} } } } // tell the server to use TLS if it is not already doing so if srv.TLSConnPolicies == nil { srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)} } // nothing left to do if auto redirects are disabled if srv.AutoHTTPS.DisableRedir { logger.Info("automatic HTTP->HTTPS redirects are disabled", zap.String("server_name", srvName)) continue } logger.Info("enabling automatic HTTP->HTTPS redirects", zap.String("server_name", srvName)) // create HTTP->HTTPS redirects for _, listenAddr := range srv.Listen { // figure out the address we will redirect to... addr, err := caddy.ParseNetworkAddress(listenAddr) if err != nil { msg := "%s: invalid listener address: %v" if strings.Count(listenAddr, ":") > 1 { msg = msg + ", there are too many colons, so the port is ambiguous. Did you mean to wrap the IPv6 address with [] brackets?" } return fmt.Errorf(msg, srvName, listenAddr) } // this address might not have a hostname, i.e. might be a // catch-all address for a particular port; we need to keep // track if it is, so we can set up redirects for it anyway // (e.g. 
the user might have enabled on-demand TLS); we use // an empty string to indicate a catch-all, which we have to // treat special later if len(serverDomainSet) == 0 { redirDomains[""] = append(redirDomains[""], addr) continue } // ...and associate it with each domain in this server for d := range serverDomainSet { // if this domain is used on more than one HTTPS-enabled // port, we'll have to choose one, so prefer the HTTPS port if _, ok := redirDomains[d]; !ok || addr.StartPort == uint(app.httpsPort()) { redirDomains[d] = append(redirDomains[d], addr) } } } } // if all servers have auto_https disabled and no domains need certs, // skip the rest of the TLS automation setup to avoid creating // unnecessary PKI infrastructure and automation policies allServersDisabled := true for _, srv := range app.Servers { if srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled { allServersDisabled = false break } } if allServersDisabled && len(uniqueDomainsForCerts) == 0 { logger.Debug("all servers have automatic HTTPS disabled and no domains need certificates, skipping TLS automation setup") return nil } // we now have a list of all the unique names for which we need certs var internal, tailscale []string uniqueDomainsLoop: for d := range uniqueDomainsForCerts { // some names we've found might already have automation policies // explicitly specified for them; we should exclude those from // our hidden/implicit policy, since applying a name to more than // one automation policy would be confusing and an error if app.tlsApp.Automation != nil { for _, ap := range app.tlsApp.Automation.Policies { for _, apHost := range ap.Subjects() { if apHost == d { // if the automation policy has all internal subjects but no issuers, // it will default to CertMagic's issuers which are public CAs; use // our internal issuer instead if len(ap.Issuers) == 0 && ap.AllInternalSubjects() { iss := new(caddytls.InternalIssuer) if err := iss.Provision(ctx); err != nil { return err } ap.Issuers = 
append(ap.Issuers, iss) } continue uniqueDomainsLoop } } } } // if no automation policy exists for the name yet, we will associate it with an implicit one; // we handle tailscale domains specially, and we also separate out identifiers that need the // internal issuer (self-signed certs); certmagic does not consider public IP addresses to be // disqualified for public certs, because there are public CAs that will issue certs for IPs. // However, with auto-HTTPS, many times there is no issuer explicitly defined, and the default // issuers do not (currently, as of 2024) issue IP certificates; so assign all IP subjects to // the internal issuer when there are no explicit automation policies shouldUseInternal := func(ident string) bool { usingDefaultIssuersAndIsIP := certmagic.SubjectIsIP(ident) && (app.tlsApp == nil || app.tlsApp.Automation == nil || len(app.tlsApp.Automation.Policies) == 0) return !certmagic.SubjectQualifiesForPublicCert(d) || usingDefaultIssuersAndIsIP } if isTailscaleDomain(d) { tailscale = append(tailscale, d) delete(uniqueDomainsForCerts, d) // not managed by us; handled separately } else if shouldUseInternal(d) { internal = append(internal, d) } } // ensure there is an automation policy to handle these certs err := app.createAutomationPolicies(ctx, internal, tailscale) if err != nil { return err } // we need to reduce the mapping, i.e. 
group domains by address // since new routes are appended to servers by their address domainsByAddr := make(map[string][]string) for domain, addrs := range redirDomains { for _, addr := range addrs { addrStr := addr.String() domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain) } } // these keep track of the redirect server address(es) // and the routes for those servers which actually // respond with the redirects redirServerAddrs := make(map[string]struct{}) redirServers := make(map[string][]Route) var redirRoutes RouteList for addrStr, domains := range domainsByAddr { // build the matcher set for this redirect route; (note that we happen // to bypass Provision and Validate steps for these matcher modules) matcherSet := MatcherSet{MatchProtocol("http")} // match on known domain names, unless it's our special case of a // catch-all which is an empty string (common among catch-all sites // that enable on-demand TLS for yet-unknown domain names) if len(domains) != 1 || domains[0] != "" { matcherSet = append(matcherSet, MatchHost(domains)) } addr, err := caddy.ParseNetworkAddress(addrStr) if err != nil { return err } redirRoute := app.makeRedirRoute(addr.StartPort, matcherSet) // use the network/host information from the address, // but change the port to the HTTP port then rebuild redirAddr := addr redirAddr.StartPort = uint(app.httpPort()) redirAddr.EndPort = redirAddr.StartPort redirAddrStr := redirAddr.String() redirServers[redirAddrStr] = append(redirServers[redirAddrStr], redirRoute) } // on-demand TLS means that hostnames may be used which are not // explicitly defined in the config, and we still need to redirect // those; so we can append a single catch-all route (notice there // is no Host matcher) after the other redirect routes which will // allow us to handle unexpected/new hostnames... 
however, it's // not entirely clear what the redirect destination should be, // so I'm going to just hard-code the app's HTTPS port and call // it good for now... // TODO: This implies that all plaintext requests will be blindly // redirected to their HTTPS equivalent, even if this server // doesn't handle that hostname at all; I don't think this is a // bad thing, and it also obscures the actual hostnames that this // server is configured to match on, which may be desirable, but // it's not something that should be relied on. We can change this // if we want to. appendCatchAll := func(routes []Route) []Route { return append(routes, app.makeRedirRoute(uint(app.httpsPort()), MatcherSet{MatchProtocol("http")})) } // Sort redirect addresses to ensure deterministic process redirServerAddrsSorted := make([]string, 0, len(redirServers)) for addr := range redirServers { redirServerAddrsSorted = append(redirServerAddrsSorted, addr) } slices.Sort(redirServerAddrsSorted) redirServersLoop: for _, redirServerAddr := range redirServerAddrsSorted { routes := redirServers[redirServerAddr] // for each redirect listener, see if there's already a // server configured to listen on that exact address; if so, // insert the redirect route to the end of its route list // after any other routes with host matchers; otherwise, // we'll create a new server for all the listener addresses // that are unused and serve the remaining redirects from it // Sort redirect routes by host specificity to ensure exact matches // take precedence over wildcards, preventing ambiguous routing. 
slices.SortFunc(routes, func(a, b Route) int { hostA := getFirstHostFromRoute(a) hostB := getFirstHostFromRoute(b) // Catch-all routes (empty host) have the lowest priority if hostA == "" && hostB != "" { return 1 } if hostB == "" && hostA != "" { return -1 } hasWildcardA := strings.Contains(hostA, "*") hasWildcardB := strings.Contains(hostB, "*") // Exact domains take precedence over wildcards if !hasWildcardA && hasWildcardB { return -1 } if hasWildcardA && !hasWildcardB { return 1 } // If both are exact or both are wildcards, the longer one is more specific if len(hostA) != len(hostB) { return len(hostB) - len(hostA) } // Tie-breaker: alphabetical order to ensure determinism return strings.Compare(hostA, hostB) }) // Use the sorted srvNames to consistently find the target server for _, srvName := range srvNames { srv := app.Servers[srvName] // only look at servers which listen on an address which // we want to add redirects to if !srv.hasListenerAddress(redirServerAddr) { continue } // find the index of the route after the last route with a host // matcher, then insert the redirects there, but before any // user-defined catch-all routes // see https://github.com/caddyserver/caddy/issues/3212 insertIndex := srv.findLastRouteWithHostMatcher() // add the redirects at the insert index, except for when // we have a catch-all for HTTPS, in which case the user's // defined catch-all should take precedence. See #4829 if len(uniqueDomainsForCerts) != 0 { srv.Routes = append(srv.Routes[:insertIndex], append(routes, srv.Routes[insertIndex:]...)...) } // append our catch-all route in case the user didn't define their own srv.Routes = appendCatchAll(srv.Routes) continue redirServersLoop } // no server with this listener address exists; // save this address and route for custom server redirServerAddrs[redirServerAddr] = struct{}{} redirRoutes = append(redirRoutes, routes...) 
}

	// if there are routes remaining which do not belong
	// in any existing server, make our own to serve the
	// rest of the redirects
	if len(redirServerAddrs) > 0 {
		redirServerAddrsList := make([]string, 0, len(redirServerAddrs))
		for a := range redirServerAddrs {
			redirServerAddrsList = append(redirServerAddrsList, a)
		}
		app.Servers["remaining_auto_https_redirects"] = &Server{
			Listen: redirServerAddrsList,
			Routes: appendCatchAll(redirRoutes),
			Logs:   logCfg,
		}
	}

	// persist the domains/IPs we're managing certs for through provisioning/startup
	app.allCertDomains = uniqueDomainsForCerts

	logger.Debug("adjusted config",
		zap.Reflect("tls", app.tlsApp),
		zap.Reflect("http", app))

	return nil
}

// makeRedirRoute returns a route that matches matcherSet and responds with
// a 308 Permanent Redirect to the HTTPS equivalent of the request URL.
// redirToPort is the port of the HTTPS listener being redirected to; it is
// only appended to the Location value when it is a non-standard port.
func (app *App) makeRedirRoute(redirToPort uint, matcherSet MatcherSet) Route {
	redirTo := "https://{http.request.host}"

	// since this is an external redirect, we should only append an explicit
	// port if we know it is not the officially standardized HTTPS port, and,
	// notably, also not the port that Caddy thinks is the HTTPS port (the
	// configurable HTTPSPort parameter) - we can't change the standard HTTPS
	// port externally, so that config parameter is for internal use only;
	// we also do not append the port if it happens to be the HTTP port as
	// well, obviously (for example, user defines the HTTP port explicitly
	// in the list of listen addresses for a server)
	if redirToPort != uint(app.httpPort()) &&
		redirToPort != uint(app.httpsPort()) &&
		redirToPort != DefaultHTTPPort &&
		redirToPort != DefaultHTTPSPort {
		redirTo += ":" + strconv.Itoa(int(redirToPort))
	}
	redirTo += "{http.request.uri}"

	// a static-response handler that closes the connection after redirecting
	return Route{
		MatcherSets: []MatcherSet{matcherSet},
		Handlers: []MiddlewareHandler{
			StaticResponse{
				StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
				Headers: http.Header{
					"Location": []string{redirTo},
				},
				Close: true,
			},
		},
	}
}

// createAutomationPolicies ensures that automated certificates for this
// app are managed properly.
// This adds up to two automation policies:
// one for the public names, and one for the internal names. If a catch-all
// automation policy exists, it will be shallow-copied and used as the
// base for the new ones (this is important for preserving behavior the
// user intends to be "defaults").
func (app *App) createAutomationPolicies(ctx caddy.Context, internalNames, tailscaleNames []string) error {
	// before we begin, loop through the existing automation policies
	// and, for any ACMEIssuers we find, make sure they're filled in
	// with default values that might be specified in our HTTP app; also
	// look for a base (or "catch-all" / default) automation policy,
	// which we're going to essentially require, to make sure it has
	// those defaults, too
	var basePolicy *caddytls.AutomationPolicy
	var foundBasePolicy bool

	if app.tlsApp.Automation == nil {
		// we will expect this to not be nil from now on
		app.tlsApp.Automation = new(caddytls.AutomationConfig)
	}

	for _, ap := range app.tlsApp.Automation.Policies {
		// on-demand policies can have the tailscale manager added implicitly
		// if there's no explicit manager configured -- for convenience
		if ap.OnDemand && len(ap.Managers) == 0 {
			var ts caddytls.Tailscale
			if err := ts.Provision(ctx); err != nil {
				return err
			}
			ap.Managers = []certmagic.Manager{ts}

			// must reprovision the automation policy so that the underlying
			// CertMagic config knows about the updated Managers
			if err := ap.Provision(app.tlsApp); err != nil {
				return fmt.Errorf("re-provisioning automation policy: %v", err)
			}
		}

		// set up default issuer -- honestly, this is only
		// really necessary because the HTTP app is opinionated
		// and has settings which could be inferred as new
		// defaults for the ACMEIssuer in the TLS app (such as
		// what the HTTP and HTTPS ports are)
		if ap.Issuers == nil {
			var err error
			ap.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
			if err != nil {
				return err
			}
		}
		for _, iss := range ap.Issuers {
			if acmeIssuer, ok := iss.(acmeCapable); ok {
				err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
				if err != nil {
					return err
				}
			}
		}

		// while we're here, is this the catch-all/base policy?
		if !foundBasePolicy && len(ap.SubjectsRaw) == 0 {
			basePolicy = ap
			foundBasePolicy = true
		}
	}

	// Ensure automation policies' CertMagic configs are rebuilt when
	// ACME issuer templates may have been modified above (for example,
	// alternate ports filled in by the HTTP app). If a policy is already
	// provisioned, perform a lightweight rebuild of the CertMagic config
	// so issuers receive SetConfig with the updated templates; otherwise
	// run a normal Provision to initialize the policy.
	for i, ap := range app.tlsApp.Automation.Policies {
		// If the policy is already provisioned, rebuild only the CertMagic
		// config so issuers get SetConfig with updated templates. Otherwise
		// provision the policy normally (which may load modules).
		if ap.IsProvisioned() {
			if err := ap.RebuildCertMagic(app.tlsApp); err != nil {
				return fmt.Errorf("rebuilding certmagic config for automation policy %d: %v", i, err)
			}
		} else {
			if err := ap.Provision(app.tlsApp); err != nil {
				return fmt.Errorf("provisioning automation policy %d after auto-HTTPS defaults: %v", i, err)
			}
		}
	}

	if basePolicy == nil {
		// no base policy found; we will make one
		basePolicy = new(caddytls.AutomationPolicy)
	}

	// if the basePolicy has an existing ACMEIssuer (particularly to
	// include any type that embeds/wraps an ACMEIssuer), let's use it
	// (I guess we just use the first one?), otherwise we'll make one
	var baseACMEIssuer *caddytls.ACMEIssuer
	for _, iss := range basePolicy.Issuers {
		if acmeWrapper, ok := iss.(acmeCapable); ok {
			baseACMEIssuer = acmeWrapper.GetACMEIssuer()
			break
		}
	}
	if baseACMEIssuer == nil {
		// note that this happens if basePolicy.Issuers is empty
		// OR if it is not empty but does not have an ACMEIssuer
		baseACMEIssuer = new(caddytls.ACMEIssuer)
	}

	// if there was a base policy to begin with, we already
	// filled in its issuer's defaults; if there wasn't, we
	// still need to do that
	if !foundBasePolicy {
		err := app.fillInACMEIssuer(baseACMEIssuer)
		if err != nil {
			return err
		}
	}

	// never overwrite any other issuer that might already be configured
	if basePolicy.Issuers == nil {
		var err error
		basePolicy.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
		if err != nil {
			return err
		}
		for _, iss := range basePolicy.Issuers {
			if acmeIssuer, ok := iss.(acmeCapable); ok {
				err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
				if err != nil {
					return err
				}
			}
		}
	}

	if !foundBasePolicy {
		// there was no base policy to begin with, so add
		// our base/catch-all policy - this will serve the
		// public-looking names as well as any other names
		// that don't match any other policy
		err := app.tlsApp.AddAutomationPolicy(basePolicy)
		if err != nil {
			return err
		}
	} else {
		// a base policy already existed; we might have
		// changed it, so re-provision it
		err := basePolicy.Provision(app.tlsApp)
		if err != nil {
			return err
		}
	}

	// public names will be taken care of by the base (catch-all)
	// policy, which we've ensured exists if not already specified;
	// internal names, however, need to be handled by an internal
	// issuer, which we need to make a new policy for, scoped to
	// just those names (yes, this logic is a bit asymmetric, but
	// it works, because our assumed/natural default issuer is an
	// ACME issuer)
	if len(internalNames) > 0 {
		internalIssuer := new(caddytls.InternalIssuer)

		// shallow-copy the base policy; we want to inherit
		// from it, not replace it... this takes two lines to
		// overrule compiler optimizations
		policyCopy := *basePolicy
		newPolicy := &policyCopy

		// very important to provision the issuer, since we
		// are bypassing the JSON-unmarshaling step
		if err := internalIssuer.Provision(ctx); err != nil {
			return err
		}

		// this policy should apply only to the given names
		// and should use our issuer -- yes, this overrides
		// any issuer that may have been set in the base
		// policy, but we do this because these names do not
		// already have a policy associated with them, which
		// is easy to do; consider the case of a Caddyfile
		// that has only "localhost" as a name, but sets the
		// default/global ACME CA to the Let's Encrypt staging
		// endpoint... they probably don't intend to change the
		// fundamental set of names that setting applies to,
		// rather they just want to change the CA for the set
		// of names that would normally use the production API;
		// anyway, that gets into the weeds a bit...
		newPolicy.SubjectsRaw = internalNames
		newPolicy.Issuers = []certmagic.Issuer{internalIssuer}
		err := app.tlsApp.AddAutomationPolicy(newPolicy)
		if err != nil {
			return err
		}
	}

	// tailscale names go in their own automation policies because
	// they require on-demand TLS to be enabled, which we obviously
	// can't enable for everything
	if len(tailscaleNames) > 0 {
		policyCopy := *basePolicy
		newPolicy := &policyCopy
		var ts caddytls.Tailscale
		if err := ts.Provision(ctx); err != nil {
			return err
		}
		newPolicy.SubjectsRaw = tailscaleNames
		newPolicy.Issuers = nil
		newPolicy.Managers = append(newPolicy.Managers, ts)
		err := app.tlsApp.AddAutomationPolicy(newPolicy)
		if err != nil {
			return err
		}
	}

	// we just changed a lot of stuff, so double-check that it's all good
	err := app.tlsApp.Validate()
	if err != nil {
		return err
	}

	return nil
}

// fillInACMEIssuer fills in default values into acmeIssuer that
// are defined in app; these values at time of writing are just
// app.HTTPPort and app.HTTPSPort, which are used by ACMEIssuer.
// Sure, we could just use the global/CertMagic defaults, but if
// a user has configured those ports in the HTTP app, it makes
// sense to use them in the TLS app too, even if they forgot (or
// were too lazy, like me) to set it in each automation policy
// that uses it -- this just makes things a little less tedious
// for the user, so they don't have to repeat those ports in
// potentially many places. This function never steps on existing
// config values. If any changes are made, acmeIssuer is
// reprovisioned. acmeIssuer must not be nil.
func (app *App) fillInACMEIssuer(acmeIssuer *caddytls.ACMEIssuer) error {
	// only allocate the challenges config if we actually have a port to fill in
	if app.HTTPPort > 0 || app.HTTPSPort > 0 {
		if acmeIssuer.Challenges == nil {
			acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
		}
	}
	if app.HTTPPort > 0 {
		if acmeIssuer.Challenges.HTTP == nil {
			acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
		}
		// don't overwrite existing explicit config
		if acmeIssuer.Challenges.HTTP.AlternatePort == 0 {
			acmeIssuer.Challenges.HTTP.AlternatePort = app.HTTPPort
		}
	}
	if app.HTTPSPort > 0 {
		if acmeIssuer.Challenges.TLSALPN == nil {
			acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
		}
		// don't overwrite existing explicit config
		if acmeIssuer.Challenges.TLSALPN.AlternatePort == 0 {
			acmeIssuer.Challenges.TLSALPN.AlternatePort = app.HTTPSPort
		}
	}

	// we must provision all ACME issuers, even if nothing
	// was changed, because we don't know if they are new
	// and haven't been provisioned yet; if an ACME issuer
	// never gets provisioned, its Agree field stays false,
	// which leads to, um, problems later on
	return acmeIssuer.Provision(app.ctx)
}

// automaticHTTPSPhase2 begins certificate management for
// all names in the qualifying domain set for each server.
// This phase must occur after provisioning and at the end
// of app start, after all the servers have been started.
// Doing this last ensures that there won't be any race
// for listeners on the HTTP or HTTPS ports when management
// is async (if CertMagic's solvers bind to those ports
// first, then our servers would fail to bind to them,
// which would be bad, since CertMagic's bindings are
// temporary and don't serve the user's sites!).
func (app *App) automaticHTTPSPhase2() error {
	// nothing to manage; skip the work entirely
	if len(app.allCertDomains) == 0 {
		return nil
	}
	app.logger.Info("enabling automatic TLS certificate management",
		zap.Strings("domains", internal.MaxSizeSubjectsListForLog(app.allCertDomains, 1000)),
	)
	err := app.tlsApp.Manage(app.allCertDomains)
	if err != nil {
		return fmt.Errorf("managing certificates for %d domains: %s", len(app.allCertDomains), err)
	}
	app.allCertDomains = nil // no longer needed; allow GC to deallocate
	return nil
}

// isTailscaleDomain reports whether name is within Tailscale's
// ".ts.net" zone (case-insensitive).
func isTailscaleDomain(name string) bool {
	return strings.HasSuffix(strings.ToLower(name), ".ts.net")
}

// acmeCapable matches issuer types that embed or wrap an ACMEIssuer.
type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }

// getFirstHostFromRoute traverses a route's matchers to find the Host rule.
// Since we are dealing with internally generated redirect routes, the host
// is typically the first string within the MatchHost.
func getFirstHostFromRoute(r Route) string {
	for _, matcherSet := range r.MatcherSets {
		for _, m := range matcherSet {
			// Check if the matcher is of type MatchHost (value or pointer)
			switch hm := m.(type) {
			case MatchHost:
				if len(hm) > 0 {
					return hm[0]
				}
			case *MatchHost:
				if len(*hm) > 0 {
					return (*hm)[0]
				}
			}
		}
	}
	// Return an empty string if it's a catch-all route (no specific host)
	return ""
}

================================================
FILE: modules/caddyhttp/caddyauth/argon2id.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyauth

import (
	"crypto/rand"
	"crypto/subtle"
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/crypto/argon2"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(Argon2idHash{})
}

// Default Argon2id parameters; defaultSaltLength is used by Hash
// when no salt has been set on the Argon2idHash value.
const (
	argon2idName           = "argon2id"
	defaultArgon2idTime    = 1
	defaultArgon2idMemory  = 46 * 1024
	defaultArgon2idThreads = 1
	defaultArgon2idKeylen  = 32
	defaultSaltLength      = 16
)

// Argon2idHash implements the Argon2id password hashing.
type Argon2idHash struct {
	// salt is the raw salt; Hash generates a random one if nil
	salt []byte
	// time is the iteration count (the "t" parameter in the PHC string)
	time uint32
	// memory is the memory parameter (the "m" parameter in the PHC string)
	memory uint32
	// threads is the parallelism degree (the "p" parameter in the PHC string)
	threads uint8
	// keyLen is the length of the derived key, in bytes
	keyLen uint32
}

// CaddyModule returns the Caddy module information.
func (Argon2idHash) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.authentication.hashes.argon2id",
		New: func() caddy.Module { return new(Argon2idHash) },
	}
}

// Compare checks if the plaintext password matches the given Argon2id hash.
func (Argon2idHash) Compare(hashed, plaintext []byte) (bool, error) {
	// recover the parameters and stored key from the PHC-formatted hash
	argHash, storedKey, err := DecodeHash(hashed)
	if err != nil {
		return false, err
	}
	// re-derive the key from the plaintext with the same parameters
	computedKey := argon2.IDKey(
		plaintext,
		argHash.salt,
		argHash.time,
		argHash.memory,
		argHash.threads,
		argHash.keyLen,
	)
	// constant-time comparison to avoid timing side-channels
	return subtle.ConstantTimeCompare(storedKey, computedKey) == 1, nil
}

// Hash generates an Argon2id hash of the given plaintext using the configured parameters and salt.
func (b Argon2idHash) Hash(plaintext []byte) ([]byte, error) {
	// generate a fresh random salt if none was configured
	if b.salt == nil {
		s, err := generateSalt(defaultSaltLength)
		if err != nil {
			return nil, err
		}
		b.salt = s
	}
	key := argon2.IDKey(
		plaintext,
		b.salt,
		b.time,
		b.memory,
		b.threads,
		b.keyLen,
	)
	// encode in PHC string format: $argon2id$v=..$m=..,t=..,p=..$salt$key
	hash := fmt.Sprintf(
		"$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s",
		argon2.Version,
		b.memory,
		b.time,
		b.threads,
		base64.RawStdEncoding.EncodeToString(b.salt),
		base64.RawStdEncoding.EncodeToString(key),
	)
	return []byte(hash), nil
}

// DecodeHash parses an Argon2id PHC string into an Argon2idHash struct and returns the struct along with the derived key.
func DecodeHash(hash []byte) (*Argon2idHash, []byte, error) {
	// a PHC string has 6 dollar-separated parts (the first is empty):
	// ""/variant/version/params/salt/key
	parts := strings.Split(string(hash), "$")
	if len(parts) != 6 {
		return nil, nil, fmt.Errorf("invalid hash format")
	}
	if parts[1] != argon2idName {
		return nil, nil, fmt.Errorf("unsupported variant: %s", parts[1])
	}
	version, err := strconv.Atoi(strings.TrimPrefix(parts[2], "v="))
	if err != nil {
		return nil, nil, fmt.Errorf("invalid version: %w", err)
	}
	// only the library's own Argon2 version is accepted
	if version != argon2.Version {
		return nil, nil, fmt.Errorf("incompatible version: %d", version)
	}
	params := strings.Split(parts[3], ",")
	if len(params) != 3 {
		return nil, nil, fmt.Errorf("invalid parameters")
	}
	mem, err := strconv.ParseUint(strings.TrimPrefix(params[0], "m="), 10, 32)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid memory parameter: %w", err)
	}
	iter, err := strconv.ParseUint(strings.TrimPrefix(params[1], "t="), 10, 32)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid iterations parameter: %w", err)
	}
	threads, err := strconv.ParseUint(strings.TrimPrefix(params[2], "p="), 10, 8)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid parallelism parameter: %w", err)
	}
	salt, err := base64.RawStdEncoding.Strict().DecodeString(parts[4])
	if err != nil {
		return nil, nil, fmt.Errorf("decode salt: %w", err)
	}
	key, err := base64.RawStdEncoding.Strict().DecodeString(parts[5])
	if err != nil {
		return nil, nil, fmt.Errorf("decode key: %w", err)
	}
	return &Argon2idHash{
		salt:    salt,
		time:    uint32(iter),
		memory:  uint32(mem),
		threads: uint8(threads),
		keyLen:  uint32(len(key)),
	}, key, nil
}

// FakeHash returns a constant fake hash for timing attacks mitigation.
func (Argon2idHash) FakeHash() []byte {
	// hashed with the following command:
	// caddy hash-password --plaintext "antitiming" --algorithm "argon2id"
	return []byte("$argon2id$v=19$m=47104,t=1,p=1$P2nzckEdTZ3bxCiBCkRTyA$xQL3Z32eo5jKl7u5tcIsnEKObYiyNZQQf5/4sAau6Pg")
}

// Interface guards
var (
	_ Comparer = (*Argon2idHash)(nil)
	_ Hasher   = (*Argon2idHash)(nil)
)

// generateSalt returns length cryptographically-random bytes
// suitable for use as an Argon2id salt.
func generateSalt(length int) ([]byte, error) {
	salt := make([]byte, length)
	if _, err := rand.Read(salt); err != nil {
		return nil, fmt.Errorf("failed to generate salt: %w", err)
	}
	return salt, nil
}

================================================
FILE: modules/caddyhttp/caddyauth/basicauth.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyauth

import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	weakrand "math/rand/v2"
	"net/http"
	"strings"
	"sync"

	"golang.org/x/sync/singleflight"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(HTTPBasicAuth{})
}

// HTTPBasicAuth facilitates HTTP basic authentication.
type HTTPBasicAuth struct {
	// The algorithm with which the passwords are hashed.
// Default: bcrypt
	HashRaw json.RawMessage `json:"hash,omitempty" caddy:"namespace=http.authentication.hashes inline_key=algorithm"`

	// The list of accounts to authenticate.
	AccountList []Account `json:"accounts,omitempty"`

	// The name of the realm. Default: restricted
	Realm string `json:"realm,omitempty"`

	// If non-nil, a mapping of plaintext passwords to their
	// hashes will be cached in memory (with random eviction).
	// This can greatly improve the performance of traffic-heavy
	// servers that use secure password hashing algorithms, with
	// the downside that plaintext passwords will be stored in
	// memory for a longer time (this should not be a problem
	// as long as your machine is not compromised, at which point
	// all bets are off, since basicauth necessitates plaintext
	// passwords being received over the wire anyway). Note that
	// a cache hit does not mean it is a valid password.
	HashCache *Cache `json:"hash_cache,omitempty"`

	// Accounts indexed by username; built from AccountList during Provision.
	Accounts map[string]Account `json:"-"`
	// Hash is the loaded password-comparison module (from HashRaw).
	Hash Comparer `json:"-"`

	// fakePassword is used when a given user is not found,
	// so that timing side-channels can be mitigated: it gives
	// us something to hash and compare even if the user does
	// not exist, which should have similar timing as a user
	// account that does exist.
	fakePassword []byte
}

// CaddyModule returns the Caddy module information.
func (HTTPBasicAuth) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.authentication.providers.http_basic",
		New: func() caddy.Module { return new(HTTPBasicAuth) },
	}
}

// Provision provisions the HTTP basic auth provider.
func (hba *HTTPBasicAuth) Provision(ctx caddy.Context) error {
	// default to bcrypt when no hash algorithm is configured
	if hba.HashRaw == nil {
		hba.HashRaw = json.RawMessage(`{"algorithm": "bcrypt"}`)
	}

	// load password hasher
	hasherIface, err := ctx.LoadModule(hba, "HashRaw")
	if err != nil {
		return fmt.Errorf("loading password hasher module: %v", err)
	}
	hba.Hash = hasherIface.(Comparer)

	if hba.Hash == nil {
		return fmt.Errorf("hash is required")
	}

	// if supported, generate a fake password we can compare against if needed
	if hasher, ok := hba.Hash.(Hasher); ok {
		hba.fakePassword = hasher.FakeHash()
	}

	repl := caddy.NewReplacer()

	// load account list
	hba.Accounts = make(map[string]Account)
	for i, acct := range hba.AccountList {
		if _, ok := hba.Accounts[acct.Username]; ok {
			return fmt.Errorf("account %d: username is not unique: %s", i, acct.Username)
		}

		// expand placeholders in credentials (e.g. environment variables)
		acct.Username = repl.ReplaceAll(acct.Username, "")
		acct.Password = repl.ReplaceAll(acct.Password, "")

		if acct.Username == "" || acct.Password == "" {
			return fmt.Errorf("account %d: username and password are required", i)
		}

		// TODO: Remove support for redundantly-encoded b64-encoded hashes
		// Passwords starting with '$' are likely in Modular Crypt Format,
		// so we don't need to base64 decode them. But historically, we
		// required redundant base64, so we try to decode it otherwise.
		if strings.HasPrefix(acct.Password, "$") {
			acct.password = []byte(acct.Password)
		} else {
			acct.password, err = base64.StdEncoding.DecodeString(acct.Password)
			if err != nil {
				return fmt.Errorf("base64-decoding password: %v", err)
			}
		}

		hba.Accounts[acct.Username] = acct
	}
	hba.AccountList = nil // allow GC to deallocate

	// initialize the optional hash cache's internals
	if hba.HashCache != nil {
		hba.HashCache.cache = make(map[string]bool)
		hba.HashCache.mu = new(sync.RWMutex)
		hba.HashCache.g = new(singleflight.Group)
	}

	return nil
}

// Authenticate validates the user credentials in req and returns the user, if valid.
func (hba HTTPBasicAuth) Authenticate(w http.ResponseWriter, req *http.Request) (User, bool, error) {
	username, plaintextPasswordStr, ok := req.BasicAuth()
	if !ok {
		// no credentials provided; challenge the client
		return hba.promptForCredentials(w, nil)
	}

	account, accountExists := hba.Accounts[username]
	if !accountExists {
		// don't return early if account does not exist; we want
		// to try to avoid side-channels that leak existence, so
		// we use a fake password to simulate realistic CPU cycles
		account.password = hba.fakePassword
	}

	same, err := hba.correctPassword(account, []byte(plaintextPasswordStr))
	if err != nil || !same || !accountExists {
		return hba.promptForCredentials(w, err)
	}

	return User{ID: username}, true, nil
}

// correctPassword reports whether plaintextPassword matches the stored
// hash for account, consulting (and populating) the optional hash cache.
func (hba HTTPBasicAuth) correctPassword(account Account, plaintextPassword []byte) (bool, error) {
	compare := func() (bool, error) {
		return hba.Hash.Compare(account.password, plaintextPassword)
	}

	// if no caching is enabled, simply return the result of hashing + comparing
	if hba.HashCache == nil {
		return compare()
	}

	// compute a cache key that is unique for these input parameters
	cacheKey := hex.EncodeToString(append(account.password, plaintextPassword...))

	// fast track: if the result of the input is already cached, use it
	hba.HashCache.mu.RLock()
	same, ok := hba.HashCache.cache[cacheKey]
	hba.HashCache.mu.RUnlock()
	if ok {
		return same, nil
	}

	// slow track: do the expensive op, then add it to the cache
	// but perform it in a singleflight group so that multiple
	// parallel requests using the same password don't cause a
	// thundering herd problem by all performing the same hashing
	// operation before the first one finishes and caches it.
	v, err, _ := hba.HashCache.g.Do(cacheKey, func() (any, error) {
		return compare()
	})
	if err != nil {
		return false, err
	}
	same = v.(bool)
	hba.HashCache.mu.Lock()
	if len(hba.HashCache.cache) >= 1000 {
		hba.HashCache.makeRoom() // keep cache size under control
	}
	hba.HashCache.cache[cacheKey] = same
	hba.HashCache.mu.Unlock()

	return same, nil
}

// promptForCredentials sets the WWW-Authenticate challenge header on w
// and returns an unauthenticated result, passing err through to the caller.
func (hba HTTPBasicAuth) promptForCredentials(w http.ResponseWriter, err error) (User, bool, error) {
	// browsers show a message that says something like:
	// "The website says: "
	// which is kinda dumb, but whatever.
	realm := hba.Realm
	if realm == "" {
		realm = "restricted"
	}
	w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
	return User{}, false, err
}

// Cache enables caching of basic auth results. This is especially
// helpful for secure password hashes which can be expensive to
// compute on every HTTP request.
type Cache struct {
	mu *sync.RWMutex
	g  *singleflight.Group

	// map of concatenated hashed password + plaintext password, to result
	cache map[string]bool
}

// makeRoom deletes about 1/10 of the items in the cache
// in order to keep its size under control. It must not be
// called without a lock on c.mu.
func (c *Cache) makeRoom() {
	// we delete more than just 1 entry so that we don't have
	// to do this on every request; assuming the capacity of
	// the cache is on a long tail, we can save a lot of CPU
	// time by doing a whole bunch of deletions now and then
	// we won't have to do them again for a while
	numToDelete := max(len(c.cache)/10, 1)
	for deleted := 0; deleted <= numToDelete; deleted++ {
		// Go maps are "nondeterministic" not actually random,
		// so although we could just chop off the "front" of the
		// map with less code, this is a heavily skewed eviction
		// strategy; generating random numbers is cheap and
		// ensures a much better distribution.
		//nolint:gosec
		rnd := weakrand.IntN(len(c.cache))
		i := 0
		for key := range c.cache {
			if i == rnd {
				delete(c.cache, key)
				break
			}
			i++
		}
	}
}

// Comparer is a type that can securely compare
// a plaintext password with a hashed password
// in constant-time. Comparers should hash the
// plaintext password and then use constant-time
// comparison.
type Comparer interface {
	// Compare returns true if the result of hashing
	// plaintextPassword is hashedPassword, false
	// otherwise. An error is returned only if
	// there is a technical/configuration error.
	Compare(hashedPassword, plaintextPassword []byte) (bool, error)
}

// Hasher is a type that can generate a secure hash
// given a plaintext. Hashing modules which implement
// this interface can be used with the hash-password
// subcommand as well as benefitting from anti-timing
// features. A hasher also returns a fake hash which
// can be used for timing side-channel mitigation.
type Hasher interface {
	Hash(plaintext []byte) ([]byte, error)
	FakeHash() []byte
}

// Account contains a username and password.
type Account struct {
	// A user's username.
	Username string `json:"username"`

	// The user's hashed password, in Modular Crypt Format (with `$` prefix)
	// or base64-encoded.
	Password string `json:"password"`

	//nolint:gosec // false positive, this is a hashed password
	password []byte
}

// Interface guards
var (
	_ caddy.Provisioner = (*HTTPBasicAuth)(nil)
	_ Authenticator     = (*HTTPBasicAuth)(nil)
)

================================================
FILE: modules/caddyhttp/caddyauth/bcrypt.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyauth import ( "errors" "golang.org/x/crypto/bcrypt" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(BcryptHash{}) } // defaultBcryptCost cost 14 strikes a solid balance between security, usability, and hardware performance const ( bcryptName = "bcrypt" defaultBcryptCost = 14 ) // BcryptHash implements the bcrypt hash. type BcryptHash struct { // cost is the bcrypt hashing difficulty factor (work factor). // Higher values increase computation time and security. cost int } // CaddyModule returns the Caddy module information. func (BcryptHash) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.authentication.hashes.bcrypt", New: func() caddy.Module { return new(BcryptHash) }, } } // Compare compares passwords. func (BcryptHash) Compare(hashed, plaintext []byte) (bool, error) { err := bcrypt.CompareHashAndPassword(hashed, plaintext) if errors.Is(err, bcrypt.ErrMismatchedHashAndPassword) { return false, nil } if err != nil { return false, err } return true, nil } // Hash hashes plaintext using a random salt. func (b BcryptHash) Hash(plaintext []byte) ([]byte, error) { cost := b.cost if cost < bcrypt.MinCost || cost > bcrypt.MaxCost { cost = defaultBcryptCost } return bcrypt.GenerateFromPassword(plaintext, cost) } // FakeHash returns a fake hash. 
func (BcryptHash) FakeHash() []byte {
	// hashed with the following command:
	// caddy hash-password --plaintext "antitiming" --algorithm "bcrypt"
	return []byte("$2a$14$X3ulqf/iGxnf1k6oMZ.RZeJUoqI9PX2PM4rS5lkIKJXduLGXGPrt6")
}

// Interface guards
var (
	_ Comparer = (*BcryptHash)(nil)
	_ Hasher   = (*BcryptHash)(nil)
)


================================================
FILE: modules/caddyhttp/caddyauth/caddyauth.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyauth

import (
	"fmt"
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Authentication{})
}

// Authentication is a middleware which provides user authentication.
// Rejects requests with HTTP 401 if the request is not authenticated.
//
// After a successful authentication, the placeholder
// `{http.auth.user.id}` will be set to the username, and also
// `{http.auth.user.*}` placeholders may be set for any authentication
// modules that provide user metadata.
//
// In case of an error, the placeholder `{http.auth.<provider>.error}`
// will be set to the error message returned by the authentication
// provider.
//
// Its API is still experimental and may be subject to change.
type Authentication struct {
	// A set of authentication providers. If none are specified,
	// all requests will always be unauthenticated.
	ProvidersRaw caddy.ModuleMap `json:"providers,omitempty" caddy:"namespace=http.authentication.providers"`

	// Providers is the decoded set of providers, keyed by module
	// name; populated from ProvidersRaw during Provision.
	Providers map[string]Authenticator `json:"-"`

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (Authentication) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.authentication",
		New: func() caddy.Module { return new(Authentication) },
	}
}

// Provision sets up an Authentication module by initializing its logger,
// loading and registering all configured authentication providers.
func (a *Authentication) Provision(ctx caddy.Context) error {
	a.logger = ctx.Logger()
	a.Providers = make(map[string]Authenticator)
	mods, err := ctx.LoadModule(a, "ProvidersRaw")
	if err != nil {
		return fmt.Errorf("loading authentication providers: %v", err)
	}
	for modName, modIface := range mods.(map[string]any) {
		a.Providers[modName] = modIface.(Authenticator)
	}
	return nil
}

// ServeHTTP tries each configured provider until one authenticates the
// request; a provider error is logged and recorded in a placeholder but
// does not stop the remaining providers from being tried. If no provider
// authenticates the request, an HTTP 401 error is returned.
//
// NOTE: providers are tried in Go map iteration order, which is
// randomized — the order is not configurable.
func (a Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	var user User
	var authed bool
	var err error
	for provName, prov := range a.Providers {
		user, authed, err = prov.Authenticate(w, r)
		if err != nil {
			if c := a.logger.Check(zapcore.ErrorLevel, "auth provider returned error"); c != nil {
				c.Write(zap.String("provider", provName), zap.Error(err))
			}
			// Set the error from the authentication provider in a placeholder,
			// so it can be used in the handle_errors directive.
			repl.Set("http.auth."+provName+".error", err.Error())
			continue
		}
		if authed {
			break
		}
	}
	if !authed {
		return caddyhttp.Error(http.StatusUnauthorized, fmt.Errorf("not authenticated"))
	}

	// expose the authenticated user's identity and metadata as placeholders
	repl.Set("http.auth.user.id", user.ID)
	for k, v := range user.Metadata {
		repl.Set("http.auth.user."+k, v)
	}

	return next.ServeHTTP(w, r)
}

// Authenticator is a type which can authenticate a request.
// If a request was not authenticated, it returns false. An
// error is only returned if authenticating the request fails
// for a technical reason (not for bad/missing credentials).
type Authenticator interface {
	Authenticate(http.ResponseWriter, *http.Request) (User, bool, error)
}

// User represents an authenticated user.
type User struct {
	// The ID of the authenticated user.
	ID string

	// Any other relevant data about this
	// user. Keys should adhere to Caddy
	// conventions (snake_casing), as all
	// keys will be made available as
	// placeholders.
	Metadata map[string]string
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Authentication)(nil)
	_ caddyhttp.MiddlewareHandler = (*Authentication)(nil)
)


================================================
FILE: modules/caddyhttp/caddyauth/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyauth

import (
	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("basicauth", parseCaddyfile) // deprecated
	httpcaddyfile.RegisterHandlerDirective("basic_auth", parseCaddyfile)
}

// parseCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	basic_auth [<matcher>] [<hash_algorithm> [<realm>]] {
//		<username> <hashed_password_base64>
//		...
//	}
//
// If no hash algorithm is supplied, bcrypt will be assumed.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	// "basicauth" is deprecated, replaced by "basic_auth"
	if h.Val() == "basicauth" {
		caddy.Log().Named("config.adapter.caddyfile").Warn("the 'basicauth' directive is deprecated, please use 'basic_auth' instead!")
	}

	var ba HTTPBasicAuth
	ba.HashCache = new(Cache)

	var cmp Comparer
	args := h.RemainingArgs()

	// optional positional args: [<hash_algorithm> [<realm>]]
	var hashName string
	switch len(args) {
	case 0:
		hashName = bcryptName
	case 1:
		hashName = args[0]
	case 2:
		hashName = args[0]
		ba.Realm = args[1]
	default:
		return nil, h.ArgErr()
	}

	switch hashName {
	case bcryptName:
		cmp = BcryptHash{}
	case argon2idName:
		cmp = Argon2idHash{}
	default:
		return nil, h.Errf("unrecognized hash algorithm: %s", hashName)
	}

	ba.HashRaw = caddyconfig.JSONModuleObject(cmp, "algorithm", hashName, nil)

	// each line in the block is one "<username> <hashed_password>" account
	for h.NextBlock(0) {
		username := h.Val()

		var b64Pwd string
		h.Args(&b64Pwd)
		if h.NextArg() {
			return nil, h.ArgErr()
		}

		if username == "" || b64Pwd == "" {
			return nil, h.Err("username and password cannot be empty or missing")
		}

		ba.AccountList = append(ba.AccountList, Account{
			Username: username,
			Password: b64Pwd,
		})
	}

	return Authentication{
		ProvidersRaw: caddy.ModuleMap{
			"http_basic": caddyconfig.JSON(ba, nil),
		},
	}, nil
}


================================================
FILE: modules/caddyhttp/caddyauth/command.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. package caddyauth import ( "bufio" "bytes" "fmt" "os" "os/signal" "github.com/spf13/cobra" "golang.org/x/term" caddycmd "github.com/caddyserver/caddy/v2/cmd" "github.com/caddyserver/caddy/v2" ) func init() { caddycmd.RegisterCommand(caddycmd.Command{ Name: "hash-password", Usage: "[--plaintext ] [--algorithm ] [--bcrypt-cost ] [--argon2id-time ] [--argon2id-memory ] [--argon2id-threads ] [--argon2id-keylen ]", Short: "Hashes a password and writes base64", Long: ` Convenient way to hash a plaintext password. The resulting hash is written to stdout as a base64 string. --plaintext The password to hash. If omitted, it will be read from stdin. If Caddy is attached to a controlling TTY, the input will not be echoed. --algorithm Selects the hashing algorithm. Valid options are: * 'argon2id' (recommended for modern security) * 'bcrypt' (legacy, slower, configurable cost) bcrypt-specific parameters: --bcrypt-cost Sets the bcrypt hashing difficulty. Higher values increase security by making the hash computation slower and more CPU-intensive. Must be within the valid range [bcrypt.MinCost, bcrypt.MaxCost]. If omitted or invalid, the default cost is used. Argon2id-specific parameters: --argon2id-time Number of iterations to perform. Increasing this makes hashing slower and more resistant to brute-force attacks. --argon2id-memory Amount of memory to use during hashing. Larger values increase resistance to GPU/ASIC attacks. --argon2id-threads Number of CPU threads to use. Increase for faster hashing on multi-core systems. --argon2id-keylen Length of the resulting hash in bytes. Longer keys increase security but slightly increase storage size. 
`, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("plaintext", "p", "", "The plaintext password") cmd.Flags().StringP("algorithm", "a", bcryptName, "Name of the hash algorithm") cmd.Flags().Int("bcrypt-cost", defaultBcryptCost, "Bcrypt hashing cost (only used with 'bcrypt' algorithm)") cmd.Flags().Uint32("argon2id-time", defaultArgon2idTime, "Number of iterations for Argon2id hashing. Increasing this makes the hash slower and more resistant to brute-force attacks.") cmd.Flags().Uint32("argon2id-memory", defaultArgon2idMemory, "Memory to use in KiB for Argon2id hashing. Larger values increase resistance to GPU/ASIC attacks.") cmd.Flags().Uint8("argon2id-threads", defaultArgon2idThreads, "Number of CPU threads to use for Argon2id hashing. Increase for faster hashing on multi-core systems.") cmd.Flags().Uint32("argon2id-keylen", defaultArgon2idKeylen, "Length of the resulting Argon2id hash in bytes. Longer hashes increase security but slightly increase storage size.") cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdHashPassword) }, }) } func cmdHashPassword(fs caddycmd.Flags) (int, error) { var err error algorithm := fs.String("algorithm") plaintext := []byte(fs.String("plaintext")) bcryptCost := fs.Int("bcrypt-cost") if len(plaintext) == 0 { fd := int(os.Stdin.Fd()) if term.IsTerminal(fd) { // ensure the terminal state is restored on SIGINT state, _ := term.GetState(fd) c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c _ = term.Restore(fd, state) os.Exit(caddy.ExitCodeFailedStartup) }() defer signal.Stop(c) fmt.Fprint(os.Stderr, "Enter password: ") plaintext, err = term.ReadPassword(fd) fmt.Fprintln(os.Stderr) if err != nil { return caddy.ExitCodeFailedStartup, err } fmt.Fprint(os.Stderr, "Confirm password: ") confirmation, err := term.ReadPassword(fd) fmt.Fprintln(os.Stderr) if err != nil { return caddy.ExitCodeFailedStartup, err } if !bytes.Equal(plaintext, confirmation) { return caddy.ExitCodeFailedStartup, fmt.Errorf("password 
does not match") } } else { rd := bufio.NewReader(os.Stdin) plaintext, err = rd.ReadBytes('\n') if err != nil { return caddy.ExitCodeFailedStartup, err } plaintext = plaintext[:len(plaintext)-1] // Trailing newline } if len(plaintext) == 0 { return caddy.ExitCodeFailedStartup, fmt.Errorf("plaintext is required") } } var hash []byte var hashString string switch algorithm { case bcryptName: hash, err = BcryptHash{cost: bcryptCost}.Hash(plaintext) hashString = string(hash) case argon2idName: time, err := fs.GetUint32("argon2id-time") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to get argon2id time parameter: %w", err) } memory, err := fs.GetUint32("argon2id-memory") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to get argon2id memory parameter: %w", err) } threads, err := fs.GetUint8("argon2id-threads") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to get argon2id threads parameter: %w", err) } keyLen, err := fs.GetUint32("argon2id-keylen") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to get argon2id keylen parameter: %w", err) } hash, _ = Argon2idHash{ time: time, memory: memory, threads: threads, keyLen: keyLen, }.Hash(plaintext) hashString = string(hash) default: return caddy.ExitCodeFailedStartup, fmt.Errorf("unrecognized hash algorithm: %s", algorithm) } if err != nil { return caddy.ExitCodeFailedStartup, err } fmt.Println(hashString) return 0, nil } ================================================ FILE: modules/caddyhttp/caddyhttp.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"bytes"
	"encoding/json"
	"io"
	"net"
	"net/http"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(tlsPlaceholderWrapper{})
}

// RequestMatcher is a type that can match to a request.
// A route matcher MUST NOT modify the request, with the
// only exception being its context.
//
// Deprecated: Matchers should now implement RequestMatcherWithError.
// You may remove any interface guards for RequestMatcher
// but keep your Match() methods for backwards compatibility.
type RequestMatcher interface {
	Match(*http.Request) bool
}

// RequestMatcherWithError is like RequestMatcher but can return an error.
// An error during matching will abort the request middleware chain and
// invoke the error middleware chain.
//
// This will eventually replace RequestMatcher. Matcher modules
// should implement both interfaces, and once all modules have
// been updated to use RequestMatcherWithError, the RequestMatcher
// interface may eventually be dropped.
type RequestMatcherWithError interface {
	MatchWithError(*http.Request) (bool, error)
}

// Handler is like http.Handler except ServeHTTP may return an error.
//
// If any handler encounters an error, it should be returned for proper
// handling. Return values should be propagated down the middleware chain
// by returning it unchanged. Returned errors should not be re-wrapped
// if they are already HandlerError values.
type Handler interface {
	ServeHTTP(http.ResponseWriter, *http.Request) error
}

// HandlerFunc is a convenience type like http.HandlerFunc.
type HandlerFunc func(http.ResponseWriter, *http.Request) error

// ServeHTTP implements the Handler interface.
func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
	return f(w, r)
}

// Middleware chains one Handler to the next by being passed
// the next Handler in the chain.
type Middleware func(Handler) Handler

// MiddlewareHandler is like Handler except it takes as a third
// argument the next handler in the chain. The next handler will
// never be nil, but may be a no-op handler if this is the last
// handler in the chain. Handlers which act as middleware should
// call the next handler's ServeHTTP method so as to propagate
// the request down the chain properly. Handlers which act as
// responders (content origins) need not invoke the next handler,
// since the last handler in the chain should be the first to
// write the response.
type MiddlewareHandler interface {
	ServeHTTP(http.ResponseWriter, *http.Request, Handler) error
}

// emptyHandler is used as a no-op handler; it records in the request's
// vars that the request went unhandled.
var emptyHandler Handler = HandlerFunc(func(_ http.ResponseWriter, req *http.Request) error {
	SetVar(req.Context(), "unhandled", true)
	return nil
})

// An implicit suffix middleware that, if reached, sets the StatusCode to the
// error stored in the ErrorCtxKey. This is to prevent situations where the
// Error chain does not actually handle the error (for instance, it matches only
// on some errors). See #3053
var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
	httpError := r.Context().Value(ErrorCtxKey)
	if handlerError, ok := httpError.(HandlerError); ok {
		w.WriteHeader(handlerError.StatusCode)
	} else {
		// non-HandlerError (or missing) errors default to 500
		w.WriteHeader(http.StatusInternalServerError)
	}
	return nil
})

// ResponseHandler pairs a response matcher with custom handling
// logic. Either the status code can be changed to something else
// while using the original response body, or, if a status code
// is not set, it can execute a custom route list; this is useful
// for executing handler routes based on the properties of an HTTP
// response that has not been written out to the client yet.
//
// To use this type, provision it at module load time, then when
// ready to use, match the response against its matcher; if it
// matches (or doesn't have a matcher), change the status code on
// the response if configured; otherwise invoke the routes by
// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar).
type ResponseHandler struct {
	// The response matcher for this handler. If empty/nil,
	// it always matches.
	Match *ResponseMatcher `json:"match,omitempty"`

	// To write the original response body but with a different
	// status code, set this field to the desired status code.
	// If set, this takes priority over routes.
	StatusCode WeakString `json:"status_code,omitempty"`

	// The list of HTTP routes to execute if no status code is
	// specified. If evaluated, the original response body
	// will not be written.
	Routes RouteList `json:"routes,omitempty"`
}

// Provision sets up the routes in rh.
func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
	if rh.Routes != nil {
		err := rh.Routes.Provision(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

// WeakString is a type that unmarshals any JSON value
// as a string literal, with the following exceptions:
//
//  1. actual string values are decoded as strings; and
//  2. null is decoded as empty string;
//
// and provides methods for getting the value as various
// primitive types. However, using this type removes any
// type safety as far as deserializing JSON is concerned.
type WeakString string

// UnmarshalJSON satisfies json.Unmarshaler according to
// this type's documentation.
func (ws *WeakString) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return io.EOF
	}
	// quoted value: decode as an ordinary JSON string
	if b[0] == byte('"') && b[len(b)-1] == byte('"') {
		var s string
		err := json.Unmarshal(b, &s)
		if err != nil {
			return err
		}
		*ws = WeakString(s)
		return nil
	}
	// null decodes as the empty string
	if bytes.Equal(b, []byte("null")) {
		return nil
	}
	// anything else (numbers, booleans, etc.) is kept as its raw text
	*ws = WeakString(b)
	return nil
}

// MarshalJSON marshals ws as a boolean if true or false,
// a number if an integer, or a string otherwise.
func (ws WeakString) MarshalJSON() ([]byte, error) {
	if ws == "true" {
		return []byte("true"), nil
	}
	if ws == "false" {
		return []byte("false"), nil
	}
	if num, err := strconv.Atoi(string(ws)); err == nil {
		return json.Marshal(num)
	}
	return json.Marshal(string(ws))
}

// Int returns ws as an integer. If ws is not an
// integer, 0 is returned.
func (ws WeakString) Int() int {
	num, _ := strconv.Atoi(string(ws))
	return num
}

// Float64 returns ws as a float64. If ws is not a
// float value, the zero value is returned.
func (ws WeakString) Float64() float64 {
	num, _ := strconv.ParseFloat(string(ws), 64)
	return num
}

// Bool returns ws as a boolean. If ws is not a
// boolean, false is returned.
func (ws WeakString) Bool() bool {
	return string(ws) == "true"
}

// String returns ws as a string.
func (ws WeakString) String() string {
	return string(ws)
}

// StatusCodeMatches returns true if a real HTTP status code matches
// the configured status code, which may be either a real HTTP status
// code or an integer representing a class of codes (e.g. 4 for all
// 4xx statuses).
func StatusCodeMatches(actual, configured int) bool {
	if actual == configured {
		return true
	}
	// a single-digit configured value matches its whole class,
	// e.g. 4 matches 400-499
	if configured < 100 && actual >= configured*100 && actual < (configured+1)*100 {
		return true
	}
	return false
}

// SanitizedPathJoin performs filepath.Join(root, reqPath) that
// is safe against directory traversal attacks. It uses logic
// similar to that in the Go standard library, specifically
// in the implementation of http.Dir. The root is assumed to
// be a trusted path, but reqPath is not; and the output will
// never be outside of root. The resulting path can be used
// with the local file system. If root is empty, the current
// directory is assumed. If the cleaned request path is deemed
// not local according to lexical processing (i.e. ignoring links),
// it will be rejected as unsafe and only the root will be returned.
func SanitizedPathJoin(root, reqPath string) string {
	if root == "" {
		root = "."
	}

	relPath := path.Clean("/" + reqPath)[1:] // clean path and trim the leading /
	if relPath != "" && !filepath.IsLocal(relPath) {
		// path is unsafe (see https://github.com/golang/go/issues/56336#issuecomment-1416214885)
		return root
	}

	// NOTE(review): this local deliberately shadows the `path` package
	// for the remainder of the function
	path := filepath.Join(root, filepath.FromSlash(relPath))

	// filepath.Join also cleans the path, and cleaning strips
	// the trailing slash, so we need to re-add it afterwards.
	// if the length is 1, then it's a path to the root,
	// and that should return ".", so we don't append the separator.
	if strings.HasSuffix(reqPath, "/") && len(reqPath) > 1 {
		path += separator
	}

	return path
}

// CleanPath cleans path p according to path.Clean(), but only
// merges repeated slashes if collapseSlashes is true, and always
// preserves trailing slashes.
func CleanPath(p string, collapseSlashes bool) string {
	if collapseSlashes {
		return cleanPath(p)
	}

	// insert an invalid/impossible URI character into each two consecutive
	// slashes to expand empty path segments; then clean the path as usual,
	// and then remove the remaining temporary characters.
	const tmpCh = 0xff
	var sb strings.Builder
	for i, ch := range p {
		if ch == '/' && i > 0 && p[i-1] == '/' {
			sb.WriteByte(tmpCh)
		}
		sb.WriteRune(ch)
	}
	halfCleaned := cleanPath(sb.String())
	halfCleaned = strings.ReplaceAll(halfCleaned, string([]byte{tmpCh}), "")

	return halfCleaned
}

// cleanPath does path.Clean(p) but preserves any trailing slash.
func cleanPath(p string) string {
	cleaned := path.Clean(p)
	// path.Clean strips a trailing slash; restore it (except for "/")
	if cleaned != "/" && strings.HasSuffix(p, "/") {
		cleaned = cleaned + "/"
	}
	return cleaned
}

// tlsPlaceholderWrapper is a no-op listener wrapper that marks
// where the TLS listener should be in a chain of listener wrappers.
// It should only be used if another listener wrapper must be placed
// in front of the TLS handshake.
type tlsPlaceholderWrapper struct{}

// CaddyModule returns the Caddy module information.
func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.listeners.tls",
		New: func() caddy.Module { return new(tlsPlaceholderWrapper) },
	}
}

// WrapListener returns ln unchanged; this wrapper is only a position marker.
func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln }

// UnmarshalCaddyfile is a no-op; this wrapper takes no configuration.
func (tlsPlaceholderWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil }

const (
	// DefaultHTTPPort is the default port for HTTP.
	DefaultHTTPPort = 80

	// DefaultHTTPSPort is the default port for HTTPS.
	DefaultHTTPSPort = 443
)

// separator is the OS-specific path separator as a string.
const separator = string(filepath.Separator)

// Interface guard
var (
	_ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
	_ caddyfile.Unmarshaler = (*tlsPlaceholderWrapper)(nil)
)


================================================
FILE: modules/caddyhttp/caddyhttp_test.go
================================================
package caddyhttp

import (
	"net/url"
	"path/filepath"
	"runtime"
	"testing"
)

func TestSanitizedPathJoin(t *testing.T) {
	// For reference:
	// %2e = .
	// %2f = /
	// %5c = \
	for i, tc := range []struct {
		inputRoot     string
		inputPath     string
		expect        string
		expectWindows string
	}{
		{
			inputPath: "",
			expect:    ".",
		},
		{
			inputPath: "/",
			expect:    ".",
		},
		{
			// fileserver.MatchFile passes an inputPath of "//" for some try_files values.
			// See https://github.com/caddyserver/caddy/issues/6352
			inputPath: "//",
			expect:    filepath.FromSlash("./"),
		},
		{
			inputPath: "/foo",
			expect:    "foo",
		},
		{
			inputPath: "/foo/",
			expect:    filepath.FromSlash("foo/"),
		},
		{
			inputPath: "/foo/bar",
			expect:    filepath.FromSlash("foo/bar"),
		},
		{
			inputRoot: "/a",
			inputPath: "/foo/bar",
			expect:    filepath.FromSlash("/a/foo/bar"),
		},
		{
			inputPath: "/foo/../bar",
			expect:    "bar",
		},
		{
			inputRoot: "/a/b",
			inputPath: "/foo/../bar",
			expect:    filepath.FromSlash("/a/b/bar"),
		},
		{
			inputRoot: "/a/b",
			inputPath: "/..%2fbar",
			expect:    filepath.FromSlash("/a/b/bar"),
		},
		{
			inputRoot: "/a/b",
			inputPath: "/%2e%2e%2fbar",
			expect:    filepath.FromSlash("/a/b/bar"),
		},
		{
			// inputPath fails the IsLocal test so only the root is returned,
			// but with a trailing slash since one was included in inputPath
			inputRoot: "/a/b",
			inputPath: "/%2e%2e%2f%2e%2e%2f",
			expect:    filepath.FromSlash("/a/b/"),
		},
		{
			inputRoot: "/a/b",
			inputPath: "/foo%2fbar",
			expect:    filepath.FromSlash("/a/b/foo/bar"),
		},
		{
			inputRoot: "/a/b",
			inputPath: "/foo%252fbar",
			expect:    filepath.FromSlash("/a/b/foo%2fbar"),
		},
		{
			inputRoot: "C:\\www",
			inputPath: "/foo/bar",
			expect:    filepath.Join("C:\\www", "foo", "bar"),
		},
		{
			inputRoot:     "C:\\www",
			inputPath:     "/D:\\foo\\bar",
			expect:        filepath.Join("C:\\www", "D:\\foo\\bar"),
			expectWindows: "C:\\www", // inputPath fails IsLocal on Windows
		},
		{
			inputRoot:     `C:\www`,
			inputPath:     `/..\windows\win.ini`,
			expect:        `C:\www/..\windows\win.ini`,
			expectWindows: `C:\www`,
		},
		{
			inputRoot:     `C:\www`,
			inputPath:     `/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
			expect:        `C:\www/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
			expectWindows: `C:\www`,
		},
		{
			inputRoot:     `C:\www`,
			inputPath:     `/..%5cwindows%5cwin.ini`,
			expect:        `C:\www/..\windows\win.ini`,
			expectWindows: `C:\www`,
		},
		{
			inputRoot:     `C:\www`,
			inputPath:     `/..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5c..%5cwindows%5cwin.ini`,
			expect:        `C:\www/..\..\..\..\..\..\..\..\..\..\windows\win.ini`,
			expectWindows: `C:\www`,
		},
		{
			// https://github.com/golang/go/issues/56336#issuecomment-1416214885
			inputRoot: "root",
			inputPath: "/a/b/../../c",
			expect:    filepath.FromSlash("root/c"),
		},
	} {
		// we don't *need* to use an actual parsed URL, but it
		// adds some authenticity to the tests since real-world
		// values will be coming in from URLs; thus, the test
		// corpus can contain paths as encoded by clients, which
		// more closely emulates the actual attack vector
		u, err := url.Parse("http://test:9999" + tc.inputPath)
		if err != nil {
			t.Fatalf("Test %d: invalid URL: %v", i, err)
		}
		actual := SanitizedPathJoin(tc.inputRoot, u.Path)
		if runtime.GOOS == "windows" && tc.expectWindows != "" {
			tc.expect = tc.expectWindows
		}
		if actual != tc.expect {
			t.Errorf("Test %d: SanitizedPathJoin('%s', '%s') => '%s' (expected '%s')",
				i, tc.inputRoot, tc.inputPath, actual, tc.expect)
		}
	}
}

func TestCleanPath(t *testing.T) {
	for i, tc := range []struct {
		input        string
		mergeSlashes bool
		expect       string
	}{
		{
			input:  "/foo",
			expect: "/foo",
		},
		{
			input:  "/foo/",
			expect: "/foo/",
		},
		{
			input:  "//foo",
			expect: "//foo",
		},
		{
			input:        "//foo",
			mergeSlashes: true,
			expect:       "/foo",
		},
		{
			input:        "/foo//bar/",
			mergeSlashes: true,
			expect:       "/foo/bar/",
		},
		{
			input:  "/foo/./.././bar",
			expect: "/bar",
		},
		{
			input:  "/foo//./..//./bar",
			expect: "/foo//bar",
		},
		{
			input:  "/foo///./..//./bar",
			expect: "/foo///bar",
		},
		{
			input:  "/foo///./..//.",
			expect: "/foo//",
		},
		{
			input:  "/foo//./bar",
			expect: "/foo//bar",
		},
	} {
		actual := CleanPath(tc.input, tc.mergeSlashes)
		if actual != tc.expect {
			t.Errorf("Test %d [input='%s' mergeSlashes=%t]: Got '%s', expected '%s'",
				i, tc.input, tc.mergeSlashes, actual, tc.expect)
		}
	}
}


================================================
FILE: modules/caddyhttp/celmatcher.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "crypto/x509/pkix" "encoding/json" "errors" "fmt" "net/http" "reflect" "regexp" "strings" "time" "github.com/google/cel-go/cel" "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/ext" "github.com/google/cel-go/interpreter" "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(MatchExpression{}) } // MatchExpression matches requests by evaluating a // [CEL](https://github.com/google/cel-spec) expression. // This enables complex logic to be expressed using a comfortable, // familiar syntax. Please refer to // [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions). // // This matcher's JSON interface is actually a string, not a struct. // The generated docs are not correct because this type has custom // marshaling logic. // // COMPATIBILITY NOTE: This module is still experimental and is not // subject to Caddy's compatibility guarantee. type MatchExpression struct { // The CEL expression to evaluate. Any Caddy placeholders // will be expanded and situated into proper CEL function // calls before evaluating. 
Expr string `json:"expr,omitempty"` // Name is an optional name for this matcher. // This is used to populate the name for regexp // matchers that appear in the expression. Name string `json:"name,omitempty"` expandedExpr string prg cel.Program ta types.Adapter log *zap.Logger } // CaddyModule returns the Caddy module information. func (MatchExpression) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.expression", New: func() caddy.Module { return new(MatchExpression) }, } } // MarshalJSON marshals m's expression. func (m MatchExpression) MarshalJSON() ([]byte, error) { // if the name is empty, then we can marshal just the expression string if m.Name == "" { return json.Marshal(m.Expr) } // otherwise, we need to marshal the full object, using an // anonymous struct to avoid infinite recursion return json.Marshal(struct { Expr string `json:"expr"` Name string `json:"name"` }{ Expr: m.Expr, Name: m.Name, }) } // UnmarshalJSON unmarshals m's expression. func (m *MatchExpression) UnmarshalJSON(data []byte) error { // if the data is a string, then it's just the expression if data[0] == '"' { return json.Unmarshal(data, &m.Expr) } // otherwise, it's a full object, so unmarshal it, // using an temp map to avoid infinite recursion var tmpJson map[string]any err := json.Unmarshal(data, &tmpJson) *m = MatchExpression{ Expr: tmpJson["expr"].(string), Name: tmpJson["name"].(string), } return err } // Provision sets ups m. 
func (m *MatchExpression) Provision(ctx caddy.Context) error {
	m.log = ctx.Logger()

	// replace placeholders with a function call - this is just some
	// light (and possibly naïve) syntactic sugar
	m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)

	// as a second pass, we'll strip the escape character from an escaped
	// placeholder, so that it can be used as an input to other CEL functions
	m.expandedExpr = escapedPlaceholderRegexp.ReplaceAllString(m.expandedExpr, escapedPlaceholderExpansion)

	// our type adapter expands CEL's standard type support
	m.ta = celTypeAdapter{}

	// initialize the CEL libraries from the Matcher implementations which
	// have been configured to support CEL.
	matcherLibProducers := []CELLibraryProducer{}
	for _, info := range caddy.GetModules("http.matchers") {
		// only matchers that implement CELLibraryProducer become
		// callable as functions within expressions; others are skipped
		p, ok := info.New().(CELLibraryProducer)
		if ok {
			matcherLibProducers = append(matcherLibProducers, p)
		}
	}

	// add the matcher name to the context so that the matcher name
	// can be used by regexp matchers being provisioned
	ctx = ctx.WithValue(MatcherNameCtxKey, m.Name)

	// Assemble the compilation and program options from the different library
	// producers into a single cel.Library implementation.
	matcherEnvOpts := []cel.EnvOption{}
	matcherProgramOpts := []cel.ProgramOption{}
	for _, producer := range matcherLibProducers {
		l, err := producer.CELLibrary(ctx)
		if err != nil {
			return fmt.Errorf("error initializing CEL library for %T: %v", producer, err)
		}
		matcherEnvOpts = append(matcherEnvOpts, l.CompileOptions()...)
		matcherProgramOpts = append(matcherProgramOpts, l.ProgramOptions()...)
	}
	matcherLib := cel.Lib(NewMatcherCELLibrary(matcherEnvOpts, matcherProgramOpts))

	// create the CEL environment; the ph() function resolves Caddy
	// placeholders against the request's Replacer at evaluation time
	env, err := cel.NewEnv(
		cel.Function(CELPlaceholderFuncName, cel.SingletonBinaryBinding(m.caddyPlaceholderFunc), cel.Overload(
			CELPlaceholderFuncName+"_httpRequest_string",
			[]*cel.Type{httpRequestObjectType, cel.StringType},
			cel.AnyType,
		)),
		cel.Variable(CELRequestVarName, httpRequestObjectType),
		cel.CustomTypeAdapter(m.ta),
		ext.Strings(),
		ext.Bindings(),
		ext.Lists(),
		ext.Math(),
		matcherLib,
	)
	if err != nil {
		return fmt.Errorf("setting up CEL environment: %v", err)
	}

	// parse and type-check the expression
	checked, issues := env.Compile(m.expandedExpr)
	if issues.Err() != nil {
		return fmt.Errorf("compiling CEL program: %s", issues.Err())
	}

	// request matching is a boolean operation, so we don't really know
	// what to do if the expression returns a non-boolean type
	if checked.OutputType() != cel.BoolType {
		return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.OutputType())
	}

	// compile the "program"
	m.prg, err = env.Program(checked, cel.EvalOptions(cel.OptOptimize))
	if err != nil {
		return fmt.Errorf("compiling CEL program: %s", err)
	}
	return nil
}

// Match returns true if r matches m.
func (m MatchExpression) Match(r *http.Request) bool {
	match, err := m.MatchWithError(r)
	if err != nil {
		// this legacy interface cannot return the error directly, so
		// surface it to later handlers via a request variable instead
		SetVar(r.Context(), MatcherErrorVarKey, err)
	}
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchExpression) MatchWithError(r *http.Request) (bool, error) {
	celReq := celHTTPRequest{r}
	out, _, err := m.prg.Eval(celReq)
	if err != nil {
		// NOTE(review): this both logs and returns the error; callers
		// may log it again — confirm the double-reporting is intended
		m.log.Error("evaluating expression", zap.Error(err))
		return false, err
	}
	// non-bool results (shouldn't occur after the Provision-time type
	// check) are treated as "no match"
	if outBool, ok := out.Value().(bool); ok {
		return outBool, nil
	}
	return false, nil
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume matcher name

	// if there's multiple args, then we need to keep the raw
	// tokens because the user may have used quotes within their
	// CEL expression (e.g. strings) and we should retain that
	if d.CountRemainingArgs() > 1 {
		m.Expr = strings.Join(d.RemainingArgsRaw(), " ")
		return nil
	}

	// there should at least be one arg
	if !d.NextArg() {
		return d.ArgErr()
	}

	// if there's only one token, then we can safely grab the
	// cleaned token (no quotes) and use that as the expression
	// because there's no valid CEL expression that is only a
	// quoted string; commonly quotes are used in Caddyfile to
	// define the expression
	m.Expr = d.Val()

	// use the named matcher's name, to fill regexp
	// matchers names by default
	m.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)

	return nil
}

// caddyPlaceholderFunc implements the custom CEL function that accesses the
// Replacer on a request and gets values from it.
func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
	// lhs must be the request wrapper bound to the "req" CEL variable
	celReq, ok := lhs.(celHTTPRequest)
	if !ok {
		return types.NewErr(
			"invalid request of type '%v' to %s(request, placeholderVarName)",
			lhs.Type(),
			CELPlaceholderFuncName,
		)
	}
	// rhs is the placeholder key, e.g. "http.request.uri.path"
	phStr, ok := rhs.(types.String)
	if !ok {
		return types.NewErr(
			"invalid placeholder variable name of type '%v' to %s(request, placeholderVarName)",
			rhs.Type(),
			CELPlaceholderFuncName,
		)
	}

	// unknown placeholders yield the zero value (second return ignored)
	repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	val, _ := repl.Get(string(phStr))

	return m.ta.NativeToValue(val)
}

// httpRequestCELType is the type representation of a native HTTP request.
var httpRequestCELType = cel.ObjectType("http.Request", traits.ReceiverType)

// celHTTPRequest wraps an http.Request with ref.Val interface methods.
//
// This type also implements the interpreter.Activation interface which
// drops allocation costs for CEL expression evaluations by roughly half.
type celHTTPRequest struct{ *http.Request }

// ResolveName implements interpreter.Activation; the wrapper exposes
// itself under the CEL request variable name ("req") only.
func (cr celHTTPRequest) ResolveName(name string) (any, bool) {
	if name == CELRequestVarName {
		return cr, true
	}
	return nil, false
}

// Parent implements interpreter.Activation; there is no parent scope.
func (cr celHTTPRequest) Parent() interpreter.Activation { return nil }

// ConvertToNative unwraps to the underlying *http.Request regardless of
// the requested type descriptor.
func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (any, error) {
	return cr.Request, nil
}

func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
	panic("not implemented")
}

// Equal compares by request pointer identity, not by request contents.
func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
	if o, ok := other.Value().(celHTTPRequest); ok {
		return types.Bool(o.Request == cr.Request)
	}
	return types.ValOrErr(other, "%v is not comparable type", other)
}
func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
func (cr celHTTPRequest) Value() any  { return cr }

var pkixNameCELType = cel.ObjectType("pkix.Name", traits.ReceiverType)

// celPkixName wraps a pkix.Name with
// methods to satisfy the ref.Val interface.
type celPkixName struct{ *pkix.Name }

func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (any, error) {
	return pn.Name, nil
}

// ConvertToType supports conversion to string only (the RFC 2253 form
// produced by pkix.Name.String).
func (pn celPkixName) ConvertToType(typeVal ref.Type) ref.Val {
	if typeVal.TypeName() == "string" {
		return types.String(pn.Name.String())
	}
	panic("not implemented")
}

// Equal compares the name's string form against a string operand.
func (pn celPkixName) Equal(other ref.Val) ref.Val {
	if o, ok := other.Value().(string); ok {
		return types.Bool(pn.Name.String() == o)
	}
	return types.ValOrErr(other, "%v is not comparable type", other)
}
func (celPkixName) Type() ref.Type { return pkixNameCELType }
func (pn celPkixName) Value() any  { return pn }

// celTypeAdapter can adapt our custom types to a CEL value.
type celTypeAdapter struct{}

// NativeToValue converts the native Go values this package produces
// (requests, pkix names, times, errors) into CEL ref.Val values,
// deferring to CEL's default adapter for everything else.
func (celTypeAdapter) NativeToValue(value any) ref.Val {
	switch v := value.(type) {
	case celHTTPRequest:
		return v
	case pkix.Name:
		// celPkixName holds a pointer, so wrap a copy of the value
		return celPkixName{&v}
	case time.Time:
		return types.Timestamp{Time: v}
	case error:
		return types.WrapErr(v)
	}
	return types.DefaultTypeAdapter.NativeToValue(value)
}

// CELLibraryProducer provides CEL libraries that expose a Matcher
// implementation as a first class function within the CEL expression
// matcher.
type CELLibraryProducer interface {
	// CELLibrary creates a cel.Library which makes it possible to use the
	// target object within CEL expression matchers.
	CELLibrary(caddy.Context) (cel.Library, error)
}

// CELMatcherImpl creates a new cel.Library based on the following pieces of
// data:
//
//   - macroName: the function name to be used within CEL. This will be a macro
//     and not a function proper.
//   - funcName: the function overload name generated by the CEL macro used to
//     represent the matcher.
//   - matcherDataTypes: the argument types to the macro.
//   - fac: a matcherFactory implementation which converts from CEL constant
//     values to a Matcher instance.
//
// Note, macro names and function names must not collide with other macros or
// functions exposed within CEL expressions, or an error will be produced
// during the expression matcher plan time.
//
// The existing CELMatcherImpl support methods are configured to support a
// limited set of function signatures. For strong type validation you may need
// to provide a custom macro which does a more detailed analysis of the CEL
// literal provided to the macro as an argument.
func CELMatcherImpl(macroName, funcName string, matcherDataTypes []*cel.Type, fac any) (cel.Library, error) {
	requestType := cel.ObjectType("http.Request")
	var macro parser.Macro
	// NOTE(review): no default case — with 0 or more than 3 data types,
	// macro stays nil and is passed to cel.Macros below; confirm callers
	// never do that, or add an explicit error return here.
	switch len(matcherDataTypes) {
	case 1:
		matcherDataType := matcherDataTypes[0]
		switch matcherDataType.String() {
		case "list(string)":
			macro = parser.NewGlobalVarArgMacro(macroName, celMatcherStringListMacroExpander(funcName))
		case cel.StringType.String():
			macro = parser.NewGlobalMacro(macroName, 1, celMatcherStringMacroExpander(funcName))
		case CELTypeJSON.String():
			macro = parser.NewGlobalMacro(macroName, 1, celMatcherJSONMacroExpander(funcName))
		default:
			return nil, fmt.Errorf("unsupported matcher data type: %s", matcherDataType)
		}
	case 2:
		if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType {
			macro = parser.NewGlobalMacro(macroName, 2, celMatcherStringListMacroExpander(funcName))
			// the two string args are collected into a single list arg
			matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
		} else {
			return nil, fmt.Errorf("unsupported matcher data type: %s, %s", matcherDataTypes[0], matcherDataTypes[1])
		}
	case 3:
		// nolint:gosec // false positive, impossible to be out of bounds; see: https://github.com/securego/gosec/issues/1525
		if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType && matcherDataTypes[2] == cel.StringType {
			macro = parser.NewGlobalMacro(macroName, 3, celMatcherStringListMacroExpander(funcName))
			// the three string args are collected into a single list arg
			matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
		} else {
			// nolint:gosec // false positive, impossible to be out of bounds; see: https://github.com/securego/gosec/issues/1525
			return nil, fmt.Errorf("unsupported matcher data type: %s, %s, %s", matcherDataTypes[0], matcherDataTypes[1], matcherDataTypes[2])
		}
	}
	envOptions := []cel.EnvOption{
		cel.Macros(macro),
		cel.Function(funcName,
			cel.Overload(funcName, append([]*cel.Type{requestType}, matcherDataTypes...), cel.BoolType),
			cel.SingletonBinaryBinding(CELMatcherRuntimeFunction(funcName, fac))),
	}
	programOptions := []cel.ProgramOption{
		cel.CustomDecorator(CELMatcherDecorator(funcName, fac)),
	}
	return NewMatcherCELLibrary(envOptions, programOptions), nil
}

// CELMatcherFactory converts a constant CEL value into a RequestMatcher.
// Deprecated: Use CELMatcherWithErrorFactory instead.
type CELMatcherFactory = func(data ref.Val) (RequestMatcher, error)

// CELMatcherWithErrorFactory converts a constant CEL value into a RequestMatcherWithError.
type CELMatcherWithErrorFactory = func(data ref.Val) (RequestMatcherWithError, error)

// matcherCELLibrary is a simplistic configurable cel.Library implementation.
type matcherCELLibrary struct {
	envOptions     []cel.EnvOption
	programOptions []cel.ProgramOption
}

// NewMatcherCELLibrary creates a matcherLibrary from option sets.
func NewMatcherCELLibrary(envOptions []cel.EnvOption, programOptions []cel.ProgramOption) cel.Library {
	return &matcherCELLibrary{
		envOptions:     envOptions,
		programOptions: programOptions,
	}
}

func (lib *matcherCELLibrary) CompileOptions() []cel.EnvOption {
	return lib.envOptions
}

func (lib *matcherCELLibrary) ProgramOptions() []cel.ProgramOption {
	return lib.programOptions
}

// CELMatcherDecorator matches a call overload generated by a CEL macro
// that takes a single argument, and optimizes the implementation to precompile
// the matcher and return a function that references the precompiled and
// provisioned matcher.
func CELMatcherDecorator(funcName string, fac any) interpreter.InterpretableDecorator {
	return func(i interpreter.Interpretable) (interpreter.Interpretable, error) {
		// only decorate calls to the overload this decorator was built for
		call, ok := i.(interpreter.InterpretableCall)
		if !ok {
			return i, nil
		}
		if call.OverloadID() != funcName {
			return i, nil
		}
		// the first argument must resolve to exactly the "req" variable
		callArgs := call.Args()
		reqAttr, ok := callArgs[0].(interpreter.InterpretableAttribute)
		if !ok {
			return nil, errors.New("missing 'req' argument")
		}
		nsAttr, ok := reqAttr.Attr().(interpreter.NamespacedAttribute)
		if !ok {
			return nil, errors.New("missing 'req' argument")
		}
		varNames := nsAttr.CandidateVariableNames()
		if len(varNames) != 1 || len(varNames) == 1 && varNames[0] != CELRequestVarName {
			return nil, errors.New("missing 'req' argument")
		}
		matcherData, ok := callArgs[1].(interpreter.InterpretableConst)
		if !ok {
			// If the matcher arguments are not constant, then this means
			// they contain a Caddy placeholder reference and the evaluation
			// and matcher provisioning should be handled at dynamically.
			return i, nil
		}
		// the data is constant, so build the matcher once now and replace
		// the call with a closure over the prebuilt matcher
		if factory, ok := fac.(CELMatcherWithErrorFactory); ok {
			matcher, err := factory(matcherData.Value())
			if err != nil {
				return nil, err
			}
			return interpreter.NewCall(
				i.ID(), funcName, funcName+"_opt",
				[]interpreter.Interpretable{reqAttr},
				func(args ...ref.Val) ref.Val {
					// The request value, guaranteed to be of type celHTTPRequest
					celReq := args[0]
					// If needed this call could be changed to convert the value
					// to a *http.Request using CEL's ConvertToNative method.
					httpReq := celReq.Value().(celHTTPRequest)
					match, err := matcher.MatchWithError(httpReq.Request)
					if err != nil {
						return types.WrapErr(err)
					}
					return types.Bool(match)
				},
			), nil
		}
		if factory, ok := fac.(CELMatcherFactory); ok {
			matcher, err := factory(matcherData.Value())
			if err != nil {
				return nil, err
			}
			return interpreter.NewCall(
				i.ID(), funcName, funcName+"_opt",
				[]interpreter.Interpretable{reqAttr},
				func(args ...ref.Val) ref.Val {
					// The request value, guaranteed to be of type celHTTPRequest
					celReq := args[0]
					// If needed this call could be changed to convert the value
					// to a *http.Request using CEL's ConvertToNative method.
					httpReq := celReq.Value().(celHTTPRequest)
					// prefer the error-aware interface when available
					if m, ok := matcher.(RequestMatcherWithError); ok {
						match, err := m.MatchWithError(httpReq.Request)
						if err != nil {
							return types.WrapErr(err)
						}
						return types.Bool(match)
					}
					return types.Bool(matcher.Match(httpReq.Request))
				},
			), nil
		}
		return nil, fmt.Errorf("invalid matcher factory, must be CELMatcherFactory or CELMatcherWithErrorFactory: %T", fac)
	}
}

// CELMatcherRuntimeFunction creates a function binding for when the input to the matcher
// is dynamically resolved rather than a set of static constant values.
func CELMatcherRuntimeFunction(funcName string, fac any) functions.BinaryOp { return func(celReq, matcherData ref.Val) ref.Val { if factory, ok := fac.(CELMatcherWithErrorFactory); ok { matcher, err := factory(matcherData) if err != nil { return types.WrapErr(err) } httpReq := celReq.Value().(celHTTPRequest) match, err := matcher.MatchWithError(httpReq.Request) if err != nil { return types.WrapErr(err) } return types.Bool(match) } if factory, ok := fac.(CELMatcherFactory); ok { matcher, err := factory(matcherData) if err != nil { return types.WrapErr(err) } httpReq := celReq.Value().(celHTTPRequest) if m, ok := matcher.(RequestMatcherWithError); ok { match, err := m.MatchWithError(httpReq.Request) if err != nil { return types.WrapErr(err) } return types.Bool(match) } return types.Bool(matcher.Match(httpReq.Request)) } return types.NewErr("CELMatcherRuntimeFunction invalid matcher factory: %T", fac) } } // celMatcherStringListMacroExpander validates that the macro is called // with a variable number of string arguments (at least one). // // The arguments are collected into a single list argument the following // function call returned: (request, [args]) func celMatcherStringListMacroExpander(funcName string) cel.MacroFactory { return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { matchArgs := []ast.Expr{} if len(args) == 0 { return nil, eh.NewError(0, "matcher requires at least one argument") } for _, arg := range args { if isCELStringExpr(arg) { matchArgs = append(matchArgs, arg) } else { return nil, eh.NewError(arg.ID(), "matcher arguments must be string constants") } } return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), eh.NewList(matchArgs...)), nil } } // celMatcherStringMacroExpander validates that the macro is called a single // string argument. 
//
// The following function call is returned: (request, arg)
func celMatcherStringMacroExpander(funcName string) parser.MacroExpander {
	return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
		if len(args) != 1 {
			return nil, eh.NewError(0, "matcher requires one argument")
		}
		if isCELStringExpr(args[0]) {
			return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), args[0]), nil
		}
		return nil, eh.NewError(args[0].ID(), "matcher argument must be a string literal")
	}
}

// celMatcherJSONMacroExpander validates that the macro is called with a single
// map literal argument.
//
// The following function call is returned: (request, arg)
func celMatcherJSONMacroExpander(funcName string) parser.MacroExpander {
	return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
		if len(args) != 1 {
			return nil, eh.NewError(0, "matcher requires a map literal argument")
		}
		arg := args[0]
		switch arg.Kind() {
		case ast.StructKind:
			return nil, eh.NewError(arg.ID(),
				fmt.Sprintf("matcher input must be a map literal, not a %s", arg.AsStruct().TypeName()))
		case ast.MapKind:
			// keys must be string-like; values may be strings or string lists
			mapExpr := arg.AsMap()
			for _, entry := range mapExpr.Entries() {
				isStringPlaceholder := isCELStringExpr(entry.AsMapEntry().Key())
				if !isStringPlaceholder {
					return nil, eh.NewError(entry.ID(), "matcher map keys must be string literals")
				}
				isStringListPlaceholder := isCELStringExpr(entry.AsMapEntry().Value()) ||
					isCELStringListLiteral(entry.AsMapEntry().Value())
				if !isStringListPlaceholder {
					return nil, eh.NewError(entry.AsMapEntry().Value().ID(), "matcher map values must be string or list literals")
				}
			}
			return eh.NewCall(funcName, eh.NewIdent(CELRequestVarName), arg), nil
		case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind:
			// appeasing the linter :)
		}
		return nil, eh.NewError(arg.ID(), "matcher requires a map literal argument")
	}
}

// CELValueToMapStrList converts a CEL value to a map[string][]string
//
// Earlier validation stages should guarantee that the value has this type
// at compile time, and that the runtime value type is map[string]any.
// The reason for the slight difference in value type is that CEL allows for
// map literals containing heterogeneous values, in this case string and list
// of string.
func CELValueToMapStrList(data ref.Val) (map[string][]string, error) {
	// Prefer map[string]any, but newer cel-go versions may return map[any]any
	mapStrType := reflect.TypeFor[map[string]any]()
	mapStrRaw, err := data.ConvertToNative(mapStrType)
	var mapStrIface map[string]any
	if err != nil {
		// Try map[any]any and convert keys to strings
		mapAnyType := reflect.TypeFor[map[any]any]()
		mapAnyRaw, err2 := data.ConvertToNative(mapAnyType)
		if err2 != nil {
			// report the original (map[string]any) conversion error
			return nil, err
		}
		mapAnyIface := mapAnyRaw.(map[any]any)
		mapStrIface = make(map[string]any, len(mapAnyIface))
		for k, v := range mapAnyIface {
			ks, ok := k.(string)
			if !ok {
				return nil, fmt.Errorf("unsupported map key type in header match: %T", k)
			}
			mapStrIface[ks] = v
		}
	} else {
		mapStrIface = mapStrRaw.(map[string]any)
	}

	// normalize every value to a []string
	mapStrListStr := make(map[string][]string, len(mapStrIface))
	for k, v := range mapStrIface {
		switch val := v.(type) {
		case string:
			mapStrListStr[k] = []string{val}
		case types.String:
			mapStrListStr[k] = []string{string(val)}
		case []string:
			mapStrListStr[k] = val
		case []ref.Val:
			convVals := make([]string, len(val))
			for i, elem := range val {
				strVal, ok := elem.(types.String)
				if !ok {
					// NOTE(review): %T formats the list (val), not the
					// offending element (elem) — likely meant elem
					return nil, fmt.Errorf("unsupported value type in matcher input: %T", val)
				}
				convVals[i] = string(strVal)
			}
			mapStrListStr[k] = convVals
		case []any:
			convVals := make([]string, len(val))
			for i, elem := range val {
				switch e := elem.(type) {
				case string:
					convVals[i] = e
				case types.String:
					convVals[i] = string(e)
				default:
					return nil, fmt.Errorf("unsupported element type in matcher input list: %T", elem)
				}
			}
			mapStrListStr[k] = convVals
		default:
			return nil, fmt.Errorf("unsupported value type in matcher input: %T", val)
		}
	}
	return mapStrListStr, nil
}

// isCELStringExpr indicates whether the expression is a supported string expression
func isCELStringExpr(e ast.Expr) bool {
	return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
}

// isCELStringLiteral returns whether the expression is a CEL string literal.
func isCELStringLiteral(e ast.Expr) bool {
	switch e.Kind() {
	case ast.LiteralKind:
		constant := e.AsLiteral()
		switch constant.Type() {
		case types.StringType:
			return true
		}
	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELCaddyPlaceholderCall returns whether the expression is a caddy placeholder call.
func isCELCaddyPlaceholderCall(e ast.Expr) bool {
	switch e.Kind() {
	case ast.CallKind:
		call := e.AsCall()
		if call.FunctionName() == CELPlaceholderFuncName {
			return true
		}
	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
// other concat call arguments.
func isCELConcatCall(e ast.Expr) bool {
	switch e.Kind() {
	case ast.CallKind:
		call := e.AsCall()
		// must be the global "+" operator, not a method-style call
		if call.Target().Kind() != ast.UnspecifiedExprKind {
			return false
		}
		if call.FunctionName() != operators.Add {
			return false
		}
		// all operands must themselves be string expressions
		for _, arg := range call.Args() {
			if !isCELStringExpr(arg) {
				return false
			}
		}
		return true
	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELStringListLiteral returns whether the expression resolves to a list literal
// containing only string constants or a placeholder call.
func isCELStringListLiteral(e ast.Expr) bool {
	switch e.Kind() {
	case ast.ListKind:
		// every element must be a string literal, placeholder, or concat
		list := e.AsList()
		for _, elem := range list.Elements() {
			if !isCELStringExpr(elem) {
				return false
			}
		}
		return true
	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// Variables used for replacing Caddy placeholders in CEL
// expressions with a proper CEL function call; this is
// just for syntactic sugar.
var (
	// The placeholder may not be preceded by a backslash; the expansion
	// will include the preceding character if it is not a backslash.
	placeholderRegexp    = regexp.MustCompile(`([^\\]|^){([a-zA-Z][\w.-]+)}`)
	placeholderExpansion = `${1}ph(req, "${2}")`

	// As a second pass, we need to strip the escape character in front of
	// the placeholder, if it exists.
	escapedPlaceholderRegexp    = regexp.MustCompile(`\\{([a-zA-Z][\w.-]+)}`)
	escapedPlaceholderExpansion = `{${1}}`

	// CELTypeJSON is the CEL type for map-literal matcher arguments.
	CELTypeJSON = cel.MapType(cel.StringType, cel.DynType)
)

var httpRequestObjectType = cel.ObjectType("http.Request")

// The name of the CEL function which accesses Replacer values.
const CELPlaceholderFuncName = "ph"

// The name of the CEL request variable.
const CELRequestVarName = "req"

// MatcherNameCtxKey is the context key under which the matcher's
// name is stored during provisioning.
const MatcherNameCtxKey = "matcher_name"

// Interface guards
var (
	_ caddy.Provisioner       = (*MatchExpression)(nil)
	_ RequestMatcherWithError = (*MatchExpression)(nil)
	_ caddyfile.Unmarshaler   = (*MatchExpression)(nil)
	_ json.Marshaler          = (*MatchExpression)(nil)
	_ json.Unmarshaler        = (*MatchExpression)(nil)
)



================================================
FILE: modules/caddyhttp/celmatcher_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "context" "crypto/tls" "crypto/x509" "encoding/pem" "net/http" "net/http/httptest" "testing" "github.com/caddyserver/caddy/v2" ) var ( clientCert = []byte(`-----BEGIN CERTIFICATE----- MIIB9jCCAV+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1DYWRk eSBUZXN0IENBMB4XDTE4MDcyNDIxMzUwNVoXDTI4MDcyMTIxMzUwNVowHTEbMBkG A1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB iQKBgQDFDEpzF0ew68teT3xDzcUxVFaTII+jXH1ftHXxxP4BEYBU4q90qzeKFneF z83I0nC0WAQ45ZwHfhLMYHFzHPdxr6+jkvKPASf0J2v2HDJuTM1bHBbik5Ls5eq+ fVZDP8o/VHKSBKxNs8Goc2NTsr5b07QTIpkRStQK+RJALk4x9QIDAQABo0swSTAJ BgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A AAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEANSjz2Sk+ eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV 3Q9fgDkiUod+uIK0IynzIKvw+Cjg+3nx6NQ0IM0zo8c7v398RzB4apbXKZyeeqUH 9fNwfEi+OoXR6s+upSKobCmLGLGi9Na5s5g= -----END CERTIFICATE-----`) matcherTests = []struct { name string expression *MatchExpression urlTarget string httpMethod string httpHeader *http.Header wantErr bool wantResult bool clientCertificate []byte }{ { name: "boolean matches succeed for placeholder http.request.tls.client.subject", expression: &MatchExpression{ Expr: "{http.request.tls.client.subject} == 'CN=client.localdomain'", }, clientCertificate: clientCert, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "header matches (MatchHeader)", expression: &MatchExpression{ Expr: `header({'Field': 'foo'})`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"foo", 
"bar"}}, wantResult: true, }, { name: "header matches an escaped placeholder value (MatchHeader)", expression: &MatchExpression{ Expr: `header({'Field': '\\\{foobar}'})`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"{foobar}"}}, wantResult: true, }, { name: "header matches an placeholder replaced during the header matcher (MatchHeader)", expression: &MatchExpression{ Expr: `header({'Field': '\{http.request.uri.path}'})`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"/foo"}}, wantResult: true, }, { name: "header error, invalid escape sequence (MatchHeader)", expression: &MatchExpression{ Expr: `header({'Field': '\\{foobar}'})`, }, wantErr: true, }, { name: "header error, needs to be JSON syntax with field as key (MatchHeader)", expression: &MatchExpression{ Expr: `header('foo')`, }, wantErr: true, }, { name: "header_regexp matches (MatchHeaderRE)", expression: &MatchExpression{ Expr: `header_regexp('Field', 'fo{2}')`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"foo", "bar"}}, wantResult: true, }, { name: "header_regexp matches with name (MatchHeaderRE)", expression: &MatchExpression{ Expr: `header_regexp('foo', 'Field', 'fo{2}')`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"foo", "bar"}}, wantResult: true, }, { name: "header_regexp does not match (MatchHeaderRE)", expression: &MatchExpression{ Expr: `header_regexp('foo', 'Nope', 'fo{2}')`, }, urlTarget: "https://example.com/foo", httpHeader: &http.Header{"Field": []string{"foo", "bar"}}, wantResult: false, }, { name: "header_regexp error (MatchHeaderRE)", expression: &MatchExpression{ Expr: `header_regexp('foo')`, }, wantErr: true, }, { name: "host matches localhost (MatchHost)", expression: &MatchExpression{ Expr: `host('localhost')`, }, urlTarget: "http://localhost", wantResult: true, }, { name: "host matches (MatchHost)", expression: &MatchExpression{ 
Expr: `host('*.example.com')`, }, urlTarget: "https://foo.example.com", wantResult: true, }, { name: "host does not match (MatchHost)", expression: &MatchExpression{ Expr: `host('example.net', '*.example.com')`, }, urlTarget: "https://foo.example.org", wantResult: false, }, { name: "host error (MatchHost)", expression: &MatchExpression{ Expr: `host(80)`, }, wantErr: true, }, { name: "method does not match (MatchMethod)", expression: &MatchExpression{ Expr: `method('PUT')`, }, urlTarget: "https://foo.example.com", httpMethod: "GET", wantResult: false, }, { name: "method matches (MatchMethod)", expression: &MatchExpression{ Expr: `method('DELETE', 'PUT', 'POST')`, }, urlTarget: "https://foo.example.com", httpMethod: "PUT", wantResult: true, }, { name: "method error not enough arguments (MatchMethod)", expression: &MatchExpression{ Expr: `method()`, }, wantErr: true, }, { name: "path matches substring (MatchPath)", expression: &MatchExpression{ Expr: `path('*substring*')`, }, urlTarget: "https://example.com/foo/substring/bar.txt", wantResult: true, }, { name: "path does not match (MatchPath)", expression: &MatchExpression{ Expr: `path('/foo')`, }, urlTarget: "https://example.com/foo/bar", wantResult: false, }, { name: "path matches end url fragment (MatchPath)", expression: &MatchExpression{ Expr: `path('/foo')`, }, urlTarget: "https://example.com/FOO", wantResult: true, }, { name: "path matches end fragment with substring prefix (MatchPath)", expression: &MatchExpression{ Expr: `path('/foo*')`, }, urlTarget: "https://example.com/FOOOOO", wantResult: true, }, { name: "path matches one of multiple (MatchPath)", expression: &MatchExpression{ Expr: `path('/foo', '/foo/*', '/bar', '/bar/*', '/baz', '/baz*')`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "path_regexp with empty regex matches empty path (MatchPathRE)", expression: &MatchExpression{ Expr: `path_regexp('')`, }, urlTarget: "https://example.com/", wantResult: true, }, { name: 
"path_regexp with slash regex matches empty path (MatchPathRE)", expression: &MatchExpression{ Expr: `path_regexp('/')`, }, urlTarget: "https://example.com/", wantResult: true, }, { name: "path_regexp matches end url fragment (MatchPathRE)", expression: &MatchExpression{ Expr: `path_regexp('^/foo')`, }, urlTarget: "https://example.com/foo/", wantResult: true, }, { name: "path_regexp does not match fragment at end (MatchPathRE)", expression: &MatchExpression{ Expr: `path_regexp('bar_at_start', '^/bar')`, }, urlTarget: "https://example.com/foo/bar", wantResult: false, }, { name: "protocol matches (MatchProtocol)", expression: &MatchExpression{ Expr: `protocol('HTTPs')`, }, urlTarget: "https://example.com", wantResult: true, }, { name: "protocol does not match (MatchProtocol)", expression: &MatchExpression{ Expr: `protocol('grpc')`, }, urlTarget: "https://example.com", wantResult: false, }, { name: "protocol invocation error no args (MatchProtocol)", expression: &MatchExpression{ Expr: `protocol()`, }, wantErr: true, }, { name: "protocol invocation error too many args (MatchProtocol)", expression: &MatchExpression{ Expr: `protocol('grpc', 'https')`, }, wantErr: true, }, { name: "protocol invocation error wrong arg type (MatchProtocol)", expression: &MatchExpression{ Expr: `protocol(true)`, }, wantErr: true, }, { name: "query does not match against a specific value (MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": "1"})`, }, urlTarget: "https://example.com/foo", wantResult: false, }, { name: "query matches against a specific value (MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": "1"})`, }, urlTarget: "https://example.com/foo/?debug=1", wantResult: true, }, { name: "query matches against multiple values (MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": ["0", "1", {http.request.uri.query.debug}+"1"]})`, }, urlTarget: "https://example.com/foo/?debug=1", wantResult: true, }, { name: "query matches against a wildcard 
(MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": ["*"]})`, }, urlTarget: "https://example.com/foo/?debug=something", wantResult: true, }, { name: "query matches against a placeholder value (MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": {http.request.uri.query.debug}})`, }, urlTarget: "https://example.com/foo/?debug=1", wantResult: true, }, { name: "query error bad map key type (MatchQuery)", expression: &MatchExpression{ Expr: `query({1: "1"})`, }, wantErr: true, }, { name: "query error typed struct instead of map (MatchQuery)", expression: &MatchExpression{ Expr: `query(Message{field: "1"})`, }, wantErr: true, }, { name: "query error bad map value type (MatchQuery)", expression: &MatchExpression{ Expr: `query({"debug": 1})`, }, wantErr: true, }, { name: "query error no args (MatchQuery)", expression: &MatchExpression{ Expr: `query()`, }, wantErr: true, }, { name: "remote_ip error no args (MatchRemoteIP)", expression: &MatchExpression{ Expr: `remote_ip()`, }, wantErr: true, }, { name: "remote_ip single IP match (MatchRemoteIP)", expression: &MatchExpression{ Expr: `remote_ip('192.0.2.1')`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "vars value (VarsMatcher)", expression: &MatchExpression{ Expr: `vars({'foo': 'bar'})`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "vars matches placeholder, needs escape (VarsMatcher)", expression: &MatchExpression{ Expr: `vars({'\{http.request.uri.path}': '/foo'})`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "vars error wrong syntax (VarsMatcher)", expression: &MatchExpression{ Expr: `vars('foo', 'bar')`, }, wantErr: true, }, { name: "vars error no args (VarsMatcher)", expression: &MatchExpression{ Expr: `vars()`, }, wantErr: true, }, { name: "vars_regexp value (MatchVarsRE)", expression: &MatchExpression{ Expr: `vars_regexp('foo', 'ba?r')`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: 
"vars_regexp value with name (MatchVarsRE)", expression: &MatchExpression{ Expr: `vars_regexp('name', 'foo', 'ba?r')`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "vars_regexp matches placeholder, needs escape (MatchVarsRE)", expression: &MatchExpression{ Expr: `vars_regexp('\{http.request.uri.path}', '/fo?o')`, }, urlTarget: "https://example.com/foo", wantResult: true, }, { name: "vars_regexp error no args (MatchVarsRE)", expression: &MatchExpression{ Expr: `vars_regexp()`, }, wantErr: true, }, } ) func TestMatchExpressionMatch(t *testing.T) { for _, tst := range matcherTests { tc := tst t.Run(tc.name, func(t *testing.T) { caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()}) defer cancel() err := tc.expression.Provision(caddyCtx) if err != nil { if !tc.wantErr { t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tc.wantErr) } return } req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil) if tc.httpHeader != nil { req.Header = *tc.httpHeader } repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{ "foo": "bar", }) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) if tc.clientCertificate != nil { block, _ := pem.Decode(clientCert) if block == nil { t.Fatalf("failed to decode PEM certificate") } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("failed to decode PEM certificate: %v", err) } req.TLS = &tls.ConnectionState{ PeerCertificates: []*x509.Certificate{cert}, } } matches, err := tc.expression.MatchWithError(req) if err != nil { t.Errorf("MatchExpression.Match() error = %v", err) } if matches != tc.wantResult { t.Errorf("MatchExpression.Match() expected to return '%t', for expression : '%s'", tc.wantResult, tc.expression.Expr) } }) } } func BenchmarkMatchExpressionMatch(b *testing.B) { for _, tst := range matcherTests { tc := tst 
if tc.wantErr { continue } b.Run(tst.name, func(b *testing.B) { tc.expression.Provision(caddy.Context{}) req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil) if tc.httpHeader != nil { req.Header = *tc.httpHeader } repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{ "foo": "bar", }) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) if tc.clientCertificate != nil { block, _ := pem.Decode(clientCert) if block == nil { b.Fatalf("failed to decode PEM certificate") } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { b.Fatalf("failed to decode PEM certificate: %v", err) } req.TLS = &tls.ConnectionState{ PeerCertificates: []*x509.Certificate{cert}, } } b.ResetTimer() for b.Loop() { tc.expression.MatchWithError(req) } }) } } func TestMatchExpressionProvision(t *testing.T) { tests := []struct { name string expression *MatchExpression wantErr bool }{ { name: "boolean matches succeed", expression: &MatchExpression{ Expr: "{http.request.uri.query} != ''", }, wantErr: false, }, { name: "reject expressions with non-boolean results", expression: &MatchExpression{ Expr: "{http.request.uri.query}", }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()}) defer cancel() if err := tt.expression.Provision(ctx); (err != nil) != tt.wantErr { t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tt.wantErr) } }) } } ================================================ FILE: modules/caddyhttp/encode/brotli/brotli_precompressed.go ================================================ package caddybrotli import ( "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode" ) func init() { caddy.RegisterModule(BrotliPrecompressed{}) } // BrotliPrecompressed provides the file extension for files 
precompressed with brotli encoding. type BrotliPrecompressed struct{} // CaddyModule returns the Caddy module information. func (BrotliPrecompressed) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.precompressed.br", New: func() caddy.Module { return new(BrotliPrecompressed) }, } } // AcceptEncoding returns the name of the encoding as // used in the Accept-Encoding request headers. func (BrotliPrecompressed) AcceptEncoding() string { return "br" } // Suffix returns the filename suffix of precompressed files. func (BrotliPrecompressed) Suffix() string { return ".br" } // Interface guards var _ encode.Precompressed = (*BrotliPrecompressed)(nil) ================================================ FILE: modules/caddyhttp/encode/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package encode

import (
	"strconv"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("encode", parseCaddyfile)
}

// parseCaddyfile adapts the Caddyfile "encode" directive into an
// Encode middleware handler.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	enc := new(Encode)
	err := enc.UnmarshalCaddyfile(h.Dispenser)
	if err != nil {
		return nil, err
	}
	return enc, nil
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	encode [<matcher>] <formats...> {
//	    gzip [<level>]
//	    zstd
//	    minimum_length <length>
//	    # response matcher block
//	    match {
//	        status <code...>
//	        header <field> [<value>]
//	    }
//	    # or response matcher single line syntax
//	    match [header <field> [<value>]] | [status <code...>]
//	}
//
// Specifying the formats on the first line will use those formats' defaults.
func (enc *Encode) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name

	prefer := []string{}
	remainingArgs := d.RemainingArgs()

	responseMatchers := make(map[string]caddyhttp.ResponseMatcher)
	for d.NextBlock(0) {
		switch d.Val() {
		case "minimum_length":
			if !d.NextArg() {
				return d.ArgErr()
			}
			minLength, err := strconv.Atoi(d.Val())
			if err != nil {
				return err
			}
			enc.MinLength = minLength
		case "match":
			err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), responseMatchers)
			if err != nil {
				return err
			}
			matcher := responseMatchers["match"]
			enc.Matcher = &matcher
		default:
			// any other subdirective is treated as an encoder module name
			name := d.Val()
			modID := "http.encoders." + name
			unm, err := caddyfile.UnmarshalModule(d, modID)
			if err != nil {
				return err
			}
			encoding, ok := unm.(Encoding)
			if !ok {
				return d.Errf("module %s is not an HTTP encoding; is %T", modID, unm)
			}
			if enc.EncodingsRaw == nil {
				enc.EncodingsRaw = make(caddy.ModuleMap)
			}
			enc.EncodingsRaw[name] = caddyconfig.JSON(encoding, nil)
			prefer = append(prefer, name)
		}
	}

	// default to zstd and gzip if no encoders were configured at all
	if len(prefer) == 0 && len(remainingArgs) == 0 {
		remainingArgs = []string{"zstd", "gzip"}
	}

	for _, arg := range remainingArgs {
		modID := "http.encoders." + arg
		mod, err := caddy.GetModule(modID)
		if err != nil {
			// report the requested module ID; mod is zero-valued on this
			// error path and would render as an empty/garbage name
			return d.Errf("finding encoder module '%s': %v", modID, err)
		}
		encoding, ok := mod.New().(Encoding)
		if !ok {
			return d.Errf("module %s is not an HTTP encoding", modID)
		}
		if enc.EncodingsRaw == nil {
			enc.EncodingsRaw = make(caddy.ModuleMap)
		}
		enc.EncodingsRaw[arg] = caddyconfig.JSON(encoding, nil)
		prefer = append(prefer, arg)
	}

	// use the order in which the encoders were defined.
	enc.Prefer = prefer

	return nil
}

// Interface guard
var _ caddyfile.Unmarshaler = (*Encode)(nil)

================================================
FILE: modules/caddyhttp/encode/encode.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package encode implements an encoder middleware for Caddy.
// The initial
// enhancements related to Accept-Encoding, minimum content length, and
// buffer/writer pools were adapted from https://github.com/xi2/httpgzip
// then modified heavily to accommodate modular encoders and fix bugs.
// Code borrowed from that repository is Copyright (c) 2015 The Httpgzip Authors.
package encode

import (
	"fmt"
	"io"
	"math"
	"net/http"
	"slices"
	"sort"
	"strconv"
	"strings"
	"sync"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Encode{})
}

// Encode is a middleware which can encode responses.
type Encode struct {
	// Selection of compression algorithms to choose from. The best one
	// will be chosen based on the client's Accept-Encoding header.
	EncodingsRaw caddy.ModuleMap `json:"encodings,omitempty" caddy:"namespace=http.encoders"`

	// If the client has no strong preference, choose these encodings in order.
	Prefer []string `json:"prefer,omitempty"`

	// Only encode responses that are at least this many bytes long.
	MinLength int `json:"minimum_length,omitempty"`

	// Only encode responses that match against this ResponseMatcher.
	// The default is a collection of text-based Content-Type headers.
	Matcher *caddyhttp.ResponseMatcher `json:"match,omitempty"`

	// one pool of reusable encoder instances per enabled encoding,
	// keyed by Accept-Encoding name; filled in by addEncoding
	writerPools map[string]*sync.Pool // TODO: these pools do not get reused through config reloads...
}

// CaddyModule returns the Caddy module information.
func (Encode) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.encode",
		New: func() caddy.Module { return new(Encode) },
	}
}

// Provision provisions enc. It loads the configured encoder modules,
// applies the default minimum length, and installs the default
// text-based Content-Type response matcher when none was configured.
func (enc *Encode) Provision(ctx caddy.Context) error {
	mods, err := ctx.LoadModule(enc, "EncodingsRaw")
	if err != nil {
		return fmt.Errorf("loading encoder modules: %v", err)
	}
	for modName, modIface := range mods.(map[string]any) {
		err = enc.addEncoding(modIface.(Encoding))
		if err != nil {
			return fmt.Errorf("adding encoding %s: %v", modName, err)
		}
	}
	// zero means "unset": fall back to the package default threshold
	if enc.MinLength == 0 {
		enc.MinLength = defaultMinLength
	}

	if enc.Matcher == nil {
		// common text-based content types
		// list based on https://developers.cloudflare.com/speed/optimization/content/brotli/content-compression/#compression-between-cloudflare-and-website-visitors
		enc.Matcher = &caddyhttp.ResponseMatcher{
			Headers: http.Header{
				"Content-Type": []string{
					"application/atom+xml*",
					"application/eot*",
					"application/font*",
					"application/geo+json*",
					"application/graphql+json*",
					"application/graphql-response+json*",
					"application/javascript*",
					"application/json*",
					"application/ld+json*",
					"application/manifest+json*",
					"application/opentype*",
					"application/otf*",
					"application/rss+xml*",
					"application/truetype*",
					"application/ttf*",
					"application/vnd.api+json*",
					"application/vnd.ms-fontobject*",
					"application/wasm*",
					"application/x-httpd-cgi*",
					"application/x-javascript*",
					"application/x-opentype*",
					"application/x-otf*",
					"application/x-perl*",
					"application/x-protobuf*",
					"application/x-ttf*",
					"application/xhtml+xml*",
					"application/xml*",
					"font/ttf*",
					"font/otf*",
					"image/svg+xml*",
					"image/vnd.microsoft.icon*",
					"image/x-icon*",
					"multipart/bag*",
					"multipart/mixed*",
					"text/*",
				},
			},
		}
	}

	return nil
}

// Validate ensures that enc's configuration is valid.
func (enc *Encode) Validate() error {
	check := make(map[string]bool)
	for _, encName := range enc.Prefer {
		// every preferred encoding must actually have been enabled
		if _, ok := enc.writerPools[encName]; !ok {
			return fmt.Errorf("encoding %s not enabled", encName)
		}

		// and each encoding may appear at most once in the prefer list
		if _, ok := check[encName]; ok {
			return fmt.Errorf("encoding %s is duplicated in prefer", encName)
		}
		check[encName] = true
	}

	return nil
}

// isEncodeAllowed reports whether the Cache-Control header permits
// transforming the payload: any value containing "no-transform"
// forbids encoding.
func isEncodeAllowed(h http.Header) bool {
	return !strings.Contains(h.Get("Cache-Control"), "no-transform")
}

// ServeHTTP wraps the response writer with an encoder for the first
// client-accepted encoding that this handler offers, then invokes the
// rest of the chain; if the chain errors, encoding is disabled so error
// handlers can rewrite the response without corruption.
func (enc *Encode) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	if isEncodeAllowed(r.Header) {
		for _, encName := range AcceptedEncodings(r, enc.Prefer) {
			if _, ok := enc.writerPools[encName]; !ok {
				continue // encoding not offered
			}
			w = enc.openResponseWriter(encName, w, r.Method == http.MethodConnect)
			defer w.(*responseWriter).Close()

			// to comply with RFC 9110 section 8.8.3(.3), we modify the Etag when encoding
			// by appending a hyphen and the encoder name; the problem is, the client will
			// send back that Etag in a If-None-Match header, but upstream handlers that set
			// the Etag in the first place don't know that we appended to their Etag! so here
			// we have to strip our addition so the upstream handlers can still honor client
			// caches without knowing about our changes...
			if etag := r.Header.Get("If-None-Match"); etag != "" && !strings.HasPrefix(etag, "W/") {
				ourSuffix := "-" + encName + `"`
				if before, ok := strings.CutSuffix(etag, ourSuffix); ok {
					etag = before + `"`
					r.Header.Set("If-None-Match", etag)
				}
			}

			break
		}
	}

	err := next.ServeHTTP(w, r)

	// If there was an error, disable encoding completely
	// This prevents corruption when handle_errors processes the response
	if err != nil {
		if ew, ok := w.(*responseWriter); ok {
			ew.disabled = true
		}
	}

	return err
}

// addEncoding registers a sync.Pool of encoder instances for e under
// its Accept-Encoding name; it errors on empty or duplicate names.
func (enc *Encode) addEncoding(e Encoding) error {
	ae := e.AcceptEncoding()
	if ae == "" {
		return fmt.Errorf("encoder does not specify an Accept-Encoding value")
	}
	if _, ok := enc.writerPools[ae]; ok {
		return fmt.Errorf("encoder already added: %s", ae)
	}
	if enc.writerPools == nil {
		enc.writerPools = make(map[string]*sync.Pool)
	}
	enc.writerPools[ae] = &sync.Pool{
		New: func() any {
			return e.NewEncoder()
		},
	}
	return nil
}

// openResponseWriter creates a new response writer that may (or may not)
// encode the response with encodingName. The returned response writer MUST
// be closed after the handler completes.
func (enc *Encode) openResponseWriter(encodingName string, w http.ResponseWriter, isConnect bool) *responseWriter {
	var rw responseWriter
	return enc.initResponseWriter(&rw, encodingName, w, isConnect)
}

// initResponseWriter initializes the responseWriter instance
// allocated in openResponseWriter, enabling mid-stack inlining.
func (enc *Encode) initResponseWriter(rw *responseWriter, encodingName string, wrappedRW http.ResponseWriter, isConnect bool) *responseWriter {
	// avoid double-wrapping if the writer is already a Caddy wrapper
	if rww, ok := wrappedRW.(*caddyhttp.ResponseWriterWrapper); ok {
		rw.ResponseWriter = rww
	} else {
		rw.ResponseWriter = &caddyhttp.ResponseWriterWrapper{ResponseWriter: wrappedRW}
	}
	rw.encodingName = encodingName
	rw.config = enc
	rw.isConnect = isConnect

	return rw
}

// responseWriter writes to an underlying response writer
// using the encoding represented by encodingName and
// configured by config.
type responseWriter struct {
	http.ResponseWriter
	encodingName string  // name of the negotiated encoding, keys into config.writerPools
	w            Encoder // lazily-initialized encoder; nil means write through unencoded
	config       *Encode // parent handler (pools, MinLength, response matcher)
	statusCode   int     // status given to WriteHeader, held until the first body write
	wroteHeader  bool    // whether the header was actually written downstream
	isConnect    bool    // whether this response is for a CONNECT request
	disabled     bool    // disable encoding (for error responses)
}

// WriteHeader stores the status to write when the time comes
// to actually write the header.
func (rw *responseWriter) WriteHeader(status int) {
	rw.statusCode = status

	// See #5849 and RFC 9110 section 15.4.5 (https://www.rfc-editor.org/rfc/rfc9110.html#section-15.4.5) - 304
	// Not Modified must have certain headers set as if it was a 200 response, and according to the issue
	// we would miss the Vary header in this case when compression was also enabled; note that we set this
	// header in the responseWriter.init() method but that is only called if we are writing a response body
	if status == http.StatusNotModified && !hasVaryValue(rw.Header(), "Accept-Encoding") {
		rw.Header().Add("Vary", "Accept-Encoding")
	}

	// write status immediately if status is 2xx and the request is CONNECT
	// since it means the response is successful.
	// see: https://github.com/caddyserver/caddy/issues/6733#issuecomment-2525058845
	if rw.isConnect && 200 <= status && status <= 299 {
		rw.ResponseWriter.WriteHeader(status)
		rw.wroteHeader = true
	}

	// write status immediately when status code is informational
	// note: wroteHeader stays false, since a final (non-1xx) header may follow
	// see: https://caddy.community/t/disappear-103-early-hints-response-with-encode-enable-caddy-v2-7-6/23081/5
	if 100 <= status && status <= 199 {
		rw.ResponseWriter.WriteHeader(status)
	}
}

// Match determines, if encoding should be done based on the ResponseMatcher.
func (enc *Encode) Match(rw *responseWriter) bool {
	return enc.Matcher.Match(rw.statusCode, rw.Header())
}

// FlushError is an alternative Flush returning an error. It delays the actual Flush of the underlying
// ResponseWriterWrapper until headers were written.
func (rw *responseWriter) FlushError() error {
	// WriteHeader wasn't called and is a CONNECT request, treat it as a success.
	// otherwise, wait until header is written.
	if rw.isConnect && !rw.wroteHeader && rw.statusCode == 0 {
		rw.WriteHeader(http.StatusOK)
	}

	if !rw.wroteHeader {
		// flushing the underlying ResponseWriter will write header and status code,
		// but we need to delay that until we can determine if we must encode and
		// therefore add the Content-Encoding header; this happens in the first call
		// to rw.Write (see bug in #4314)
		return nil
	}
	// also flushes the encoder, if any
	// see: https://github.com/jjiang-stripe/caddy-slow-gzip
	if rw.w != nil {
		err := rw.w.Flush()
		if err != nil {
			return err
		}
	}
	//nolint:bodyclose
	return http.NewResponseController(rw.ResponseWriter).Flush()
}

// Write writes to the response. If the response qualifies,
// it is encoded using the encoder, which is initialized
// if not done so already.
func (rw *responseWriter) Write(p []byte) (int, error) {
	// ignore zero data writes, probably head request
	if len(p) == 0 {
		return 0, nil
	}

	// WriteHeader wasn't called and is a CONNECT request, treat it as a success.
	// otherwise, determine if the response should be compressed.
	if rw.isConnect && !rw.wroteHeader && rw.statusCode == 0 {
		rw.WriteHeader(http.StatusOK)
	}

	// sniff content-type and determine content-length
	if !rw.wroteHeader && rw.config.MinLength > 0 {
		var gtMinLength bool
		if len(p) > rw.config.MinLength {
			gtMinLength = true
		} else if cl, err := strconv.Atoi(rw.Header().Get("Content-Length")); err == nil && cl > rw.config.MinLength {
			gtMinLength = true
		}

		if gtMinLength {
			if rw.Header().Get("Content-Type") == "" {
				rw.Header().Set("Content-Type", http.DetectContentType(p))
			}
			// response is big enough: decide (via init) whether to start encoding
			rw.init()
		}
	}

	// before we write to the response, we need to make
	// sure the header is written exactly once; we do
	// that by checking if a status code has been set,
	// and if so, that means we haven't written the
	// header OR the default status code will be written
	// by the standard library
	if !rw.wroteHeader {
		if rw.statusCode != 0 {
			rw.ResponseWriter.WriteHeader(rw.statusCode)
		}
		rw.wroteHeader = true
	}

	if rw.w != nil {
		return rw.w.Write(p)
	} else {
		return rw.ResponseWriter.Write(p)
	}
}

// used to mask ReadFrom method
type writerOnly struct {
	io.Writer
}

// copied from stdlib
const sniffLen = 512

// ReadFrom will try to use sendfile to copy from the reader to the response writer.
// It's only used if the response writer implements io.ReaderFrom and the data can't be compressed.
// It's based on stdlib http1.1 response writer implementation.
// https://github.com/golang/go/blob/f4e3ec3dbe3b8e04a058d266adf8e048bab563f2/src/net/http/server.go#L586
func (rw *responseWriter) ReadFrom(r io.Reader) (int64, error) {
	rf, ok := rw.ResponseWriter.(io.ReaderFrom)
	// sendfile can't be used anyway
	if !ok {
		// mask ReadFrom to avoid infinite recursion
		return io.Copy(writerOnly{rw}, r)
	}

	var ns int64
	// try to sniff the content type and determine if the response should be compressed
	if !rw.wroteHeader && rw.config.MinLength > 0 {
		var (
			err error
			buf [sniffLen]byte
		)
		// mask ReadFrom to let Write determine if the response should be compressed
		ns, err = io.CopyBuffer(writerOnly{rw}, io.LimitReader(r, sniffLen), buf[:])
		if err != nil || ns < sniffLen {
			return ns, err
		}
	}

	// the response will be compressed, no sendfile support
	if rw.w != nil {
		nr, err := io.Copy(rw.w, r)
		return nr + ns, err
	}

	nr, err := rf.ReadFrom(r)
	return nr + ns, err
}

// Close writes any remaining buffered response and
// deallocates any active resources.
func (rw *responseWriter) Close() error {
	// didn't write, probably head request
	if !rw.wroteHeader {
		cl, err := strconv.Atoi(rw.Header().Get("Content-Length"))
		if err == nil && cl > rw.config.MinLength {
			rw.init()
		}

		// issue #5059, don't write status code if not set explicitly.
		if rw.statusCode != 0 {
			rw.ResponseWriter.WriteHeader(rw.statusCode)
		}
		rw.wroteHeader = true
	}

	var err error
	if rw.w != nil {
		// finish the stream, then return the encoder to its pool for reuse
		err = rw.w.Close()
		rw.w.Reset(nil)
		rw.config.writerPools[rw.encodingName].Put(rw.w)
		rw.w = nil
	}

	return err
}

// Unwrap returns the underlying ResponseWriter.
func (rw *responseWriter) Unwrap() http.ResponseWriter {
	return rw.ResponseWriter
}

// init should be called before the first body bytes are written.
func (rw *responseWriter) init() { // Don't initialize encoder for error responses // This prevents response corruption when handle_errors is used if rw.disabled { return } hdr := rw.Header() if hdr.Get("Content-Encoding") == "" && isEncodeAllowed(hdr) && rw.config.Match(rw) { rw.w = rw.config.writerPools[rw.encodingName].Get().(Encoder) rw.w.Reset(rw.ResponseWriter) hdr.Del("Content-Length") // https://github.com/golang/go/issues/14975 hdr.Set("Content-Encoding", rw.encodingName) if !hasVaryValue(hdr, "Accept-Encoding") { hdr.Add("Vary", "Accept-Encoding") } hdr.Del("Accept-Ranges") // we don't know ranges for dynamically-encoded content // strong ETags need to be distinct depending on the encoding ("selected representation") // see RFC 9110 section 8.8.3.3: // https://www.rfc-editor.org/rfc/rfc9110.html#name-example-entity-tags-varying // I don't know a great way to do this... how about appending? That's a neat trick! // (We have to strip the value we append from If-None-Match headers before // sending subsequent requests back upstream, however, since upstream handlers // don't know about our appending to their Etag since they've already done their work) if etag := hdr.Get("Etag"); etag != "" && !strings.HasPrefix(etag, "W/") { etag = fmt.Sprintf(`%s-%s"`, strings.TrimSuffix(etag, `"`), rw.encodingName) hdr.Set("Etag", etag) } } } func hasVaryValue(hdr http.Header, target string) bool { for _, vary := range hdr.Values("Vary") { for val := range strings.SplitSeq(vary, ",") { if strings.EqualFold(strings.TrimSpace(val), target) { return true } } } return false } // AcceptedEncodings returns the list of encodings that the // client supports, in descending order of preference. // The client preference via q-factor and the server // preference via Prefer setting are taken into account. If // the Sec-WebSocket-Key header is present then non-identity // encodings are not considered. See // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html. 
func AcceptedEncodings(r *http.Request, preferredOrder []string) []string { acceptEncHeader := r.Header.Get("Accept-Encoding") websocketKey := r.Header.Get("Sec-WebSocket-Key") if acceptEncHeader == "" { return []string{} } prefs := []encodingPreference{} for accepted := range strings.SplitSeq(acceptEncHeader, ",") { parts := strings.Split(accepted, ";") encName := strings.ToLower(strings.TrimSpace(parts[0])) // determine q-factor qFactor := 1.0 if len(parts) > 1 { qFactorStr := strings.ToLower(strings.TrimSpace(parts[1])) if strings.HasPrefix(qFactorStr, "q=") { if qFactorFloat, err := strconv.ParseFloat(qFactorStr[2:], 32); err == nil { if qFactorFloat >= 0 && qFactorFloat <= 1 { qFactor = qFactorFloat } } } } // encodings with q-factor of 0 are not accepted; // use a small threshold to account for float precision if qFactor < 0.00001 { continue } // don't encode WebSocket handshakes if websocketKey != "" && encName != "identity" { continue } // set server preference prefOrder := slices.Index(preferredOrder, encName) if prefOrder > -1 { prefOrder = len(preferredOrder) - prefOrder } prefs = append(prefs, encodingPreference{ encoding: encName, q: qFactor, preferOrder: prefOrder, }) } // sort preferences by descending q-factor first, then by preferOrder sort.Slice(prefs, func(i, j int) bool { if math.Abs(prefs[i].q-prefs[j].q) < 0.00001 { return prefs[i].preferOrder > prefs[j].preferOrder } return prefs[i].q > prefs[j].q }) prefEncNames := make([]string, len(prefs)) for i := range prefs { prefEncNames[i] = prefs[i].encoding } return prefEncNames } // encodingPreference pairs an encoding with its q-factor. type encodingPreference struct { encoding string q float64 preferOrder int } // Encoder is a type which can encode a stream of data. 
type Encoder interface { io.WriteCloser Reset(io.Writer) Flush() error // encoder by default buffers data to maximize compressing rate } // Encoding is a type which can create encoders of its kind // and return the name used in the Accept-Encoding header. type Encoding interface { AcceptEncoding() string NewEncoder() Encoder } // Precompressed is a type which returns filename suffix of precompressed // file and Accept-Encoding header to use when serving this file. type Precompressed interface { AcceptEncoding() string Suffix() string } // defaultMinLength is the minimum length at which to compress content. const defaultMinLength = 512 // Interface guards var ( _ caddy.Provisioner = (*Encode)(nil) _ caddy.Validator = (*Encode)(nil) _ caddyhttp.MiddlewareHandler = (*Encode)(nil) ) ================================================ FILE: modules/caddyhttp/encode/encode_test.go ================================================ package encode import ( "net/http" "slices" "sync" "testing" ) func BenchmarkOpenResponseWriter(b *testing.B) { enc := new(Encode) for b.Loop() { enc.openResponseWriter("test", nil, false) } } func TestPreferOrder(t *testing.T) { testCases := []struct { name string accept string prefer []string expected []string }{ { name: "PreferOrder(): 4 accept, 3 prefer", accept: "deflate, gzip, br, zstd", prefer: []string{"zstd", "br", "gzip"}, expected: []string{"zstd", "br", "gzip", "deflate"}, }, { name: "PreferOrder(): 2 accept, 3 prefer", accept: "deflate, zstd", prefer: []string{"zstd", "br", "gzip"}, expected: []string{"zstd", "deflate"}, }, { name: "PreferOrder(): 2 accept (1 empty), 3 prefer", accept: "gzip,,zstd", prefer: []string{"zstd", "br", "gzip"}, expected: []string{"zstd", "gzip", ""}, }, { name: "PreferOrder(): 1 accept, 2 prefer", accept: "gzip", prefer: []string{"zstd", "gzip"}, expected: []string{"gzip"}, }, { name: "PreferOrder(): 4 accept (1 duplicate), 1 prefer", accept: "deflate, gzip, br, br", prefer: []string{"br"}, expected: 
[]string{"br", "br", "deflate", "gzip"}, }, { name: "PreferOrder(): empty accept, 0 prefer", accept: "", prefer: []string{}, expected: []string{}, }, { name: "PreferOrder(): empty accept, 1 prefer", accept: "", prefer: []string{"gzip"}, expected: []string{}, }, { name: "PreferOrder(): with q-factor", accept: "deflate;q=0.8, gzip;q=0.4, br;q=0.2, zstd", prefer: []string{"gzip"}, expected: []string{"zstd", "deflate", "gzip", "br"}, }, { name: "PreferOrder(): with q-factor, no prefer", accept: "deflate;q=0.8, gzip;q=0.4, br;q=0.2, zstd", prefer: []string{}, expected: []string{"zstd", "deflate", "gzip", "br"}, }, { name: "PreferOrder(): q-factor=0 filtered out", accept: "deflate;q=0.1, gzip;q=0.4, br;q=0.5, zstd;q=0", prefer: []string{"gzip"}, expected: []string{"br", "gzip", "deflate"}, }, { name: "PreferOrder(): q-factor=0 filtered out, no prefer", accept: "deflate;q=0.1, gzip;q=0.4, br;q=0.5, zstd;q=0", prefer: []string{}, expected: []string{"br", "gzip", "deflate"}, }, { name: "PreferOrder(): with invalid q-factor", accept: "br, deflate, gzip;q=2, zstd;q=0.1", prefer: []string{"zstd", "gzip"}, expected: []string{"gzip", "br", "deflate", "zstd"}, }, { name: "PreferOrder(): with invalid q-factor, no prefer", accept: "br, deflate, gzip;q=2, zstd;q=0.1", prefer: []string{}, expected: []string{"br", "deflate", "gzip", "zstd"}, }, } enc := new(Encode) r, _ := http.NewRequest("", "", nil) for _, test := range testCases { t.Run(test.name, func(t *testing.T) { if test.accept == "" { r.Header.Del("Accept-Encoding") } else { r.Header.Set("Accept-Encoding", test.accept) } enc.Prefer = test.prefer result := AcceptedEncodings(r, enc.Prefer) if !slices.Equal(result, test.expected) { t.Errorf("AcceptedEncodings() actual: %s expected: %s", result, test.expected) } }) } } func TestValidate(t *testing.T) { type testCase struct { name string prefer []string wantErr bool } var err error var testCases []testCase enc := new(Encode) enc.writerPools = map[string]*sync.Pool{ "zstd": nil, 
"gzip": nil, "br": nil, } testCases = []testCase{ { name: "ValidatePrefer (zstd, gzip & br enabled): valid order with all encoder", prefer: []string{"zstd", "br", "gzip"}, wantErr: false, }, { name: "ValidatePrefer (zstd, gzip & br enabled): valid order with 2 out of 3 encoders", prefer: []string{"br", "gzip"}, wantErr: false, }, { name: "ValidatePrefer (zstd, gzip & br enabled): valid order with 1 out of 3 encoders", prefer: []string{"gzip"}, wantErr: false, }, { name: "ValidatePrefer (zstd, gzip & br enabled): 1 duplicated (once) encoder", prefer: []string{"gzip", "zstd", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd, gzip & br enabled): 1 not enabled encoder in prefer list", prefer: []string{"br", "zstd", "gzip", "deflate"}, wantErr: true, }, { name: "ValidatePrefer (zstd, gzip & br enabled): no prefer list", prefer: []string{}, wantErr: false, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { enc.Prefer = test.prefer err = enc.Validate() if (err != nil) != test.wantErr { t.Errorf("Validate() error = %v, wantErr = %v", err, test.wantErr) } }) } enc.writerPools = map[string]*sync.Pool{ "zstd": nil, "gzip": nil, } testCases = []testCase{ { name: "ValidatePrefer (zstd & gzip enabled): 1 not enabled encoder in prefer list", prefer: []string{"zstd", "br", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): 2 not enabled encoder in prefer list", prefer: []string{"br", "zstd", "gzip", "deflate"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): only not enabled encoder in prefer list", prefer: []string{"deflate", "br", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated (once) encoder in prefer list", prefer: []string{"gzip", "zstd", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated (twice) encoder in prefer list", prefer: []string{"gzip", "zstd", "gzip", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip 
enabled): 1 duplicated encoder in prefer list", prefer: []string{"zstd", "zstd", "gzip", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): 1 duplicated not enabled encoder in prefer list", prefer: []string{"br", "br", "gzip"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): 2 duplicated not enabled encoder in prefer list", prefer: []string{"br", "deflate", "br", "deflate"}, wantErr: true, }, { name: "ValidatePrefer (zstd & gzip enabled): valid order zstd first", prefer: []string{"zstd", "gzip"}, wantErr: false, }, { name: "ValidatePrefer (zstd & gzip enabled): valid order gzip first", prefer: []string{"gzip", "zstd"}, wantErr: false, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { enc.Prefer = test.prefer err = enc.Validate() if (err != nil) != test.wantErr { t.Errorf("Validate() error = %v, wantErr = %v", err, test.wantErr) } }) } } func TestIsEncodeAllowed(t *testing.T) { testCases := []struct { name string headers http.Header expected bool }{ { name: "Without any headers", headers: http.Header{}, expected: true, }, { name: "Without Cache-Control HTTP header", headers: http.Header{ "Accept-Encoding": {"gzip"}, }, expected: true, }, { name: "Cache-Control HTTP header ending with no-transform directive", headers: http.Header{ "Accept-Encoding": {"gzip"}, "Cache-Control": {"no-cache; no-transform"}, }, expected: false, }, { name: "With Cache-Control HTTP header no-transform as Cache-Extension value", headers: http.Header{ "Accept-Encoding": {"gzip"}, "Cache-Control": {`no-store; no-cache; community="no-transform"`}, }, expected: false, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { if result := isEncodeAllowed(test.headers); result != test.expected { t.Errorf("The headers given to the isEncodeAllowed should return %t, %t given.", result, test.expected) } }) } } ================================================ FILE: modules/caddyhttp/encode/gzip/gzip.go 
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddygzip

import (
	"fmt"
	"strconv"

	"github.com/klauspost/compress/gzip"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
)

func init() {
	caddy.RegisterModule(Gzip{})
}

// Gzip can create gzip encoders.
type Gzip struct {
	// The gzip compression level to use. If 0 (unset),
	// Provision replaces it with defaultGzipLevel.
	Level int `json:"level,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (Gzip) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.encoders.gzip",
		New: func() caddy.Module { return new(Gzip) },
	}
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
// The compression level argument is optional; when absent,
// Provision applies the default level.
func (g *Gzip) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume option name
	if !d.NextArg() {
		// no level given; keep the zero value so Provision fills in the default
		return nil
	}
	levelStr := d.Val()
	level, err := strconv.Atoi(levelStr)
	if err != nil {
		return err
	}
	g.Level = level
	return nil
}

// Provision provisions g's configuration.
func (g *Gzip) Provision(ctx caddy.Context) error {
	if g.Level == 0 {
		g.Level = defaultGzipLevel
	}
	return nil
}

// Validate validates g's configuration.
func (g Gzip) Validate() error { if g.Level < gzip.StatelessCompression { return fmt.Errorf("quality too low; must be >= %d", gzip.StatelessCompression) } if g.Level > gzip.BestCompression { return fmt.Errorf("quality too high; must be <= %d", gzip.BestCompression) } return nil } // AcceptEncoding returns the name of the encoding as // used in the Accept-Encoding request headers. func (Gzip) AcceptEncoding() string { return "gzip" } // NewEncoder returns a new gzip writer. func (g Gzip) NewEncoder() encode.Encoder { writer, _ := gzip.NewWriterLevel(nil, g.Level) return writer } // Informed from http://blog.klauspost.com/gzip-performance-for-go-webservers/ var defaultGzipLevel = 5 // Interface guards var ( _ encode.Encoding = (*Gzip)(nil) _ caddy.Provisioner = (*Gzip)(nil) _ caddy.Validator = (*Gzip)(nil) _ caddyfile.Unmarshaler = (*Gzip)(nil) ) ================================================ FILE: modules/caddyhttp/encode/gzip/gzip_precompressed.go ================================================ package caddygzip import ( "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode" ) func init() { caddy.RegisterModule(GzipPrecompressed{}) } // GzipPrecompressed provides the file extension for files precompressed with gzip encoding. type GzipPrecompressed struct { Gzip } // CaddyModule returns the Caddy module information. func (GzipPrecompressed) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.precompressed.gzip", New: func() caddy.Module { return new(GzipPrecompressed) }, } } // Suffix returns the filename suffix of precompressed files. 
func (GzipPrecompressed) Suffix() string { return ".gz" }

// Interface guard
var _ encode.Precompressed = (*GzipPrecompressed)(nil)

================================================
FILE: modules/caddyhttp/encode/zstd/zstd.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyzstd

import (
	"fmt"

	"github.com/klauspost/compress/zstd"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
)

func init() {
	caddy.RegisterModule(Zstd{})
}

// Zstd can create Zstandard encoders.
type Zstd struct {
	// The compression level. Accepted values: fastest, better, best, default.
	Level string `json:"level,omitempty"`

	// level is the parsed form of Level, resolved by Provision; one of
	// the zstd.EncoderLevel constants (zstd.SpeedFastest through
	// zstd.SpeedBestCompression).
	level zstd.EncoderLevel
}

// CaddyModule returns the Caddy module information.
func (Zstd) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.encoders.zstd",
		New: func() caddy.Module { return new(Zstd) },
	}
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens.
func (z *Zstd) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume option name if !d.NextArg() { return nil } levelStr := d.Val() if ok, _ := zstd.EncoderLevelFromString(levelStr); !ok { return d.Errf("unexpected compression level, use one of '%s', '%s', '%s', '%s'", zstd.SpeedFastest, zstd.SpeedBetterCompression, zstd.SpeedBestCompression, zstd.SpeedDefault, ) } z.Level = levelStr return nil } // Provision provisions z's configuration. func (z *Zstd) Provision(ctx caddy.Context) error { if z.Level == "" { z.Level = zstd.SpeedDefault.String() } var ok bool if ok, z.level = zstd.EncoderLevelFromString(z.Level); !ok { return fmt.Errorf("unexpected compression level, use one of '%s', '%s', '%s', '%s'", zstd.SpeedFastest, zstd.SpeedDefault, zstd.SpeedBetterCompression, zstd.SpeedBestCompression, ) } return nil } // AcceptEncoding returns the name of the encoding as // used in the Accept-Encoding request headers. func (Zstd) AcceptEncoding() string { return "zstd" } // NewEncoder returns a new Zstandard writer. func (z Zstd) NewEncoder() encode.Encoder { // The default of 8MB for the window is // too large for many clients, so we limit // it to 128K to lighten their load. writer, _ := zstd.NewWriter( nil, zstd.WithWindowSize(128<<10), zstd.WithEncoderConcurrency(1), zstd.WithZeroFrames(true), zstd.WithEncoderLevel(z.level), ) return writer } // Interface guards var ( _ encode.Encoding = (*Zstd)(nil) _ caddyfile.Unmarshaler = (*Zstd)(nil) _ caddy.Provisioner = (*Zstd)(nil) ) ================================================ FILE: modules/caddyhttp/encode/zstd/zstd_precompressed.go ================================================ package caddyzstd import ( "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode" ) func init() { caddy.RegisterModule(ZstdPrecompressed{}) } // ZstdPrecompressed provides the file extension for files precompressed with zstandard encoding. 
type ZstdPrecompressed struct { Zstd } // CaddyModule returns the Caddy module information. func (ZstdPrecompressed) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.precompressed.zstd", New: func() caddy.Module { return new(ZstdPrecompressed) }, } } // Suffix returns the filename suffix of precompressed files. func (ZstdPrecompressed) Suffix() string { return ".zst" } var _ encode.Precompressed = (*ZstdPrecompressed)(nil) ================================================ FILE: modules/caddyhttp/errors.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "errors" "fmt" weakrand "math/rand/v2" "path" "runtime" "strings" "github.com/caddyserver/caddy/v2" ) // Error is a convenient way for a Handler to populate the // essential fields of a HandlerError. If err is itself a // HandlerError, then any essential fields that are not // set will be populated. func Error(statusCode int, err error) HandlerError { const idLen = 9 var he HandlerError if errors.As(err, &he) { if he.ID == "" { he.ID = randString(idLen, true) } if he.Trace == "" { he.Trace = trace() } if he.StatusCode == 0 { he.StatusCode = statusCode } return he } return HandlerError{ ID: randString(idLen, true), StatusCode: statusCode, Err: err, Trace: trace(), } } // HandlerError is a serializable representation of // an error from within an HTTP handler. 
type HandlerError struct { Err error // the original error value and message StatusCode int // the HTTP status code to associate with this error ID string // generated; for identifying this error in logs Trace string // produced from call stack } func (e HandlerError) Error() string { var s string if e.ID != "" { s += fmt.Sprintf("{id=%s}", e.ID) } if e.Trace != "" { s += " " + e.Trace } if e.StatusCode != 0 { s += fmt.Sprintf(": HTTP %d", e.StatusCode) } if e.Err != nil { s += ": " + e.Err.Error() } return strings.TrimSpace(s) } // Unwrap returns the underlying error value. See the `errors` package for info. func (e HandlerError) Unwrap() error { return e.Err } // randString returns a string of n random characters. // It is not even remotely secure OR a proper distribution. // But it's good enough for some things. It excludes certain // confusing characters like I, l, 1, 0, O, etc. If sameCase // is true, then uppercase letters are excluded. func randString(n int, sameCase bool) string { if n <= 0 { return "" } dict := []byte("abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRTUVWXY23456789") if sameCase { dict = []byte("abcdefghijkmnpqrstuvwxyz0123456789") } b := make([]byte, n) for i := range b { //nolint:gosec b[i] = dict[weakrand.IntN(len(dict))] } return string(b) } func trace() string { if pc, file, line, ok := runtime.Caller(2); ok { filename := path.Base(file) pkgAndFuncName := path.Base(runtime.FuncForPC(pc).Name()) return fmt.Sprintf("%s (%s:%d)", pkgAndFuncName, filename, line) } return "" } // ErrorCtxKey is the context key to use when storing // an error (for use with context.Context). 
const ErrorCtxKey = caddy.CtxKey("handler_chain_error")

================================================
FILE: modules/caddyhttp/fileserver/browse.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileserver

import (
	"bytes"
	"context"
	_ "embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net/http"
	"os"
	"path"
	"strings"
	"sync"
	"text/tabwriter"
	"text/template"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
)

// BrowseTemplate is the default template document to use for
// file listings. By default, its default value is an embedded
// document. You can override this value at program start, or
// if you are running Caddy via config, you can specify a
// custom template_file in the browse configuration.
//
//go:embed browse.html
var BrowseTemplate string

// Browse configures directory browsing.
type Browse struct {
	// Filename of the template to use instead of the embedded browse template.
	TemplateFile string `json:"template_file,omitempty"`

	// Determines whether or not targets of symlinks should be revealed.
	RevealSymlinks bool `json:"reveal_symlinks,omitempty"`

	// Override the default sort.
	// It includes the following options:
	//   - sort_by: name(default), namedirfirst, size, time
	//   - order: asc(default), desc
	// eg.:
	//   - `sort time desc` will sort by time in descending order
	//   - `sort size` will sort by size in ascending order
	// The first option must be `sort_by` and the second option must be `order` (if exists).
	SortOptions []string `json:"sort,omitempty"`

	// FileLimit limits the number of up to n DirEntry values in directory order.
	FileLimit int `json:"file_limit,omitempty"`
}

const (
	// defaultDirEntryLimit caps how many directory entries are read
	// for a listing when FileLimit is not configured.
	defaultDirEntryLimit = 10000
)

// serveBrowse renders a directory listing for dirPath as JSON, plain
// text, or HTML, depending on the request's Accept header.
func (fsrv *FileServer) serveBrowse(fileSystem fs.FS, root, dirPath string, w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	if c := fsrv.logger.Check(zapcore.DebugLevel, "browse enabled; listing directory contents"); c != nil {
		c.Write(zap.String("path", dirPath), zap.String("root", root))
	}

	// Navigation on the client-side gets messed up if the
	// URL doesn't end in a trailing slash because hrefs to
	// "b/c" at path "/a" end up going to "/b/c" instead
	// of "/a/b/c" - so we have to redirect in this case
	// so that the path is "/a/" and the client constructs
	// relative hrefs "b/c" to be "/a/b/c".
	//
	// Only redirect if the last element of the path (the filename) was not
	// rewritten; if the admin wanted to rewrite to the canonical path, they
	// would have, and we have to be very careful not to introduce unwanted
	// redirects and especially redirect loops! (Redirecting using the
	// original URI is necessary because that's the URI the browser knows,
	// we don't want to redirect from internally-rewritten URIs.)
	// See https://github.com/caddyserver/caddy/issues/4205.
	// We also redirect if the path is empty, because this implies the path
	// prefix was fully stripped away by a `handle_path` handler for example.
	// See https://github.com/caddyserver/caddy/issues/4466.
	origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
	if r.URL.Path == "" || path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
		if !strings.HasSuffix(origReq.URL.Path, "/") {
			if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to trailing slash to preserve hrefs"); c != nil {
				c.Write(zap.String("request_path", r.URL.Path))
			}
			return redirect(w, r, origReq.URL.Path+"/")
		}
	}

	dir, err := fsrv.openFile(fileSystem, dirPath, w)
	if err != nil {
		return err
	}
	defer dir.Close()

	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// TODO: not entirely sure if path.Clean() is necessary here but seems like a safe plan (i.e. /%2e%2e%2f) - someone could verify this
	listing, err := fsrv.loadDirectoryContents(r.Context(), fileSystem, dir.(fs.ReadDirFile), root, path.Clean(r.URL.EscapedPath()), repl)
	switch {
	case errors.Is(err, fs.ErrPermission):
		return caddyhttp.Error(http.StatusForbidden, err)
	case errors.Is(err, fs.ErrNotExist):
		return fsrv.notFound(w, r, next)
	case err != nil:
		return caddyhttp.Error(http.StatusInternalServerError, err)
	}

	// the response body varies by Accept and may be compressed
	w.Header().Add("Vary", "Accept, Accept-Encoding")

	// speed up browser/client experience and caching by supporting If-Modified-Since
	if ifModSinceStr := r.Header.Get("If-Modified-Since"); ifModSinceStr != "" {
		// basically a copy of stdlib file server's handling of If-Modified-Since
		ifModSince, err := http.ParseTime(ifModSinceStr)
		if err == nil && listing.lastModified.Truncate(time.Second).Compare(ifModSince) <= 0 {
			w.WriteHeader(http.StatusNotModified)
			return nil
		}
	}

	fsrv.browseApplyQueryParams(w, r, listing)

	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ","))
	w.Header().Set("Last-Modified", listing.lastModified.Format(http.TimeFormat))

	switch {
	case strings.Contains(acceptHeader, "application/json"):
		if err := json.NewEncoder(buf).Encode(listing.Items); err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
		w.Header().Set("Content-Type", "application/json; charset=utf-8")

	case strings.Contains(acceptHeader, "text/plain"):
		writer := tabwriter.NewWriter(buf, 0, 8, 1, '\t', tabwriter.AlignRight)

		// Header on top
		if _, err := fmt.Fprintln(writer, "Name\tSize\tModified"); err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}

		// Lines to separate the header
		if _, err := fmt.Fprintln(writer, "----\t----\t--------"); err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}

		// Actual files
		for _, item := range listing.Items {
			//nolint:gosec // not sure how this could be XSS unless you lose control of the file system (like aren't sanitizing) and client ignores Content-Type of text/plain
			if _, err := fmt.Fprintf(writer, "%s\t%s\t%s\n",
				item.Name, item.HumanSize(), item.HumanModTime("January 2, 2006 at 15:04:05"),
			); err != nil {
				return caddyhttp.Error(http.StatusInternalServerError, err)
			}
		}

		if err := writer.Flush(); err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	default:
		var fs http.FileSystem
		if fsrv.Root != "" {
			fs = http.Dir(repl.ReplaceAll(fsrv.Root, "."))
		}

		tplCtx := &templateContext{
			TemplateContext: templates.TemplateContext{
				Root:       fs,
				Req:        r,
				RespHeader: templates.WrappedHeader{Header: w.Header()},
			},
			browseTemplateContext: listing,
		}

		tpl, err := fsrv.makeBrowseTemplate(tplCtx)
		if err != nil {
			return fmt.Errorf("parsing browse template: %v", err)
		}
		if err := tpl.Execute(buf, tplCtx); err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
	}

	_, _ = buf.WriteTo(w)

	return nil
}

// loadDirectoryContents reads up to the configured limit of entries
// from dir and builds the template context for the listing.
func (fsrv *FileServer) loadDirectoryContents(ctx context.Context, fileSystem fs.FS, dir fs.ReadDirFile, root, urlPath string, repl *caddy.Replacer) (*browseTemplateContext, error) {
	// modTime for the directory itself
	stat, err := dir.Stat()
	if err != nil {
		return nil, err
	}

	dirLimit := defaultDirEntryLimit
	if fsrv.Browse.FileLimit != 0 {
		dirLimit = fsrv.Browse.FileLimit
	}
	files, err := dir.ReadDir(dirLimit)
	if err != nil && err != io.EOF {
		return nil, err
	}

	// user can presumably browse "up" to parent folder if path is longer than "/"
	canGoUp := len(urlPath) > 1

	return fsrv.directoryListing(ctx, fileSystem, stat.ModTime(), files, canGoUp, root, urlPath, repl), nil
}

// browseApplyQueryParams applies query parameters to the listing.
// It mutates the listing and may set cookies.
func (fsrv *FileServer) browseApplyQueryParams(w http.ResponseWriter, r *http.Request, listing *browseTemplateContext) {
	var orderParam, sortParam string

	// The configs in Caddyfile have lower priority than Query params,
	// so put it at first.
	for idx, item := range fsrv.Browse.SortOptions {
		// Only `sort` & `order`, 2 params are allowed
		if idx >= 2 {
			break
		}
		switch item {
		case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
			sortParam = item
		case sortOrderAsc, sortOrderDesc:
			orderParam = item
		}
	}

	layoutParam := r.URL.Query().Get("layout")
	limitParam := r.URL.Query().Get("limit")
	offsetParam := r.URL.Query().Get("offset")
	sortParamTmp := r.URL.Query().Get("sort")
	if sortParamTmp != "" {
		sortParam = sortParamTmp
	}
	orderParamTmp := r.URL.Query().Get("order")
	if orderParamTmp != "" {
		orderParam = orderParamTmp
	}

	switch layoutParam {
	case "list", "grid", "":
		listing.Layout = layoutParam
	default:
		listing.Layout = "list"
	}

	// figure out what to sort by
	switch sortParam {
	case "":
		sortParam = sortByNameDirFirst
		if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil {
			sortParam = sortCookie.Value
		}
	case sortByName, sortByNameDirFirst, sortBySize, sortByTime:
		// a recognized value persists the preference in a cookie
		http.SetCookie(w, &http.Cookie{Name: "sort", Value: sortParam, Secure: r.TLS != nil})
	}

	// then figure out the order
	switch orderParam {
	case "":
		orderParam = sortOrderAsc
		if orderCookie, orderErr := r.Cookie("order"); orderErr == nil {
			orderParam = orderCookie.Value
		}
	case sortOrderAsc, sortOrderDesc:
		http.SetCookie(w, &http.Cookie{Name: "order", Value: orderParam, Secure: r.TLS != nil})
	}

	// finally, apply the sorting and limiting
	listing.applySortAndLimit(sortParam, orderParam, limitParam, offsetParam)
}

// makeBrowseTemplate creates the template to be used for directory listings.
func (fsrv *FileServer) makeBrowseTemplate(tplCtx *templateContext) (*template.Template, error) {
	var tpl *template.Template
	var err error
	if fsrv.Browse.TemplateFile != "" {
		tpl = tplCtx.NewTemplate(path.Base(fsrv.Browse.TemplateFile))
		tpl, err = tpl.ParseFiles(fsrv.Browse.TemplateFile)
		if err != nil {
			return nil, fmt.Errorf("parsing browse template file: %v", err)
		}
	} else {
		tpl = tplCtx.NewTemplate("default_listing")
		tpl, err = tpl.Parse(BrowseTemplate)
		if err != nil {
			return nil, fmt.Errorf("parsing default browse template: %v", err)
		}
	}
	return tpl, nil
}

// isSymlinkTargetDir returns true if f's symbolic link target
// is a directory.
func (fsrv *FileServer) isSymlinkTargetDir(fileSystem fs.FS, f fs.FileInfo, root, urlPath string) bool {
	if !isSymlink(f) {
		return false
	}
	target := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, f.Name()))
	targetInfo, err := fs.Stat(fileSystem, target)
	if err != nil {
		return false
	}
	return targetInfo.IsDir()
}

// isSymlink return true if f is a symbolic link.
func isSymlink(f fs.FileInfo) bool {
	return f.Mode()&os.ModeSymlink != 0
}

// templateContext powers the context used when evaluating the browse template.
// It combines browse-specific features with the standard templates handler
// features.
type templateContext struct {
	templates.TemplateContext
	*browseTemplateContext
}

// bufPool is used to increase the efficiency of file listings.
var bufPool = sync.Pool{ New: func() any { return new(bytes.Buffer) }, } ================================================ FILE: modules/caddyhttp/fileserver/browse.html ================================================ {{ $nonce := uuidv4 -}} {{ $nonceAttribute := print "nonce=" (quote $nonce) -}} {{ $csp := printf "default-src 'none'; img-src 'self'; object-src 'none'; base-uri 'none'; script-src 'nonce-%s'; style-src 'nonce-%s'; frame-ancestors 'self'; form-action 'self';" $nonce $nonce -}} {{/* To disable the Content-Security-Policy, set this to false */}}{{ $enableCsp := true -}} {{ if $enableCsp -}} {{- .RespHeader.Set "Content-Security-Policy" $csp -}} {{ end -}} {{- define "icon"}} {{- if .IsDir}} {{- if .IsSymlink}} {{- else}} {{- end}} {{- else if or (eq .Name "LICENSE") (eq .Name "README")}} {{- else if .HasExt ".jpg" ".jpeg" ".png" ".gif" ".webp" ".tiff" ".bmp" ".heif" ".heic" ".svg" ".avif"}} {{- if eq .Tpl.Layout "grid"}} {{- else}} {{- end}} {{- else if .HasExt ".mp4" ".mov" ".m4v" ".mpeg" ".mpg" ".avi" ".ogg" ".webm" ".mkv" ".vob" ".gifv" ".3gp"}} {{- else if .HasExt ".mp3" ".m4a" ".aac" ".ogg" ".flac" ".wav" ".wma" ".midi" ".cda"}} {{- else if .HasExt ".pdf"}} {{- else if .HasExt ".csv" ".tsv"}} {{- else if .HasExt ".txt" ".doc" ".docx" ".odt" ".fodt" ".rtf"}} {{- else if .HasExt ".xls" ".xlsx" ".ods" ".fods"}} {{- else if .HasExt ".ppt" ".pptx" ".odp" ".fodp"}} {{- else if .HasExt ".zip" ".gz" ".xz" ".tar" ".7z" ".rar" ".xz" ".zst"}} {{- else if .HasExt ".deb" ".dpkg"}} {{- else if .HasExt ".rpm" ".exe" ".flatpak" ".appimage" ".jar" ".msi" ".apk"}} {{- else if .HasExt ".ps1"}} {{- else if .HasExt ".py" ".pyc" ".pyo"}} {{- else if .HasExt ".bash" ".sh" ".com" ".bat" ".dll" ".so"}} {{- else if .HasExt ".dmg"}} {{- else if .HasExt ".iso" ".img"}} {{- else if .HasExt ".md" ".mdown" ".markdown"}} {{- else if .HasExt ".ttf" ".otf" ".woff" ".woff2" ".eof"}} {{- else if .HasExt ".go"}} {{- else if .HasExt ".html" ".htm"}} {{- else if .HasExt ".js"}} {{- 
else if .HasExt ".css"}} {{- else if .HasExt ".json" ".json5" ".jsonc"}} {{- else if .HasExt ".ts"}} {{- else if .HasExt ".sql"}} {{- else if .HasExt ".db" ".sqlite" ".bak" ".mdb"}} {{- else if .HasExt ".eml" ".email" ".mailbox" ".mbox" ".msg"}} {{- else if .HasExt ".crt" ".pem" ".x509" ".cer" ".ca-bundle"}} {{- else if .HasExt ".key" ".keystore" ".jks" ".p12" ".pfx" ".pub"}} {{- else}} {{- if .IsSymlink}} {{- else}} {{- end}} {{- end}} {{- end}} {{html .Name}} {{- if eq .Layout "grid"}} {{- end}}

{{range $i, $crumb := .Breadcrumbs}}{{html $crumb.Text}}{{if ne $i 0}}/{{end}}{{end}}

{{.NumDirs}} director{{if eq 1 .NumDirs}}y{{else}}ies{{end}} {{.NumFiles}} file{{if ne 1 .NumFiles}}s{{end}} {{.HumanTotalFileSize}} total {{- if ne 0 .Limit}} (of which only {{.Limit}} are displayed) {{- end}}
List Grid {{- if and (eq .Layout "grid") (eq .Sort "name") (ne .Order "asc")}} Z A {{- else if and (eq .Layout "grid") (eq .Sort "name") (ne .Order "desc")}} A Z {{- else if and (eq .Layout "grid")}} A Z {{- end}} {{- if and (eq .Layout "grid") (eq .Sort "size") (ne .Order "asc")}} {{- else if and (eq .Layout "grid") (eq .Sort "size") (ne .Order "desc")}} {{- else if and (eq .Layout "grid")}} {{- end}} {{- if and (eq .Layout "grid") (eq .Sort "time") (ne .Order "asc")}} {{- else if and (eq .Layout "grid") (eq .Sort "time") (ne .Order "desc")}} {{- else if and (eq .Layout "grid")}} {{- end}}
{{- if eq .Layout "grid"}} {{- range .Items}}
{{template "icon" .}}
{{html .Name}}
{{.HumanSize}}
{{- end}} {{- else}} {{- if .CanGoUp}} {{- end}} {{- range .Items}} {{- if .IsDir}} {{- else}} {{- end}} {{- end}}
{{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}} {{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}} {{- else}} {{- end}} {{- if and (eq .Sort "name") (ne .Order "desc")}} Name {{- else if and (eq .Sort "name") (ne .Order "asc")}} Name {{- else}} Name {{- end}}
{{- if and (eq .Sort "size") (ne .Order "desc")}} Size {{- else if and (eq .Sort "size") (ne .Order "asc")}} Size {{- else}} Size {{- end}} {{- if and (eq .Sort "time") (ne .Order "desc")}} Modified {{- else if and (eq .Sort "time") (ne .Order "asc")}} Modified {{- else}} Modified {{- end}}
Up
{{template "icon" .}} {{- if not .SymlinkPath}} {{html .Name}} {{- else}} {{html .Name}} {{html .SymlinkPath}} {{- end}}
{{if .IsSymlink}}↱ {{end}}{{.HumanSize}}
{{- end}}
Served with
================================================
FILE: modules/caddyhttp/fileserver/browsetplcontext.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileserver

import (
	"context"
	"io/fs"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"slices"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/dustin/go-humanize"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// directoryListing builds the template context for a directory listing
// from the given entries, skipping hidden files and accumulating counts,
// sizes, and the most recent modification time.
func (fsrv *FileServer) directoryListing(ctx context.Context, fileSystem fs.FS, parentModTime time.Time, entries []fs.DirEntry, canGoUp bool, root, urlPath string, repl *caddy.Replacer) *browseTemplateContext {
	filesToHide := fsrv.transformHidePaths(repl)

	name, _ := url.PathUnescape(urlPath)

	tplCtx := &browseTemplateContext{
		Name:         path.Base(name),
		Path:         urlPath,
		CanGoUp:      canGoUp,
		lastModified: parentModTime,
	}

	for _, entry := range entries {
		// stop early if the request context was canceled
		if err := ctx.Err(); err != nil {
			break
		}

		name := entry.Name()

		if fileHidden(name, filesToHide) {
			continue
		}

		info, err := entry.Info()
		if err != nil {
			if c := fsrv.logger.Check(zapcore.ErrorLevel, "could not get info about directory entry"); c != nil {
				c.Write(zap.String("name", entry.Name()), zap.String("root", root))
			}
			continue
		}

		// keep track of the most recently modified item in the listing
		modTime := info.ModTime()
		if tplCtx.lastModified.IsZero() || modTime.After(tplCtx.lastModified) {
			tplCtx.lastModified = modTime
		}

		isDir := entry.IsDir() || fsrv.isSymlinkTargetDir(fileSystem, info, root, urlPath)

		// add the slash after the escape of path to avoid escaping the slash as well
		if isDir {
			name += "/"
			tplCtx.NumDirs++
		} else {
			tplCtx.NumFiles++
		}

		size := info.Size()

		if !isDir {
			// increase the total by the symlink's size, not the target's size,
			// by incrementing before we follow the symlink
			tplCtx.TotalFileSize += size
		}

		fileIsSymlink := isSymlink(info)
		symlinkPath := ""
		if fileIsSymlink {
			path := caddyhttp.SanitizedPathJoin(root, path.Join(urlPath, info.Name()))
			fileInfo, err := fs.Stat(fileSystem, path)
			if err == nil {
				size = fileInfo.Size()
			}

			if fsrv.Browse.RevealSymlinks {
				symLinkTarget, err := filepath.EvalSymlinks(path)
				if err == nil {
					symlinkPath = symLinkTarget
				}
			}

			// An error most likely means the symlink target doesn't exist,
			// which isn't entirely unusual and shouldn't fail the listing.
			// In this case, just use the size of the symlink itself, which
			// was already set above.
		}

		if !isDir {
			// increase the total including the symlink target's size
			tplCtx.TotalFileSizeFollowingSymlinks += size
		}

		u := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name

		tplCtx.Items = append(tplCtx.Items, fileInfo{
			IsDir:       isDir,
			IsSymlink:   fileIsSymlink,
			Name:        name,
			Size:        size,
			URL:         u.String(),
			ModTime:     modTime.UTC(),
			Mode:        info.Mode(),
			Tpl:         tplCtx, // a reference up to the template context is useful
			SymlinkPath: symlinkPath,
		})
	}

	// this time is used for the Last-Modified header and comparing If-Modified-Since from client
	// both are expected to be in UTC, so we convert to UTC here
	// see: https://github.com/caddyserver/caddy/issues/6828
	tplCtx.lastModified = tplCtx.lastModified.UTC()

	return tplCtx
}

// browseTemplateContext provides the template context for directory listings.
type browseTemplateContext struct {
	// The name of the directory (the last element of the path).
	Name string `json:"name"`

	// The full path of the request.
	Path string `json:"path"`

	// Whether the parent directory is browsable.
	CanGoUp bool `json:"can_go_up"`

	// The items (files and folders) in the path.
	Items []fileInfo `json:"items,omitempty"`

	// If ≠0 then Items starting from that many elements.
	Offset int `json:"offset,omitempty"`

	// If ≠0 then Items have been limited to that many elements.
	Limit int `json:"limit,omitempty"`

	// The number of directories in the listing.
	NumDirs int `json:"num_dirs"`

	// The number of files (items that aren't directories) in the listing.
	NumFiles int `json:"num_files"`

	// The total size of all files in the listing. Only includes the
	// size of the files themselves, not the size of symlink targets
	// (i.e. the calculation of this value does not follow symlinks).
	TotalFileSize int64 `json:"total_file_size"`

	// The total size of all files in the listing, including the
	// size of the files targeted by symlinks.
	TotalFileSizeFollowingSymlinks int64 `json:"total_file_size_following_symlinks"`

	// Sort column used
	Sort string `json:"sort,omitempty"`

	// Sorting order
	Order string `json:"order,omitempty"`

	// Display format (list or grid)
	Layout string `json:"layout,omitempty"`

	// The most recent file modification date in the listing.
	// Used for HTTP header purposes.
	lastModified time.Time
}

// Breadcrumbs returns l.Path where every element maps
// the link to the text to display.
func (l browseTemplateContext) Breadcrumbs() []crumb {
	if len(l.Path) == 0 {
		return []crumb{}
	}

	// skip trailing slash
	lpath := l.Path
	if lpath[len(lpath)-1] == '/' {
		lpath = lpath[:len(lpath)-1]
	}
	parts := strings.Split(lpath, "/")
	result := make([]crumb, len(parts))
	for i, p := range parts {
		if i == 0 && p == "" {
			p = "/"
		}
		// the directory name could include an encoded slash in its path,
		// so the item name should be unescaped in the loop rather than unescaping the
		// entire path outside the loop.
		p, _ = url.PathUnescape(p)
		lnk := strings.Repeat("../", len(parts)-i-1)
		result[i] = crumb{Link: lnk, Text: p}
	}

	return result
}

// applySortAndLimit sorts l.Items per the sort/order parameters and then
// applies the offset and limit parameters, recording them on l.
func (l *browseTemplateContext) applySortAndLimit(sortParam, orderParam, limitParam string, offsetParam string) {
	l.Sort = sortParam
	l.Order = orderParam

	if l.Order == "desc" {
		switch l.Sort {
		case sortByName:
			sort.Sort(sort.Reverse(byName(*l)))
		case sortByNameDirFirst:
			sort.Sort(sort.Reverse(byNameDirFirst(*l)))
		case sortBySize:
			sort.Sort(sort.Reverse(bySize(*l)))
		case sortByTime:
			sort.Sort(sort.Reverse(byTime(*l)))
		}
	} else {
		switch l.Sort {
		case sortByName:
			sort.Sort(byName(*l))
		case sortByNameDirFirst:
			sort.Sort(byNameDirFirst(*l))
		case sortBySize:
			sort.Sort(bySize(*l))
		case sortByTime:
			sort.Sort(byTime(*l))
		}
	}

	if offsetParam != "" {
		offset, _ := strconv.Atoi(offsetParam)
		if offset > 0 && offset <= len(l.Items) {
			l.Items = l.Items[offset:]
			l.Offset = offset
		}
	}

	if limitParam != "" {
		limit, _ := strconv.Atoi(limitParam)
		if limit > 0 && limit <= len(l.Items) {
			l.Items = l.Items[:limit]
			l.Limit = limit
		}
	}
}

// crumb represents part of a breadcrumb menu,
// pairing a link with the text to display.
type crumb struct {
	Link, Text string
}

// fileInfo contains serializable information
// about a file or directory.
type fileInfo struct {
	Name        string      `json:"name"`
	Size        int64       `json:"size"`
	URL         string      `json:"url"`
	ModTime     time.Time   `json:"mod_time"`
	Mode        os.FileMode `json:"mode"`
	IsDir       bool        `json:"is_dir"`
	IsSymlink   bool        `json:"is_symlink"`
	SymlinkPath string      `json:"symlink_path,omitempty"`

	// a pointer to the template context is useful inside nested templates
	Tpl *browseTemplateContext `json:"-"`
}

// HasExt returns true if the filename has any of the given suffixes, case-insensitive.
func (fi fileInfo) HasExt(exts ...string) bool { return slices.ContainsFunc(exts, func(ext string) bool { return strings.HasSuffix(strings.ToLower(fi.Name), strings.ToLower(ext)) }) } // HumanSize returns the size of the file as a // human-readable string in IEC format (i.e. // power of 2 or base 1024). func (fi fileInfo) HumanSize() string { return humanize.IBytes(uint64(fi.Size)) } // HumanTotalFileSize returns the total size of all files // in the listing as a human-readable string in IEC format // (i.e. power of 2 or base 1024). func (btc browseTemplateContext) HumanTotalFileSize() string { return humanize.IBytes(uint64(btc.TotalFileSize)) } // HumanTotalFileSizeFollowingSymlinks is the same as HumanTotalFileSize // except the returned value reflects the size of symlink targets. func (btc browseTemplateContext) HumanTotalFileSizeFollowingSymlinks() string { return humanize.IBytes(uint64(btc.TotalFileSizeFollowingSymlinks)) } // HumanModTime returns the modified time of the file // as a human-readable string given by format. 
func (fi fileInfo) HumanModTime(format string) string { return fi.ModTime.Format(format) } type ( byName browseTemplateContext byNameDirFirst browseTemplateContext bySize browseTemplateContext byTime browseTemplateContext ) func (l byName) Len() int { return len(l.Items) } func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } func (l byName) Less(i, j int) bool { return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) } func (l byNameDirFirst) Len() int { return len(l.Items) } func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } func (l byNameDirFirst) Less(i, j int) bool { // sort by name if both are dir or file if l.Items[i].IsDir == l.Items[j].IsDir { return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) } // sort dir ahead of file return l.Items[i].IsDir } func (l bySize) Len() int { return len(l.Items) } func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } func (l bySize) Less(i, j int) bool { const directoryOffset = -1 << 31 // = -math.MinInt32 iSize, jSize := l.Items[i].Size, l.Items[j].Size // directory sizes depend on the file system; to // provide a consistent experience, put them up front // and sort them by name if l.Items[i].IsDir { iSize = directoryOffset } if l.Items[j].IsDir { jSize = directoryOffset } if l.Items[i].IsDir && l.Items[j].IsDir { return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) } return iSize < jSize } func (l byTime) Len() int { return len(l.Items) } func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) } const ( sortByName = "name" sortByNameDirFirst = "namedirfirst" sortBySize = "size" sortByTime = "time" sortOrderAsc = "asc" sortOrderDesc = "desc" ) ================================================ FILE: 
modules/caddyhttp/fileserver/browsetplcontext_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileserver

import (
	"testing"
)

// TestBreadcrumbs verifies that browseTemplateContext.Breadcrumbs
// produces the expected link/text pairs, including for paths with
// percent-encoded and non-ASCII segments.
func TestBreadcrumbs(t *testing.T) {
	// each case pairs an input Path with the exact crumb slice expected
	testdata := []struct {
		path     string
		expected []crumb
	}{
		{"", []crumb{}},
		{"/", []crumb{{Text: "/"}}},
		{"/foo/", []crumb{
			{Link: "../", Text: "/"},
			{Link: "", Text: "foo"},
		}},
		{"/foo/bar/", []crumb{
			{Link: "../../", Text: "/"},
			{Link: "../", Text: "foo"},
			{Link: "", Text: "bar"},
		}},
		{"/foo bar/", []crumb{
			{Link: "../", Text: "/"},
			{Link: "", Text: "foo bar"},
		}},
		{"/foo bar/baz/", []crumb{
			{Link: "../../", Text: "/"},
			{Link: "../", Text: "foo bar"},
			{Link: "", Text: "baz"},
		}},
		{"/100%25 test coverage/is a lie/", []crumb{
			{Link: "../../", Text: "/"},
			{Link: "../", Text: "100% test coverage"},
			{Link: "", Text: "is a lie"},
		}},
		{"/AC%2FDC/", []crumb{
			{Link: "../", Text: "/"},
			{Link: "", Text: "AC/DC"},
		}},
		{"/foo/%2e%2e%2f/bar", []crumb{
			{Link: "../../../", Text: "/"},
			{Link: "../../", Text: "foo"},
			{Link: "../", Text: "../"},
			{Link: "", Text: "bar"},
		}},
		{"/foo/../bar", []crumb{
			{Link: "../../../", Text: "/"},
			{Link: "../../", Text: "foo"},
			{Link: "../", Text: ".."},
			{Link: "", Text: "bar"},
		}},
		{"foo/bar/baz", []crumb{
			{Link: "../../", Text: "foo"},
			{Link: "../", Text: "bar"},
			{Link: "", Text: "baz"},
		}},
		{"/qux/quux/corge/", []crumb{
			{Link: "../../../", Text: "/"},
			{Link: "../../", Text: "qux"},
			{Link: "../", Text: "quux"},
			{Link: "", Text: "corge"},
		}},
		{"/مجلد/", []crumb{
			{Link: "../", Text: "/"},
			{Link: "", Text: "مجلد"},
		}},
		{"/مجلد-1/مجلد-2", []crumb{
			{Link: "../../", Text: "/"},
			{Link: "../", Text: "مجلد-1"},
			{Link: "", Text: "مجلد-2"},
		}},
		{"/مجلد%2F1", []crumb{
			{Link: "../", Text: "/"},
			{Link: "", Text: "مجلد/1"},
		}},
	}

	for testNum, d := range testdata {
		l := browseTemplateContext{Path: d.path}
		actual := l.Breadcrumbs()
		if len(actual) != len(d.expected) {
			t.Errorf("Test %d: Got %d components but expected %d; got: %+v", testNum, len(actual), len(d.expected), actual)
			continue
		}
		for i, c := range actual {
			if c != d.expected[i] {
				t.Errorf("Test %d crumb %d: got %#v but expected %#v at index %d", testNum, i, c, d.expected[i], i)
			}
		}
	}
}

================================================
FILE: modules/caddyhttp/fileserver/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileserver import ( "path/filepath" "strconv" "strings" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode" "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite" ) func init() { httpcaddyfile.RegisterHandlerDirective("file_server", parseCaddyfile) httpcaddyfile.RegisterDirective("try_files", parseTryFiles) } // parseCaddyfile parses the file_server directive. // See UnmarshalCaddyfile for the syntax. func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { fsrv := new(FileServer) err := fsrv.UnmarshalCaddyfile(h.Dispenser) if err != nil { return fsrv, err } err = fsrv.FinalizeUnmarshalCaddyfile(h) if err != nil { return nil, err } return fsrv, err } // UnmarshalCaddyfile parses the file_server directive. It enables // the static file server and configures it with this syntax: // // file_server [] [browse] { // fs // root // hide // index // browse [] // precompressed // status // disable_canonical_uris // } // // The FinalizeUnmarshalCaddyfile method should be called after this // to finalize setup of hidden Caddyfiles. 
func (fsrv *FileServer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume directive name args := d.RemainingArgs() switch len(args) { case 0: case 1: if args[0] != "browse" { return d.ArgErr() } fsrv.Browse = new(Browse) default: return d.ArgErr() } for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "fs": if !d.NextArg() { return d.ArgErr() } if fsrv.FileSystem != "" { return d.Err("file system already specified") } fsrv.FileSystem = d.Val() case "hide": fsrv.Hide = d.RemainingArgs() if len(fsrv.Hide) == 0 { return d.ArgErr() } case "index": fsrv.IndexNames = d.RemainingArgs() if len(fsrv.IndexNames) == 0 { return d.ArgErr() } case "root": if !d.Args(&fsrv.Root) { return d.ArgErr() } case "browse": if fsrv.Browse != nil { return d.Err("browsing is already configured") } fsrv.Browse = new(Browse) d.Args(&fsrv.Browse.TemplateFile) for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "reveal_symlinks": if fsrv.Browse.RevealSymlinks { return d.Err("Symlinks path reveal is already enabled") } fsrv.Browse.RevealSymlinks = true case "sort": for d.NextArg() { dVal := d.Val() switch dVal { case sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOrderAsc, sortOrderDesc: fsrv.Browse.SortOptions = append(fsrv.Browse.SortOptions, dVal) default: return d.Errf("unknown sort option '%s'", dVal) } } case "file_limit": fileLimit := d.RemainingArgs() if len(fileLimit) != 1 { return d.Err("file_limit should have an integer value") } val, _ := strconv.Atoi(fileLimit[0]) if fsrv.Browse.FileLimit != 0 { return d.Err("file_limit is already enabled") } fsrv.Browse.FileLimit = val default: return d.Errf("unknown subdirective '%s'", d.Val()) } } case "precompressed": fsrv.PrecompressedOrder = d.RemainingArgs() if len(fsrv.PrecompressedOrder) == 0 { fsrv.PrecompressedOrder = []string{"br", "zstd", "gzip"} } for _, format := range fsrv.PrecompressedOrder { modID := "http.precompressed." 
+ format mod, err := caddy.GetModule(modID) if err != nil { return d.Errf("getting module named '%s': %v", modID, err) } inst := mod.New() precompress, ok := inst.(encode.Precompressed) if !ok { return d.Errf("module %s is not a precompressor; is %T", modID, inst) } if fsrv.PrecompressedRaw == nil { fsrv.PrecompressedRaw = make(caddy.ModuleMap) } fsrv.PrecompressedRaw[format] = caddyconfig.JSON(precompress, nil) } case "status": if !d.NextArg() { return d.ArgErr() } fsrv.StatusCode = caddyhttp.WeakString(d.Val()) case "disable_canonical_uris": if d.NextArg() { return d.ArgErr() } falseBool := false fsrv.CanonicalURIs = &falseBool case "pass_thru": if d.NextArg() { return d.ArgErr() } fsrv.PassThru = true case "etag_file_extensions": etagFileExtensions := d.RemainingArgs() if len(etagFileExtensions) == 0 { return d.ArgErr() } fsrv.EtagFileExtensions = etagFileExtensions default: return d.Errf("unknown subdirective '%s'", d.Val()) } } return nil } // FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which // requires having an httpcaddyfile.Helper to function, to setup hidden Caddyfiles. func (fsrv *FileServer) FinalizeUnmarshalCaddyfile(h httpcaddyfile.Helper) error { // Hide the Caddyfile (and any imported Caddyfiles). // This needs to be done in here instead of UnmarshalCaddyfile // because UnmarshalCaddyfile only has access to the dispenser // and not the helper, and only the helper has access to the // Caddyfiles function. if configFiles := h.Caddyfiles(); len(configFiles) > 0 { for _, file := range configFiles { file = filepath.Clean(file) if !fileHidden(file, fsrv.Hide) { // if there's no path separator, the file server module will hide all // files by that name, rather than a specific one; but we want to hide // only this specific file, so ensure there's always a path separator if !strings.Contains(file, separator) { file = "." + separator + file } fsrv.Hide = append(fsrv.Hide, file) } } } return nil } // parseTryFiles parses the try_files directive. 
// It combines a file matcher with a rewrite directive, so this is not
// a standard handler directive. A try_files directive has this syntax
// (notice no matcher tokens accepted):
//
//	try_files <files...> {
//	    policy first_exist|smallest_size|largest_size|most_recently_modified
//	}
//
// and is basically shorthand for:
//
//	@try_files file {
//	    try_files <files...>
//	    policy first_exist|smallest_size|largest_size|most_recently_modified
//	}
//	rewrite @try_files {http.matchers.file.relative}
//
// This directive rewrites request paths only, preserving any other part
// of the URI, unless the part is explicitly given in the file list. For
// example, if any of the files in the list have a query string:
//
//	try_files {path} index.php?{query}&p={path}
//
// then the query string will not be treated as part of the file name; and
// if that file matches, the given query string will replace any query string
// that already exists on the request URI.
func parseTryFiles(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	if !h.Next() {
		return nil, h.ArgErr()
	}

	tryFiles := h.RemainingArgs()
	if len(tryFiles) == 0 {
		return nil, h.ArgErr()
	}

	// parse out the optional try policy
	var tryPolicy string
	for h.NextBlock(0) {
		switch h.Val() {
		case "policy":
			if tryPolicy != "" {
				return nil, h.Err("try policy already configured")
			}
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			tryPolicy = h.Val()

			switch tryPolicy {
			case tryPolicyFirstExist, tryPolicyFirstExistFallback, tryPolicyLargestSize, tryPolicySmallestSize, tryPolicyMostRecentlyMod:
			default:
				return nil, h.Errf("unrecognized try policy: %s", tryPolicy)
			}
		}
	}

	// makeRoute returns a route that tries the files listed in try
	// and then rewrites to the matched file; userQueryString is
	// appended to the rewrite rule.
	makeRoute := func(try []string, userQueryString string) []httpcaddyfile.ConfigValue {
		handler := rewrite.Rewrite{
			URI: "{http.matchers.file.relative}" + userQueryString,
		}
		matcherSet := caddy.ModuleMap{
			"file": h.JSON(MatchFile{TryFiles: try, TryPolicy: tryPolicy}),
		}
		return h.NewRoute(matcherSet, handler)
	}

	var result []httpcaddyfile.ConfigValue

	// if there are query strings in the list, we have to split into
	// a separate route for each item with a query string, because
	// the rewrite is different for that item
	try := make([]string, 0, len(tryFiles))
	for _, item := range tryFiles {
		if idx := strings.Index(item, "?"); idx >= 0 {
			if len(try) > 0 {
				result = append(result, makeRoute(try, "")...)
				try = []string{}
			}
			result = append(result, makeRoute([]string{item[:idx]}, item[idx:])...)
			continue
		}
		// accumulate consecutive non-query-string parameters
		try = append(try, item)
	}
	if len(try) > 0 {
		result = append(result, makeRoute(try, "")...)
	}

	// ensure that multiple routes (possible if rewrite targets
	// have query strings, for example) are grouped together
	// so only the first matching rewrite is performed (#2891)
	h.GroupRoutes(result)

	return result, nil
}

// interface guard
var _ caddyfile.Unmarshaler = (*FileServer)(nil)

================================================
FILE: modules/caddyhttp/fileserver/command.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileserver

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/spf13/cobra"
	"go.uber.org/zap"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
	caddytpl "github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
)

// init registers the `caddy file-server` CLI command and its
// `export-template` subcommand.
func init() {
	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "file-server",
		Usage: "[--domain ] [--root ] [--listen ] [--browse] [--reveal-symlinks] [--access-log] [--precompressed]",
		Short: "Spins up a production-ready file server",
		Long: `
A simple but production-ready file server. Useful for quick deployments,
demos, and development.

The listener's socket address can be customized with the --listen flag.

If a domain name is specified with --domain, the default listener address
will be changed to the HTTPS port and the server will use HTTPS. If using
a public domain, ensure A/AAAA records are properly configured before
using this option.

By default, Zstandard and Gzip compression are enabled. Use --no-compress
to disable compression.

If --browse is enabled, requests for folders without an index file will
respond with a file listing.`,
		CobraFunc: func(cmd *cobra.Command) {
			cmd.Flags().StringP("domain", "d", "", "Domain name at which to serve the files")
			cmd.Flags().StringP("root", "r", "", "The path to the root of the site")
			cmd.Flags().StringP("listen", "l", "", "The address to which to bind the listener")
			cmd.Flags().BoolP("browse", "b", false, "Enable directory browsing")
			cmd.Flags().BoolP("reveal-symlinks", "", false, "Show symlink paths when browse is enabled.")
			cmd.Flags().BoolP("templates", "t", false, "Enable template rendering")
			cmd.Flags().BoolP("access-log", "a", false, "Enable the access log")
			cmd.Flags().BoolP("debug", "v", false, "Enable verbose debug logs")
			cmd.Flags().IntP("file-limit", "f", defaultDirEntryLimit, "Max directories to read")
			cmd.Flags().BoolP("no-compress", "", false, "Disable Zstandard and Gzip compression")
			cmd.Flags().StringSliceP("precompressed", "p", []string{}, "Specify precompression file extensions. Compression preference implied from flag order.")
			cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdFileServer)
			cmd.AddCommand(&cobra.Command{
				Use:     "export-template",
				Short:   "Exports the default file browser template",
				Example: "caddy file-server export-template > browse.html",
				RunE: func(cmd *cobra.Command, args []string) error {
					_, err := io.WriteString(os.Stdout, BrowseTemplate)
					return err
				},
			})
		},
	})
}

// cmdFileServer builds an in-memory Caddy config from the CLI flags
// and runs it; it blocks forever (select {}) once the server starts.
func cmdFileServer(fs caddycmd.Flags) (int, error) {
	caddy.TrapSignals()

	domain := fs.String("domain")
	root := fs.String("root")
	listen := fs.String("listen")
	browse := fs.Bool("browse")
	templates := fs.Bool("templates")
	accessLog := fs.Bool("access-log")
	fileLimit := fs.Int("file-limit")
	debug := fs.Bool("debug")
	revealSymlinks := fs.Bool("reveal-symlinks")
	compress := !fs.Bool("no-compress")
	precompressed, err := fs.GetStringSlice("precompressed")
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid precompressed flag: %v", err)
	}
	var handlers []json.RawMessage

	if compress {
		zstd, err := caddy.GetModule("http.encoders.zstd")
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		gzip, err := caddy.GetModule("http.encoders.gzip")
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		handlers = append(handlers, caddyconfig.JSONModuleObject(encode.Encode{
			EncodingsRaw: caddy.ModuleMap{
				"zstd": caddyconfig.JSON(zstd.New(), nil),
				"gzip": caddyconfig.JSON(gzip.New(), nil),
			},
			Prefer: []string{"zstd", "gzip"},
		}, "handler", "encode", nil))
	}

	if templates {
		handler := caddytpl.Templates{FileRoot: root}
		handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "templates", nil))
	}

	handler := FileServer{Root: root}

	if len(precompressed) != 0 {
		// logic mirrors modules/caddyhttp/fileserver/caddyfile.go case "precompressed"
		var order []string
		for _, compression := range precompressed {
			modID := "http.precompressed." + compression
			mod, err := caddy.GetModule(modID)
			if err != nil {
				return caddy.ExitCodeFailedStartup, fmt.Errorf("getting module named '%s': %v", modID, err)
			}
			inst := mod.New()
			precompress, ok := inst.(encode.Precompressed)
			if !ok {
				return caddy.ExitCodeFailedStartup, fmt.Errorf("module %s is not a precompressor; is %T", modID, inst)
			}
			if handler.PrecompressedRaw == nil {
				handler.PrecompressedRaw = make(caddy.ModuleMap)
			}
			handler.PrecompressedRaw[compression] = caddyconfig.JSON(precompress, nil)
			order = append(order, compression)
		}
		handler.PrecompressedOrder = order
	}

	if browse {
		handler.Browse = &Browse{RevealSymlinks: revealSymlinks, FileLimit: fileLimit}
	}

	handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "file_server", nil))

	route := caddyhttp.Route{HandlersRaw: handlers}

	if domain != "" {
		route.MatcherSetsRaw = []caddy.ModuleMap{
			{
				"host": caddyconfig.JSON(caddyhttp.MatchHost{domain}, nil),
			},
		}
	}

	server := &caddyhttp.Server{
		ReadHeaderTimeout: caddy.Duration(10 * time.Second),
		IdleTimeout:       caddy.Duration(30 * time.Second),
		MaxHeaderBytes:    1024 * 10,
		Routes:            caddyhttp.RouteList{route},
	}
	// default listener: plain HTTP on :80, or the HTTPS port when a
	// domain was given
	if listen == "" {
		if domain == "" {
			listen = ":80"
		} else {
			listen = ":" + strconv.Itoa(certmagic.HTTPSPort)
		}
	}
	server.Listen = []string{listen}
	if accessLog {
		server.Logs = &caddyhttp.ServerLogConfig{}
	}

	httpApp := caddyhttp.App{
		Servers: map[string]*caddyhttp.Server{"static": server},
	}

	// NOTE(review): this shadows the predeclared identifier `false` so
	// that its address can be taken below for Persist; legal Go, but a
	// named local such as `falseBool := false` would be clearer.
	var false bool
	cfg := &caddy.Config{
		Admin: &caddy.AdminConfig{
			Disabled: true,
			Config: &caddy.ConfigSettings{
				Persist: &false,
			},
		},
		AppsRaw: caddy.ModuleMap{
			"http": caddyconfig.JSON(httpApp, nil),
		},
	}

	if debug {
		cfg.Logging = &caddy.Logging{
			Logs: map[string]*caddy.CustomLog{
				"default": {
					BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
				},
			},
		}
	}

	err = caddy.Run(cfg)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	log.Printf("Caddy serving static files on %s", listen)

	// block forever; the process exits via signal handling
	select {}
}

================================================ FILE:
modules/caddyhttp/fileserver/matcher.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileserver

import (
	"fmt"
	"io/fs"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/parser"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(MatchFile{})
}

// MatchFile is an HTTP request matcher that can match
// requests based upon file existence.
//
// Upon matching, three new placeholders will be made
// available:
//
//   - `{http.matchers.file.relative}` The root-relative
//     path of the file. This is often useful when rewriting
//     requests.
//   - `{http.matchers.file.absolute}` The absolute path
//     of the matched file.
//   - `{http.matchers.file.type}` Set to "directory" if
//     the matched file is a directory, "file" otherwise.
//   - `{http.matchers.file.remainder}` Set to the remainder
//     of the path if the path was split by `split_path`.
//
// Even though file matching may depend on the OS path
// separator, the placeholder values always use /.
type MatchFile struct {
	// The file system implementation to use. By default, the
	// local disk file system will be used.
	FileSystem string `json:"fs,omitempty"`

	// The root directory, used for creating absolute
	// file paths, and required when working with
	// relative paths; if not specified, `{http.vars.root}`
	// will be used, if set; otherwise, the current
	// directory is assumed. Accepts placeholders.
	Root string `json:"root,omitempty"`

	// The list of files to try. Each path here is
	// considered related to Root. If nil, the request
	// URL's path will be assumed. Files and
	// directories are treated distinctly, so to match
	// a directory, the filepath MUST end in a forward
	// slash `/`. To match a regular file, there must
	// be no trailing slash. Accepts placeholders. If
	// the policy is "first_exist", then an error may
	// be triggered as a fallback by configuring "="
	// followed by a status code number,
	// for example "=404".
	TryFiles []string `json:"try_files,omitempty"`

	// How to choose a file in TryFiles. Can be:
	//
	// - first_exist
	// - first_exist_fallback
	// - smallest_size
	// - largest_size
	// - most_recently_modified
	//
	// Default is first_exist.
	TryPolicy string `json:"try_policy,omitempty"`

	// A list of delimiters to use to split the path in two
	// when trying files. If empty, no splitting will
	// occur, and the path will be tried as-is. For each
	// split value, the left-hand side of the split,
	// including the split value, will be the path tried.
	// For example, the path `/remote.php/dav/` using the
	// split value `.php` would try the file `/remote.php`.
	// Each delimiter must appear at the end of a URI path
	// component in order to be used as a split delimiter.
	SplitPath []string `json:"split_path,omitempty"`

	// fsmap maps configured file system names to implementations;
	// populated in Provision.
	fsmap caddy.FileSystems

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (MatchFile) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.file", New: func() caddy.Module { return new(MatchFile) }, } } // UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax: // // file { // root // try_files // try_policy first_exist|smallest_size|largest_size|most_recently_modified // } func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // iterate to merge multiple matchers into one for d.Next() { m.TryFiles = append(m.TryFiles, d.RemainingArgs()...) for d.NextBlock(0) { switch d.Val() { case "root": if !d.NextArg() { return d.ArgErr() } m.Root = d.Val() case "try_files": m.TryFiles = append(m.TryFiles, d.RemainingArgs()...) if len(m.TryFiles) == 0 { return d.ArgErr() } case "try_policy": if !d.NextArg() { return d.ArgErr() } m.TryPolicy = d.Val() case "split_path": m.SplitPath = d.RemainingArgs() if len(m.SplitPath) == 0 { return d.ArgErr() } default: return d.Errf("unrecognized subdirective: %s", d.Val()) } } } return nil } // CELLibrary produces options that expose this matcher for use in CEL // expression matchers. 
//
// Example:
//
//	expression file()
//	expression file({http.request.uri.path}, '/index.php')
//	expression file({'root': '/srv', 'try_files': [{http.request.uri.path}, '/index.php'], 'try_policy': 'first_exist', 'split_path': ['.php']})
func (MatchFile) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	requestType := cel.ObjectType("http.Request")

	// matcherFactory builds a provisioned MatchFile from the CEL map
	// argument (keys: root, fs, try_files, try_policy, split_path).
	matcherFactory := func(data ref.Val) (caddyhttp.RequestMatcherWithError, error) {
		values, err := caddyhttp.CELValueToMapStrList(data)
		if err != nil {
			return nil, err
		}

		var root string
		if len(values["root"]) > 0 {
			root = values["root"][0]
		}
		var fsName string
		if len(values["fs"]) > 0 {
			fsName = values["fs"][0]
		}

		var try_policy string
		if len(values["try_policy"]) > 0 {
			try_policy = values["try_policy"][0]
		}

		m := MatchFile{
			Root:       root,
			TryFiles:   values["try_files"],
			TryPolicy:  try_policy,
			SplitPath:  values["split_path"],
			FileSystem: fsName,
		}

		err = m.Provision(ctx)
		return m, err
	}

	// Expose both the user-facing "file" function (expanded by the macro
	// below) and the internal "file_request_map" binding that actually
	// runs the matcher.
	envOptions := []cel.EnvOption{
		cel.Macros(parser.NewGlobalVarArgMacro("file", celFileMatcherMacroExpander())),
		cel.Function("file",
			cel.Overload("file_request_map",
				[]*cel.Type{requestType, caddyhttp.CELTypeJSON},
				cel.BoolType,
			),
		),
		cel.Function("file_request_map",
			cel.Overload("file_request_map",
				[]*cel.Type{requestType, caddyhttp.CELTypeJSON},
				cel.BoolType,
			),
			cel.SingletonBinaryBinding(caddyhttp.CELMatcherRuntimeFunction("file_request_map", matcherFactory)),
		),
	}

	programOptions := []cel.ProgramOption{
		cel.CustomDecorator(caddyhttp.CELMatcherDecorator("file_request_map", matcherFactory)),
	}

	return caddyhttp.NewMatcherCELLibrary(envOptions, programOptions), nil
}

// celFileMatcherMacroExpander rewrites the variadic `file(...)` macro into a
// call of the form `file(request, {map})`: no arguments become an empty map,
// a single string/placeholder becomes a one-element try_files list, a single
// map literal is passed through, and multiple string/placeholder arguments
// become a try_files list. Anything else is rejected with a parse error.
func celFileMatcherMacroExpander() parser.MacroExpander {
	return func(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
		if len(args) == 0 {
			return eh.NewCall("file",
				eh.NewIdent(caddyhttp.CELRequestVarName),
				eh.NewMap(),
			), nil
		}
		if len(args) == 1 {
			arg := args[0]
			if isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg) {
				return eh.NewCall("file",
					eh.NewIdent(caddyhttp.CELRequestVarName),
					eh.NewMap(eh.NewMapEntry(
						eh.NewLiteral(types.String("try_files")),
						eh.NewList(arg),
						false,
					)),
				), nil
			}
			if isCELTryFilesLiteral(arg) {
				return eh.NewCall("file", eh.NewIdent(caddyhttp.CELRequestVarName), arg), nil
			}
			return nil, &common.Error{
				Location: eh.OffsetLocation(arg.ID()),
				Message:  "matcher requires either a map or string literal argument",
			}
		}
		for _, arg := range args {
			if !isCELStringLiteral(arg) && !isCELCaddyPlaceholderCall(arg) {
				return nil, &common.Error{
					Location: eh.OffsetLocation(arg.ID()),
					Message:  "matcher only supports repeated string literal arguments",
				}
			}
		}
		return eh.NewCall("file",
			eh.NewIdent(caddyhttp.CELRequestVarName),
			eh.NewMap(eh.NewMapEntry(
				eh.NewLiteral(types.String("try_files")),
				eh.NewList(args...),
				false,
			)),
		), nil
	}
}

// Provision sets up m's defaults.
func (m *MatchFile) Provision(ctx caddy.Context) error {
	m.logger = ctx.Logger()
	m.fsmap = ctx.FileSystems()
	// default root/fs to placeholders so per-request vars can override them
	if m.Root == "" {
		m.Root = "{http.vars.root}"
	}
	if m.FileSystem == "" {
		m.FileSystem = "{http.vars.fs}"
	}
	// if list of files to try was omitted entirely, assume URL path
	// (use placeholder instead of r.URL.Path; see issue #4146)
	if m.TryFiles == nil {
		m.TryFiles = []string{"{http.request.uri.path}"}
	}
	return nil
}

// Validate ensures m has a valid configuration.
func (m MatchFile) Validate() error {
	switch m.TryPolicy {
	case "",
		tryPolicyFirstExist,
		tryPolicyFirstExistFallback,
		tryPolicyLargestSize,
		tryPolicySmallestSize,
		tryPolicyMostRecentlyMod:
	default:
		return fmt.Errorf("unknown try policy %s", m.TryPolicy)
	}
	return nil
}

// Match returns true if r matches m. Returns true
// if a file was matched.
// If so, four placeholders
// will be available:
//   - http.matchers.file.relative: Path to file relative to site root
//   - http.matchers.file.absolute: Path to file including site root
//   - http.matchers.file.type: file or directory
//   - http.matchers.file.remainder: Portion remaining after splitting file path (if configured)
func (m MatchFile) Match(r *http.Request) bool {
	match, err := m.selectFile(r)
	if err != nil {
		// nolint:staticcheck
		caddyhttp.SetVar(r.Context(), caddyhttp.MatcherErrorVarKey, err)
	}
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchFile) MatchWithError(r *http.Request) (bool, error) {
	return m.selectFile(r)
}

// selectFile chooses a file according to m.TryPolicy by appending
// the paths in m.TryFiles to m.Root, with placeholder replacements.
func (m MatchFile) selectFile(r *http.Request) (bool, error) {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	root := filepath.Clean(repl.ReplaceAll(m.Root, "."))

	fsName := repl.ReplaceAll(m.FileSystem, "")

	// an unknown filesystem name is not an error; the matcher
	// simply does not match (but it is logged loudly)
	fileSystem, ok := m.fsmap.Get(fsName)
	if !ok {
		if c := m.logger.Check(zapcore.ErrorLevel, "use of unregistered filesystem"); c != nil {
			c.Write(zap.String("fs", fsName))
		}
		return false, nil
	}

	// matchCandidate carries the three forms of a candidate path that
	// are needed to set the matcher placeholders on a successful match.
	type matchCandidate struct {
		fullpath, relative, splitRemainder string
	}

	// makeCandidates evaluates placeholders in file and expands any glob expressions
	// to build a list of file candidates. Special glob characters are escaped in
	// placeholder replacements so globs cannot be expanded from placeholders, and
	// globs are not evaluated on Windows because of its path separator character:
	// escaping is not supported so we can't safely glob on Windows, or we can't
	// support placeholders on Windows (pick one). (Actually, evaluating untrusted
	// globs is not the end of the world since the file server will still hide any
	// hidden files, it just might lead to unexpected behavior.)
	makeCandidates := func(file string) []matchCandidate {
		// first, evaluate placeholders in the file pattern
		expandedFile, err := repl.ReplaceFunc(file, func(variable string, val any) (any, error) {
			if runtime.GOOS == "windows" {
				return val, nil
			}
			switch v := val.(type) {
			case string:
				return globSafeRepl.Replace(v), nil
			case fmt.Stringer:
				return globSafeRepl.Replace(v.String()), nil
			}
			return val, nil
		})
		if err != nil {
			if c := m.logger.Check(zapcore.ErrorLevel, "evaluating placeholders"); c != nil {
				c.Write(zap.Error(err))
			}

			expandedFile = file // "oh well," I guess?
		}

		// clean the path and split, if configured -- we must split before
		// globbing so that the file system doesn't include the remainder
		// ("afterSplit") in the filename; be sure to restore trailing slash
		beforeSplit, afterSplit := m.firstSplit(path.Clean(expandedFile))
		if strings.HasSuffix(file, "/") {
			beforeSplit += "/"
		}

		// create the full path to the file by prepending the site root
		fullPattern := caddyhttp.SanitizedPathJoin(root, beforeSplit)

		// expand glob expressions, but not on Windows because Glob() doesn't
		// support escaping on Windows due to path separator)
		var globResults []string
		if runtime.GOOS == "windows" {
			globResults = []string{fullPattern} // precious Windows
		} else {
			globResults, err = fs.Glob(fileSystem, fullPattern)
			if err != nil {
				if c := m.logger.Check(zapcore.ErrorLevel, "expanding glob"); c != nil {
					c.Write(zap.Error(err))
				}
			}
		}

		// for each glob result, combine all the forms of the path
		candidates := make([]matchCandidate, 0, len(globResults))
		for _, result := range globResults {
			candidates = append(candidates, matchCandidate{
				fullpath:       result,
				relative:       strings.TrimPrefix(result, root),
				splitRemainder: afterSplit,
			})
		}

		return candidates
	}

	// setPlaceholders creates the placeholders for the matched file
	setPlaceholders := func(candidate matchCandidate, isDir bool) {
		repl.Set("http.matchers.file.relative", filepath.ToSlash(candidate.relative))
		repl.Set("http.matchers.file.absolute", filepath.ToSlash(candidate.fullpath))
		repl.Set("http.matchers.file.remainder", filepath.ToSlash(candidate.splitRemainder))

		fileType := "file"
		if isDir {
			fileType = "directory"
		}
		repl.Set("http.matchers.file.type", fileType)
	}

	// match file according to the configured policy
	switch m.TryPolicy {
	case "", tryPolicyFirstExist, tryPolicyFirstExistFallback:
		// maxI is the index of the fallback entry (only used by
		// first_exist_fallback); -1 means there is no fallback
		maxI := -1
		if m.TryPolicy == tryPolicyFirstExistFallback {
			maxI = len(m.TryFiles) - 1
		}

		for i, pattern := range m.TryFiles {
			// If the pattern is a status code, emit an error,
			// which short-circuits the middleware pipeline and
			// writes an HTTP error response.
			if err := parseErrorCode(pattern); err != nil {
				return false, err
			}

			candidates := makeCandidates(pattern)
			for _, c := range candidates {
				// Skip the IO if using fallback policy and it's the latest item
				if i == maxI {
					setPlaceholders(c, false)

					return true, nil
				}

				if info, exists := m.strictFileExists(fileSystem, c.fullpath); exists {
					setPlaceholders(c, info.IsDir())
					return true, nil
				}
			}
		}

	case tryPolicyLargestSize:
		var largestSize int64
		var largest matchCandidate
		var largestInfo os.FileInfo
		for _, pattern := range m.TryFiles {
			candidates := makeCandidates(pattern)
			for _, c := range candidates {
				info, err := fs.Stat(fileSystem, c.fullpath)
				if err == nil && info.Size() > largestSize {
					largestSize = info.Size()
					largest = c
					largestInfo = info
				}
			}
		}
		if largestInfo == nil {
			return false, nil
		}
		setPlaceholders(largest, largestInfo.IsDir())
		return true, nil

	case tryPolicySmallestSize:
		var smallestSize int64
		var smallest matchCandidate
		var smallestInfo os.FileInfo
		for _, pattern := range m.TryFiles {
			candidates := makeCandidates(pattern)
			for _, c := range candidates {
				info, err := fs.Stat(fileSystem, c.fullpath)
				// NOTE(review): smallestSize == 0 doubles as "nothing selected
				// yet", so an empty (0-byte) file can be displaced by a later
				// candidate — presumably acceptable; confirm if 0-byte files matter
				if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
					smallestSize = info.Size()
					smallest = c
					smallestInfo = info
				}
			}
		}
		if smallestInfo == nil {
			return false, nil
		}
		setPlaceholders(smallest, smallestInfo.IsDir())
		return true, nil

	case tryPolicyMostRecentlyMod:
		var recent matchCandidate
		var recentInfo os.FileInfo
		for _, pattern := range m.TryFiles {
			candidates := makeCandidates(pattern)
			for _, c := range candidates {
				info, err := fs.Stat(fileSystem, c.fullpath)
				if err == nil &&
					(recentInfo == nil || info.ModTime().After(recentInfo.ModTime())) {
					recent = c
					recentInfo = info
				}
			}
		}
		if recentInfo == nil {
			return false, nil
		}
		setPlaceholders(recent, recentInfo.IsDir())
		return true, nil
	}

	return false, nil
}

// parseErrorCode checks if the input is a status
// code number, prefixed by "=", and returns an
// error if so.
func parseErrorCode(input string) error {
	if len(input) > 1 && input[0] == '=' {
		code, err := strconv.Atoi(input[1:])
		// anything that is not a valid 3-digit status code is
		// simply not an error trigger, so report no error
		if err != nil || code < 100 || code > 999 {
			return nil
		}
		return caddyhttp.Error(code, fmt.Errorf("%s", input[1:]))
	}
	return nil
}

// strictFileExists returns true if file exists
// and matches the convention of the given file
// path. If the path ends in a forward slash,
// the file must also be a directory; if it does
// NOT end in a forward slash, the file must NOT
// be a directory.
func (m MatchFile) strictFileExists(fileSystem fs.FS, file string) (os.FileInfo, bool) {
	info, err := fs.Stat(fileSystem, file)
	if err != nil {
		// in reality, this can be any error
		// such as permission or even obscure
		// ones like "is not a directory" (when
		// trying to stat a file within a file);
		// in those cases we can't be sure if
		// the file exists, so we just treat any
		// error as if it does not exist; see
		// https://stackoverflow.com/a/12518877/1048862
		return nil, false
	}
	if strings.HasSuffix(file, separator) {
		// by convention, file paths ending
		// in a path separator must be a directory
		return info, info.IsDir()
	}
	// by convention, file paths NOT ending
	// in a path separator must NOT be a directory
	return info, !info.IsDir()
}

// firstSplit returns the first result where the path
// can be split in two by a value in m.SplitPath.
The // return values are the first piece of the path that // ends with the split substring and the remainder. // If the path cannot be split, the path is returned // as-is (with no remainder). func (m MatchFile) firstSplit(path string) (splitPart, remainder string) { for _, split := range m.SplitPath { if idx := indexFold(path, split); idx > -1 { pos := idx + len(split) // skip the split if it's not the final part of the filename if pos != len(path) && !strings.HasPrefix(path[pos:], "/") { continue } return path[:pos], path[pos:] } } return path, "" } // There is no strings.IndexFold() function like there is strings.EqualFold(), // but we can use strings.EqualFold() to build our own case-insensitive // substring search (as of Go 1.14). func indexFold(haystack, needle string) int { nlen := len(needle) for i := 0; i+nlen < len(haystack); i++ { if strings.EqualFold(haystack[i:i+nlen], needle) { return i } } return -1 } // isCELTryFilesLiteral returns whether the expression resolves to a map literal containing // only string keys with or a placeholder call. 
func isCELTryFilesLiteral(e ast.Expr) bool {
	switch e.Kind() {
	case ast.MapKind:
		mapExpr := e.AsMap()
		for _, entry := range mapExpr.Entries() {
			mapKey := entry.AsMapEntry().Key()
			mapVal := entry.AsMapEntry().Value()
			// keys must be string literals so we can inspect them here
			if !isCELStringLiteral(mapKey) {
				return false
			}
			mapKeyStr := mapKey.AsLiteral().ConvertToType(types.StringType).Value()
			switch mapKeyStr {
			case "try_files", "split_path":
				// list-valued options
				if !isCELStringListLiteral(mapVal) {
					return false
				}
			case "try_policy", "root":
				// scalar string options
				if !(isCELStringExpr(mapVal)) {
					return false
				}
			default:
				// unknown keys disqualify the whole map
				return false
			}
		}
		return true
	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELStringExpr indicates whether the expression is a supported string expression
func isCELStringExpr(e ast.Expr) bool {
	return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
}

// isCELStringLiteral returns whether the expression is a CEL string literal.
func isCELStringLiteral(e ast.Expr) bool {
	switch e.Kind() {
	case ast.LiteralKind:
		constant := e.AsLiteral()
		switch constant.Type() {
		case types.StringType:
			return true
		}
	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELCaddyPlaceholderCall returns whether the expression is a caddy placeholder call.
func isCELCaddyPlaceholderCall(e ast.Expr) bool {
	switch e.Kind() {
	case ast.CallKind:
		call := e.AsCall()
		if call.FunctionName() == caddyhttp.CELPlaceholderFuncName {
			return true
		}
	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
// other concat call arguments.
func isCELConcatCall(e ast.Expr) bool {
	switch e.Kind() {
	case ast.CallKind:
		call := e.AsCall()
		// a target (method-style call) means this is not the infix + operator
		if call.Target().Kind() != ast.UnspecifiedExprKind {
			return false
		}
		if call.FunctionName() != operators.Add {
			return false
		}
		// every operand must itself be a supported string expression
		for _, arg := range call.Args() {
			if !isCELStringExpr(arg) {
				return false
			}
		}
		return true
	case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// isCELStringListLiteral returns whether the expression resolves to a list literal
// containing only string constants or a placeholder call.
func isCELStringListLiteral(e ast.Expr) bool {
	switch e.Kind() {
	case ast.ListKind:
		list := e.AsList()
		for _, elem := range list.Elements() {
			if !isCELStringExpr(elem) {
				return false
			}
		}
		return true
	case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
		// appeasing the linter :)
	}
	return false
}

// globSafeRepl replaces special glob characters with escaped
// equivalents. Note that the filepath godoc states that
// escaping is not done on Windows because of the separator.
var globSafeRepl = strings.NewReplacer(
	"*", "\\*",
	"[", "\\[",
	"?", "\\?",
	"\\", "\\\\",
)

// The supported values for MatchFile.TryPolicy.
const (
	tryPolicyFirstExist         = "first_exist"
	tryPolicyFirstExistFallback = "first_exist_fallback"
	tryPolicyLargestSize        = "largest_size"
	tryPolicySmallestSize       = "smallest_size"
	tryPolicyMostRecentlyMod    = "most_recently_modified"
)

// Interface guards
var (
	_ caddy.Validator                   = (*MatchFile)(nil)
	_ caddyhttp.RequestMatcherWithError = (*MatchFile)(nil)
	_ caddyhttp.CELLibraryProducer      = (*MatchFile)(nil)
)



================================================
FILE: modules/caddyhttp/fileserver/matcher_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileserver

import (
	"context"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"testing"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/internal/filesystems"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// testCase describes one MatchFile table-test entry: the request path,
// the expected relative-path and type placeholders, and whether the
// matcher should report a match.
type testCase struct {
	path         string
	expectedPath string
	expectedType string
	matched      bool
}

func TestFileMatcher(t *testing.T) {
	// Windows doesn't like colons in file names
	isWindows := runtime.GOOS == "windows"
	if !isWindows {
		filename := "with:in-name.txt"
		f, err := os.Create("./testdata/" + filename)
		if err != nil {
			t.Fail()
			return
		}
		t.Cleanup(func() {
			os.Remove("./testdata/" + filename)
		})
		f.WriteString(filename)
		f.Close()
	}

	for i, tc := range []testCase{
		{
			path:         "/foo.txt",
			expectedPath: "/foo.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/foo.txt/",
			expectedPath: "/foo.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/foo.txt?a=b",
			expectedPath: "/foo.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/foodir",
			expectedPath: "/foodir/",
			expectedType: "directory",
			matched:      true,
		},
		{
			path:         "/foodir/",
			expectedPath: "/foodir/",
			expectedType: "directory",
			matched:      true,
		},
		{
			path:         "/foodir/foo.txt",
			expectedPath: "/foodir/foo.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:    "/missingfile.php",
			matched: false,
		},
		{
			path:         "ملف.txt", // the path file name is not escaped
			expectedPath: "/ملف.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         url.PathEscape("ملف.txt"), // singly-escaped path
			expectedPath: "/ملف.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         url.PathEscape(url.PathEscape("ملف.txt")), // doubly-escaped path
			expectedPath: "/%D9%85%D9%84%D9%81.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "./with:in-name.txt", // browsers send the request with the path as such
			expectedPath: "/with:in-name.txt",
			expectedType: "file",
			matched:      !isWindows,
		},
	} {
		fileMatcherTest(t, i, tc)
	}
}

func TestFileMatcherNonWindows(t *testing.T) {
	if runtime.GOOS == "windows" {
		return
	}

	// this is impossible to test on Windows, but tests a security patch for other platforms
	tc := testCase{
		path:         "/foodir/secr%5Cet.txt",
		expectedPath: "/foodir/secr\\et.txt",
		expectedType: "file",
		matched:      true,
	}

	f, err := os.Create(filepath.Join("testdata", strings.TrimPrefix(tc.expectedPath, "/")))
	if err != nil {
		t.Fatalf("could not create test file: %v", err)
	}
	defer f.Close()
	defer os.Remove(f.Name())

	fileMatcherTest(t, 0, tc)
}

// fileMatcherTest runs a single testCase against a MatchFile configured
// with the ./testdata root, checking the match result and the placeholder
// values the matcher sets on success.
func fileMatcherTest(t *testing.T, i int, tc testCase) {
	m := &MatchFile{
		fsmap:    &filesystems.FileSystemMap{},
		Root:     "./testdata",
		TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
	}

	u, err := url.Parse(tc.path)
	if err != nil {
		t.Errorf("Test %d: parsing path: %v", i, err)
	}

	req := &http.Request{URL: u}
	repl := caddyhttp.NewTestReplacer(req)

	result, err := m.MatchWithError(req)
	if err != nil {
		t.Errorf("Test %d: unexpected error: %v", i, err)
	}
	if result != tc.matched {
		t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
	}

	rel, ok := repl.Get("http.matchers.file.relative")
	if !ok && result {
		t.Errorf("Test %d: expected replacer value", i)
	}
	if !result {
		return
	}

	if rel != tc.expectedPath {
		t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
	}

	fileType, _ := repl.Get("http.matchers.file.type")
	if fileType != tc.expectedType {
		t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
	}
}

func TestPHPFileMatcher(t *testing.T) {
	for i, tc := range []struct {
		path         string
		expectedPath string
		expectedType string
		matched      bool
	}{
		{
			path:         "/index.php",
			expectedPath: "/index.php",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/index.php/somewhere",
			expectedPath: "/index.php",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/remote.php",
			expectedPath: "/remote.php",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/remote.php/somewhere",
			expectedPath: "/remote.php",
			expectedType: "file",
			matched:      true,
		},
		{
			path:    "/missingfile.php",
			matched: false,
		},
		{
			path:         "/notphp.php.txt",
			expectedPath: "/notphp.php.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:         "/notphp.php.txt/",
			expectedPath: "/notphp.php.txt",
			expectedType: "file",
			matched:      true,
		},
		{
			path:    "/notphp.php.txt.suffixed",
			matched: false,
		},
		{
			path:         "/foo.php.php/index.php",
			expectedPath: "/foo.php.php/index.php",
			expectedType: "file",
			matched:      true,
		},
		{
			// See https://github.com/caddyserver/caddy/issues/3623
			path:         "/%E2%C3",
			expectedPath: "/%E2%C3",
			expectedType: "file",
			matched:      false,
		},
		{
			path:         "/index.php?path={path}&{query}",
			expectedPath: "/index.php",
			expectedType: "file",
			matched:      true,
		},
	} {
		m := &MatchFile{
			fsmap:     &filesystems.FileSystemMap{},
			Root:      "./testdata",
			TryFiles:  []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
			SplitPath: []string{".php"},
		}

		u, err := url.Parse(tc.path)
		if err != nil {
			t.Errorf("Test %d: parsing path: %v", i, err)
		}

		req := &http.Request{URL: u}
		repl := caddyhttp.NewTestReplacer(req)

		result, err := m.MatchWithError(req)
		if err != nil {
			t.Errorf("Test %d: unexpected error: %v", i, err)
		}
		if result != tc.matched {
			t.Errorf("Test %d: expected match=%t, got %t", i, tc.matched, result)
		}

		rel, ok := repl.Get("http.matchers.file.relative")
		if !ok && result {
			t.Errorf("Test %d: expected replacer value", i)
		}
		if !result {
			continue
		}

		if rel != tc.expectedPath {
			t.Errorf("Test %d: actual path: %v, expected: %v", i, rel, tc.expectedPath)
		}

		fileType, _ := repl.Get("http.matchers.file.type")
		if fileType != tc.expectedType {
			t.Errorf("Test %d: actual file type: %v, expected: %v", i, fileType, tc.expectedType)
		}
	}
}

// TestFirstSplit verifies the case-insensitive split on ".php" with a
// trailing path remainder.
func TestFirstSplit(t *testing.T) {
	m := MatchFile{
		SplitPath: []string{".php"},
		fsmap:     &filesystems.FileSystemMap{},
	}
	actual, remainder := m.firstSplit("index.PHP/somewhere")
	expected := "index.PHP"
	expectedRemainder := "/somewhere"
	if actual != expected {
		t.Errorf("Expected split %s but got %s", expected, actual)
	}
	if remainder != expectedRemainder {
		t.Errorf("Expected remainder %s but got %s", expectedRemainder, remainder)
	}
}

// expressionTests exercises the CEL `file(...)` matcher expression forms.
var expressionTests = []struct {
	name              string
	expression        *caddyhttp.MatchExpression
	urlTarget         string
	httpMethod        string
	httpHeader        *http.Header
	wantErr           bool
	wantResult        bool
	clientCertificate []byte
	expectedPath      string
}{
	{
		name: "file error no args (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file()`,
		},
		urlTarget:  "https://example.com/foo.txt",
		wantResult: true,
	},
	{
		name: "file error bad try files (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"try_file": ["bad_arg"]})`,
		},
		urlTarget: "https://example.com/foo",
		wantErr:   true,
	},
	{
		name: "file match short pattern index.php (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file("index.php")`,
		},
		urlTarget:  "https://example.com/foo",
		wantResult: true,
	},
	{
		name: "file match short pattern foo.txt (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({http.request.uri.path})`,
		},
		urlTarget:  "https://example.com/foo.txt",
		wantResult: true,
	},
	{
		name: "file match index.php (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}, "/index.php"]})`,
		},
		urlTarget:  "https://example.com/foo",
		wantResult: true,
	},
	{
		name: "file match long pattern foo.txt (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
		},
		urlTarget:  "https://example.com/foo.txt",
		wantResult: true,
	},
	{
		name: "file match long pattern foo.txt with concatenation (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"root": ".", "try_files": ["./testdata" + {http.request.uri.path}]})`,
		},
		urlTarget:  "https://example.com/foo.txt",
		wantResult: true,
	},
	{
		name: "file not match long pattern (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"root": "./testdata", "try_files": [{http.request.uri.path}]})`,
		},
		urlTarget:  "https://example.com/nopenope.txt",
		wantResult: false,
	},
	{
		name: "file match long pattern foo.txt with try_policy (MatchFile)",
		expression: &caddyhttp.MatchExpression{
			Expr: `file({"root": "./testdata", "try_policy": "largest_size", "try_files": ["foo.txt", "large.txt"]})`,
		},
		urlTarget:    "https://example.com/",
		wantResult:   true,
		expectedPath: "/large.txt",
	},
}

func TestMatchExpressionMatch(t *testing.T) {
	for _, tst := range expressionTests {
		tc := tst
		t.Run(tc.name, func(t *testing.T) {
			caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
			defer cancel()
			err := tc.expression.Provision(caddyCtx)
			if err != nil {
				if !tc.wantErr {
					t.Errorf("MatchExpression.Provision() error = %v, wantErr %v", err, tc.wantErr)
				}
				return
			}

			req := httptest.NewRequest(tc.httpMethod, tc.urlTarget, nil)
			if tc.httpHeader != nil {
				req.Header = *tc.httpHeader
			}
			repl := caddyhttp.NewTestReplacer(req)
			repl.Set("http.vars.root", "./testdata")
			ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
			req = req.WithContext(ctx)

			matches, err := tc.expression.MatchWithError(req)
			if err != nil {
				t.Errorf("MatchExpression.Match() error = %v", err)
				return
			}
			if matches != tc.wantResult {
				t.Errorf("MatchExpression.Match() expected to return '%t', for expression : '%s'", tc.wantResult, tc.expression.Expr)
			}

			if tc.expectedPath != "" {
				path, ok := repl.Get("http.matchers.file.relative")
				if !ok {
					t.Errorf("MatchExpression.Match() expected to return path '%s', but got none", tc.expectedPath)
				}
				if path != tc.expectedPath {
					t.Errorf("MatchExpression.Match() expected to return path '%s', but got '%s'", tc.expectedPath, path)
				}
			}
		})
	}
}


================================================
FILE: modules/caddyhttp/fileserver/staticfiles.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fileserver

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/fs"
	weakrand "math/rand/v2"
	"mime"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
)

func init() {
	caddy.RegisterModule(FileServer{})
}

// FileServer implements a handler that serves static files.
//
// The path of the file to serve is constructed by joining the site root
// and the sanitized request path. Any and all files within the root and
// links with targets outside the site root may therefore be accessed.
// For example, with a site root of `/www`, requests to `/foo/bar.txt`
// will serve the file at `/www/foo/bar.txt`.
//
// The request path is sanitized using the Go standard library's
// path.Clean() function (https://pkg.go.dev/path#Clean) before being
// joined to the root. Request paths must be valid and well-formed.
//
// For requests that access directories instead of regular files,
// Caddy will attempt to serve an index file if present. For example,
// a request to `/dir/` will attempt to serve `/dir/index.html` if
// it exists. The index file names to try are configurable. If a
// requested directory does not have an index file, Caddy writes a
// 404 response. Alternatively, file browsing can be enabled with
// the "browse" parameter which shows a list of files when directories
// are requested if no index file is present. If "browse" is enabled,
// Caddy may serve a JSON array of the directory listing when the `Accept`
// header mentions `application/json` with the following structure:
//
//	[{
//		"name": "",
//		"size": 0,
//		"url": "",
//		"mod_time": "",
//		"mode": 0,
//		"is_dir": false,
//		"is_symlink": false
//	}]
//
// with the `url` being relative to the request path and `mod_time` in the RFC 3339 format
// with sub-second precision. For any other value for the `Accept` header, the
// respective browse template is executed with `Content-Type: text/html`.
//
// By default, this handler will canonicalize URIs so that requests to
// directories end with a slash, but requests to regular files do not.
// This is enforced with HTTP redirects automatically and can be disabled.
// Canonicalization redirects are not issued, however, if a URI rewrite
// modified the last component of the path (the filename).
//
// This handler sets the Etag and Last-Modified headers for static files.
// It does not perform MIME sniffing to determine Content-Type based on
// contents, but does use the extension (if known); see the Go docs for
// details: https://pkg.go.dev/mime#TypeByExtension
//
// The file server properly handles requests with If-Match,
// If-Unmodified-Since, If-Modified-Since, If-None-Match, Range, and
// If-Range headers. It includes the file's modification time in the
// Last-Modified header of the response.
type FileServer struct {
	// The file system implementation to use. By default, Caddy uses the local
	// disk file system.
	//
	// If a non-default filesystem is used, it must first be registered in the
	// globals section.
	FileSystem string `json:"fs,omitempty"`

	// The path to the root of the site. Default is `{http.vars.root}` if set,
	// or current working directory otherwise. This should be a trusted value.
	//
	// Note that a site root is not a sandbox. Although the file server does
	// sanitize the request URI to prevent directory traversal, files (including
	// links) within the site root may be directly accessed based on the request
	// path. Files and folders within the root should be secure and trustworthy.
	Root string `json:"root,omitempty"`

	// A list of files or folders to hide; the file server will pretend as if
	// they don't exist. Accepts globular patterns like `*.ext` or `/foo/*/bar`
	// as well as placeholders. Because site roots can be dynamic, this list
	// uses file system paths, not request paths. To clarify, the base of
	// relative paths is the current working directory, NOT the site root.
	//
	// Entries without a path separator (`/` or `\` depending on OS) will match
	// any file or directory of that name regardless of its path. To hide only a
	// specific file with a name that may not be unique, always use a path
	// separator. For example, to hide all files or folder trees named "hidden",
	// put "hidden" in the list. To hide only ./hidden, put "./hidden" in the list.
	//
	// When possible, all paths are resolved to their absolute form before
	// comparisons are made. For maximum clarity and explicitness, use complete,
	// absolute paths; or, for greater portability, use relative paths instead.
	//
	// Note that hide comparisons are case-sensitive. On case-insensitive
	// filesystems, requests with different path casing may still resolve to the
	// same file or directory on disk, so hide should not be treated as a
	// security boundary for sensitive paths.
	Hide []string `json:"hide,omitempty"`

	// The names of files to try as index files if a folder is requested.
	// Default: index.html, index.txt.
	IndexNames []string `json:"index_names,omitempty"`

	// Enables file listings if a directory was requested and no index
	// file is present.
	Browse *Browse `json:"browse,omitempty"`

	// Use redirects to enforce trailing slashes for directories, or to
	// remove trailing slash from URIs for files. Default is true.
	//
	// Canonicalization will not happen if the last element of the request's
	// path (the filename) is changed in an internal rewrite, to avoid
	// clobbering the explicit rewrite with implicit behavior.
	CanonicalURIs *bool `json:"canonical_uris,omitempty"`

	// Override the status code written when successfully serving a file.
	// Particularly useful when explicitly serving a file as display for
	// an error, like a 404 page. A placeholder may be used. By default,
	// the status code will typically be 200, or 206 for partial content.
	StatusCode caddyhttp.WeakString `json:"status_code,omitempty"`

	// If pass-thru mode is enabled and a requested file is not found,
	// it will invoke the next handler in the chain instead of returning
	// a 404 error. By default, this is false (disabled).
	PassThru bool `json:"pass_thru,omitempty"`

	// Selection of encoders to use to check for precompressed files.
	PrecompressedRaw caddy.ModuleMap `json:"precompressed,omitempty" caddy:"namespace=http.precompressed"`

	// If the client has no strong preference (q-factor), choose these encodings in order.
	// If no order specified here, the first encoding from the Accept-Encoding header
	// that both client and server support is used
	PrecompressedOrder []string `json:"precompressed_order,omitempty"`

	// precompressors maps Accept-Encoding values to the loaded
	// precompression modules; populated in Provision from PrecompressedRaw.
	precompressors map[string]encode.Precompressed

	// List of file extensions to try to read Etags from.
	// If set, file Etags will be read from sidecar files
	// with any of these suffixes, instead of generating
	// our own Etag.
	// Keep in mind that the Etag values in the files have to be quoted as per RFC7232.
	// See https://datatracker.ietf.org/doc/html/rfc7232#section-2.3 for a few examples.
	EtagFileExtensions []string `json:"etag_file_extensions,omitempty"`

	// fsmap resolves filesystem names to fs.FS implementations;
	// populated in Provision from the caddy context.
	fsmap caddy.FileSystems

	// logger is set in Provision from the caddy context.
	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (FileServer) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.file_server",
		New: func() caddy.Module { return new(FileServer) },
	}
}

// Provision sets up the static files responder. It fills in default
// placeholders for FileSystem and Root, applies default index names,
// pre-resolves static hide paths to absolute form, loads the configured
// precompression modules, and validates Browse sort options.
func (fsrv *FileServer) Provision(ctx caddy.Context) error {
	fsrv.logger = ctx.Logger()
	fsrv.fsmap = ctx.FileSystems()

	if fsrv.FileSystem == "" {
		fsrv.FileSystem = "{http.vars.fs}"
	}

	if fsrv.Root == "" {
		fsrv.Root = "{http.vars.root}"
	}

	if fsrv.IndexNames == nil {
		fsrv.IndexNames = defaultIndexNames
	}

	// for hide paths that are static (i.e. no placeholders), we can transform them into
	// absolute paths before the server starts for very slight performance improvement
	for i, h := range fsrv.Hide {
		if !strings.Contains(h, "{") && strings.Contains(h, separator) {
			if abs, err := caddy.FastAbs(h); err == nil {
				fsrv.Hide[i] = abs
			}
		}
	}

	// support precompressed sidecar files
	mods, err := ctx.LoadModule(fsrv, "PrecompressedRaw")
	if err != nil {
		return fmt.Errorf("loading encoder modules: %v", err)
	}
	for modName, modIface := range mods.(map[string]any) {
		p, ok := modIface.(encode.Precompressed)
		if !ok {
			return fmt.Errorf("module %s is not precompressor", modName)
		}
		ae := p.AcceptEncoding()
		if ae == "" {
			return fmt.Errorf("precompressor does not specify an Accept-Encoding value")
		}
		suffix := p.Suffix()
		if suffix == "" {
			return fmt.Errorf("precompressor does not specify a Suffix value")
		}
		// reading from a nil map is safe, so the duplicate check may
		// precede the lazy initialization below
		if _, ok := fsrv.precompressors[ae]; ok {
			return fmt.Errorf("precompressor already added: %s", ae)
		}
		if fsrv.precompressors == nil {
			fsrv.precompressors = make(map[string]encode.Precompressed)
		}
		fsrv.precompressors[ae] = p
	}

	if fsrv.Browse != nil {
		// check sort options: at most two are allowed — a sort key
		// followed by an optional sort direction
		for idx, sortOption := range fsrv.Browse.SortOptions {
			switch idx {
			case 0:
				if sortOption != sortByName && sortOption != sortByNameDirFirst && sortOption != sortBySize && sortOption != sortByTime {
					return fmt.Errorf("the first option must be one of the following: %s, %s, %s, %s, but got %s", sortByName, sortByNameDirFirst, sortBySize, sortByTime, sortOption)
				}
			case 1:
				if sortOption != sortOrderAsc && sortOption != sortOrderDesc {
					return fmt.Errorf("the second option must be one of the following: %s, %s, but got %s", sortOrderAsc, sortOrderDesc, sortOption)
				}
			default:
				return fmt.Errorf("only max 2 sort options are allowed, but got %d", idx+1)
			}
		}
	}

	return nil
}

// ServeHTTP implements caddyhttp.MiddlewareHandler. It resolves the
// request path within the configured root and file system, applies hide
// rules, serves index files or directory listings for directories,
// enforces canonical URIs, prefers precompressed sidecar files when
// acceptable to the client, and finally delegates range/conditional
// handling to http.ServeContent.
func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	if runtime.GOOS == "windows" {
		// reject paths with Alternate Data Streams (ADS)
		if strings.Contains(r.URL.Path, ":") {
			return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal ADS path"))
		}
		// reject paths with "8.3" short names
		trimmedPath := strings.TrimRight(r.URL.Path, ". ") // Windows ignores trailing dots and spaces, sigh
		if len(path.Base(trimmedPath)) <= 12 && strings.Contains(trimmedPath, "~") {
			return caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("illegal short name"))
		}
		// both of those could bypass file hiding or possibly leak information even if the file is not hidden
	}

	filesToHide := fsrv.transformHidePaths(repl)

	root := repl.ReplaceAll(fsrv.Root, ".")
	fsName := repl.ReplaceAll(fsrv.FileSystem, "")

	fileSystem, ok := fsrv.fsmap.Get(fsName)
	if !ok {
		return caddyhttp.Error(http.StatusNotFound, fmt.Errorf("filesystem not found"))
	}

	// remove any trailing `/` as it breaks fs.ValidPath() in the stdlib
	filename := strings.TrimSuffix(caddyhttp.SanitizedPathJoin(root, r.URL.Path), "/")

	if c := fsrv.logger.Check(zapcore.DebugLevel, "sanitized path join"); c != nil {
		c.Write(
			zap.String("site_root", root),
			zap.String("fs", fsName),
			zap.String("request_path", r.URL.Path),
			zap.String("result", filename),
		)
	}

	// get information about the file
	info, err := fs.Stat(fileSystem, filename)
	if err != nil {
		err = fsrv.mapDirOpenError(fileSystem, err, filename)
		if errors.Is(err, fs.ErrNotExist) {
			return fsrv.notFound(w, r, next)
		} else if errors.Is(err, fs.ErrInvalid) {
			return caddyhttp.Error(http.StatusBadRequest, err)
		} else if errors.Is(err, fs.ErrPermission) {
			return caddyhttp.Error(http.StatusForbidden, err)
		}
		return caddyhttp.Error(http.StatusInternalServerError, err)
	}

	// if the request mapped to a directory, see if
	// there is an index file we can serve
	var implicitIndexFile bool
	if info.IsDir() && len(fsrv.IndexNames) > 0 {
		for _, indexPage := range fsrv.IndexNames {
			indexPage := repl.ReplaceAll(indexPage, "")
			indexPath := caddyhttp.SanitizedPathJoin(filename, indexPage)
			if fileHidden(indexPath, filesToHide) {
				// pretend this file doesn't exist
				if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding index file"); c != nil {
					c.Write(
						zap.String("filename", indexPath),
						zap.Strings("files_to_hide", filesToHide),
					)
				}
				continue
			}

			indexInfo, err := fs.Stat(fileSystem, indexPath)
			if err != nil {
				continue
			}

			// don't rewrite the request path to append
			// the index file, because we might need to
			// do a canonical-URL redirect below based
			// on the URL as-is

			// we've chosen to use this index file,
			// so replace the last file info and path
			// with that of the index file
			info = indexInfo
			filename = indexPath
			implicitIndexFile = true
			if c := fsrv.logger.Check(zapcore.DebugLevel, "located index file"); c != nil {
				c.Write(zap.String("filename", filename))
			}
			break
		}
	}

	// if still referencing a directory, delegate
	// to browse or return an error
	if info.IsDir() {
		if c := fsrv.logger.Check(zapcore.DebugLevel, "no index file in directory"); c != nil {
			c.Write(
				zap.String("path", filename),
				zap.Strings("index_filenames", fsrv.IndexNames),
			)
		}
		if fsrv.Browse != nil && !fileHidden(filename, filesToHide) {
			return fsrv.serveBrowse(fileSystem, root, filename, w, r, next)
		}
		return fsrv.notFound(w, r, next)
	}

	// one last check to ensure the file isn't hidden (we might
	// have changed the filename from when we last checked)
	if fileHidden(filename, filesToHide) {
		if c := fsrv.logger.Check(zapcore.DebugLevel, "hiding file"); c != nil {
			c.Write(
				zap.String("filename", filename),
				zap.Strings("files_to_hide", filesToHide),
			)
		}
		return fsrv.notFound(w, r, next)
	}

	// if URL canonicalization is enabled, we need to enforce trailing
	// slash convention: if a directory, trailing slash; if a file, no
	// trailing slash - not enforcing this can break relative hrefs
	// in HTML (see https://github.com/caddyserver/caddy/issues/2741)
	if fsrv.CanonicalURIs == nil || *fsrv.CanonicalURIs {
		// Only redirect if the last element of the path (the filename) was not
		// rewritten; if the admin wanted to rewrite to the canonical path, they
		// would have, and we have to be very careful not to introduce unwanted
		// redirects and especially redirect loops!
		// See https://github.com/caddyserver/caddy/issues/4205.
		origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
		if path.Base(origReq.URL.Path) == path.Base(r.URL.Path) {
			if implicitIndexFile && !strings.HasSuffix(origReq.URL.Path, "/") {
				to := origReq.URL.Path + "/"
				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (adding trailing slash for directory"); c != nil {
					c.Write(
						zap.String("from_path", origReq.URL.Path),
						zap.String("to_path", to),
					)
				}
				return redirect(w, r, to)
			} else if !implicitIndexFile && strings.HasSuffix(origReq.URL.Path, "/") {
				to := origReq.URL.Path[:len(origReq.URL.Path)-1]
				if c := fsrv.logger.Check(zapcore.DebugLevel, "redirecting to canonical URI (removing trailing slash for file"); c != nil {
					c.Write(
						zap.String("from_path", origReq.URL.Path),
						zap.String("to_path", to),
					)
				}
				return redirect(w, r, to)
			}
		}
	}

	var file fs.File
	respHeader := w.Header()

	// etag is usually unset, but if the user knows what they're doing, let them override it
	etag := respHeader.Get("Etag")

	// static file responses are often compressed, either on-the-fly
	// or with precompressed sidecar files; in any case, the headers
	// should contain "Vary: Accept-Encoding" even when not compressed
	// so caches can craft a reliable key (according to REDbot results)
	// see #5849
	respHeader.Add("Vary", "Accept-Encoding")

	// check for precompressed files
	for _, ae := range encode.AcceptedEncodings(r, fsrv.PrecompressedOrder) {
		precompress, ok := fsrv.precompressors[ae]
		if !ok {
			continue
		}
		compressedFilename := filename + precompress.Suffix()
		compressedInfo, err := fs.Stat(fileSystem, compressedFilename)
		if err != nil || compressedInfo.IsDir() {
			if c := fsrv.logger.Check(zapcore.DebugLevel, "precompressed file not accessible"); c != nil {
				c.Write(zap.String("filename", compressedFilename), zap.Error(err))
			}
			continue
		}
		if c := fsrv.logger.Check(zapcore.DebugLevel, "opening compressed sidecar file"); c != nil {
			c.Write(zap.String("filename", compressedFilename), zap.Error(err))
		}
		file, err = fsrv.openFile(fileSystem, compressedFilename, w)
		if err != nil {
			if c := fsrv.logger.Check(zapcore.WarnLevel, "opening precompressed file failed"); c != nil {
				c.Write(zap.String("filename", compressedFilename), zap.Error(err))
			}
			if caddyErr, ok := err.(caddyhttp.HandlerError); ok && caddyErr.StatusCode == http.StatusServiceUnavailable {
				return err
			}
			file = nil
			continue
		}
		// defer in a loop is fine here: we break immediately below,
		// so at most one sidecar file is ever opened
		defer file.Close()
		respHeader.Set("Content-Encoding", ae)

		// stdlib won't set Content-Length for non-range requests if Content-Encoding is set.
		// see: https://github.com/caddyserver/caddy/issues/7040
		// Setting the Range header manually will result in 206 Partial Content.
		// see: https://github.com/caddyserver/caddy/issues/7250
		if r.Header.Get("Range") == "" {
			respHeader.Set("Content-Length", strconv.FormatInt(compressedInfo.Size(), 10))
		}

		// try to get the etag from pre computed files if an etag suffix list was provided
		if etag == "" && fsrv.EtagFileExtensions != nil {
			etag, err = fsrv.getEtagFromFile(fileSystem, compressedFilename)
			if err != nil {
				return err
			}
		}

		// don't assign info = compressedInfo because sidecars are kind
		// of transparent; however we do need to set the Etag:
		// https://caddy.community/t/gzipped-sidecar-file-wrong-same-etag/16793
		if etag == "" {
			etag = calculateEtag(compressedInfo)
		}

		break
	}

	// no precompressed file found, use the actual file
	if file == nil {
		if c := fsrv.logger.Check(zapcore.DebugLevel, "opening file"); c != nil {
			c.Write(zap.String("filename", filename))
		}

		// open the file
		file, err = fsrv.openFile(fileSystem, filename, w)
		if err != nil {
			if herr, ok := err.(caddyhttp.HandlerError); ok && herr.StatusCode == http.StatusNotFound {
				return fsrv.notFound(w, r, next)
			}
			return err // error is already structured
		}
		defer file.Close()

		// try to get the etag from pre computed files if an etag suffix list was provided
		if etag == "" && fsrv.EtagFileExtensions != nil {
			etag, err = fsrv.getEtagFromFile(fileSystem, filename)
			if err != nil {
				return err
			}
		}
		if etag == "" {
			etag = calculateEtag(info)
		}
	}

	// at this point, we're serving a file; Go std lib supports only
	// GET and HEAD, which is sensible for a static file server - reject
	// any other methods (see issue #5166)
	if r.Method != http.MethodGet && r.Method != http.MethodHead {
		// if we're in an error context, then it doesn't make sense
		// to repeat the error; just continue because we're probably
		// trying to write an error page response (see issue #5703)
		if _, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); !ok {
			respHeader.Add("Allow", "GET, HEAD")
			return caddyhttp.Error(http.StatusMethodNotAllowed, nil)
		}
	}

	// set the Etag - note that a conditional If-None-Match request is handled
	// by http.ServeContent below, which checks against this Etag value
	if etag != "" {
		respHeader.Set("Etag", etag)
	}

	if respHeader.Get("Content-Type") == "" {
		mtyp := mime.TypeByExtension(filepath.Ext(filename))
		if mtyp == "" {
			// do not allow Go to sniff the content-type; see https://www.youtube.com/watch?v=8t8JYpt0egE
			respHeader["Content-Type"] = nil
		} else {
			respHeader.Set("Content-Type", mtyp)
		}
	}

	var statusCodeOverride int

	// if this handler exists in an error context (i.e. is part of a
	// handler chain that is supposed to handle a previous error),
	// we should set status code to the one from the error instead
	// of letting http.ServeContent set the default (usually 200)
	if reqErr, ok := r.Context().Value(caddyhttp.ErrorCtxKey).(error); ok {
		statusCodeOverride = http.StatusInternalServerError
		if handlerErr, ok := reqErr.(caddyhttp.HandlerError); ok {
			if handlerErr.StatusCode > 0 {
				statusCodeOverride = handlerErr.StatusCode
			}
		}
	}

	// if a status code override is configured, run the replacer on it
	if codeStr := fsrv.StatusCode.String(); codeStr != "" {
		statusCodeOverride, err = strconv.Atoi(repl.ReplaceAll(codeStr, ""))
		if err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
	}

	// if we do have an override from the previous two parts, then
	// we wrap the response writer to intercept the WriteHeader call
	if statusCodeOverride > 0 {
		w = statusOverrideResponseWriter{ResponseWriter: w, code: statusCodeOverride}
	}

	// let the standard library do what it does best; note, however,
	// that errors generated by ServeContent are written immediately
	// to the response, so we cannot handle them (but errors there
	// are rare)
	http.ServeContent(w, r, info.Name(), info.ModTime(), file.(io.ReadSeeker))

	return nil
}

// openFile opens the file at the given filename.
// If there was an error,
// the response is configured to inform the client how to best handle it
// and a well-described handler error is returned (do not wrap the
// returned error value).
func (fsrv *FileServer) openFile(fileSystem fs.FS, filename string, w http.ResponseWriter) (fs.File, error) {
	file, err := fileSystem.Open(filename)
	if err != nil {
		err = fsrv.mapDirOpenError(fileSystem, err, filename)
		if errors.Is(err, fs.ErrNotExist) {
			if c := fsrv.logger.Check(zapcore.DebugLevel, "file not found"); c != nil {
				c.Write(zap.String("filename", filename), zap.Error(err))
			}
			return nil, caddyhttp.Error(http.StatusNotFound, err)
		} else if errors.Is(err, fs.ErrPermission) {
			if c := fsrv.logger.Check(zapcore.DebugLevel, "permission denied"); c != nil {
				c.Write(zap.String("filename", filename), zap.Error(err))
			}
			return nil, caddyhttp.Error(http.StatusForbidden, err)
		}
		// maybe the server is under load and ran out of file descriptors?
		// have client wait arbitrary seconds to help prevent a stampede
		//nolint:gosec
		backoff := weakrand.IntN(maxBackoff-minBackoff) + minBackoff
		w.Header().Set("Retry-After", strconv.Itoa(backoff))
		if c := fsrv.logger.Check(zapcore.DebugLevel, "retry after backoff"); c != nil {
			c.Write(zap.String("filename", filename), zap.Int("backoff", backoff), zap.Error(err))
		}
		return nil, caddyhttp.Error(http.StatusServiceUnavailable, err)
	}
	return file, nil
}

// mapDirOpenError maps the provided non-nil error from opening name
// to a possibly better non-nil error. In particular, it turns OS-specific errors
// about opening files in non-directories into os.ErrNotExist. See golang/go#18984.
// Adapted from the Go standard library; originally written by Nathaniel Caza.
// https://go-review.googlesource.com/c/go/+/36635/
// https://go-review.googlesource.com/c/go/+/36804/
func (fsrv *FileServer) mapDirOpenError(fileSystem fs.FS, originalErr error, name string) error {
	if errors.Is(originalErr, fs.ErrNotExist) || errors.Is(originalErr, fs.ErrPermission) {
		return originalErr
	}

	// NOTE(review): Open/Stat errors from OS-backed file systems are
	// typically *fs.PathError, so this early return appears to make the
	// component-walk below unreachable for those errors (mapping them to
	// fs.ErrInvalid / 400 instead of fs.ErrNotExist / 404) — confirm this
	// is the intended behavior against the upstream history.
	var pathErr *fs.PathError
	if errors.As(originalErr, &pathErr) {
		return fs.ErrInvalid
	}

	// walk the path components: if an ancestor of name exists but is not
	// a directory, report the whole path as not existing
	parts := strings.Split(name, separator)
	for i := range parts {
		if parts[i] == "" {
			continue
		}
		fi, err := fs.Stat(fileSystem, strings.Join(parts[:i+1], separator))
		if err != nil {
			return originalErr
		}
		if !fi.IsDir() {
			return fs.ErrNotExist
		}
	}

	return originalErr
}

// transformHidePaths performs replacements for all the elements of fsrv.Hide and
// makes them absolute paths (if they contain a path separator), then returns a
// new list of the transformed values.
func (fsrv *FileServer) transformHidePaths(repl *caddy.Replacer) []string {
	hide := make([]string, len(fsrv.Hide))
	for i := range fsrv.Hide {
		hide[i] = repl.ReplaceAll(fsrv.Hide[i], "")
		if strings.Contains(hide[i], separator) {
			abs, err := caddy.FastAbs(hide[i])
			if err == nil {
				hide[i] = abs
			}
		}
	}
	return hide
}

// fileHidden returns true if filename is hidden according to the hide list.
// filename must be a relative or absolute file system path, not a request
// URI path. It is expected that all the paths in the hide list are absolute
// paths or are singular filenames (without a path separator).
func fileHidden(filename string, hide []string) bool {
	if len(hide) == 0 {
		return false
	}

	// all path comparisons use the complete absolute path if possible
	filenameAbs, err := caddy.FastAbs(filename)
	if err == nil {
		filename = filenameAbs
	}

	// components is split lazily, only when the first
	// separator-less hide entry is encountered
	var components []string

	for _, h := range hide {
		if !strings.Contains(h, separator) {
			// if there is no separator in h, then we assume the user
			// wants to hide any files or folders that match that
			// name; thus we have to compare against each component
			// of the filename, e.g. hiding "bar" would hide "/bar"
			// as well as "/foo/bar/baz" but not "/barstool".
			if len(components) == 0 {
				components = strings.Split(filename, separator)
			}
			for _, c := range components {
				if hidden, _ := filepath.Match(h, c); hidden {
					return true
				}
			}
		} else if after, ok := strings.CutPrefix(filename, h); ok {
			// if there is a separator in h, and filename is exactly
			// prefixed with h, then we can do a prefix match so that
			// "/foo" matches "/foo/bar" but not "/foobar".
			withoutPrefix := after
			if strings.HasPrefix(withoutPrefix, separator) {
				return true
			}
		}

		// in the general case, a glob match will suffice
		if hidden, _ := filepath.Match(h, filename); hidden {
			return true
		}
	}

	return false
}

// notFound returns a 404 error or, if pass-thru is enabled,
// it calls the next handler in the chain.
func (fsrv *FileServer) notFound(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	if fsrv.PassThru {
		return next.ServeHTTP(w, r)
	}
	return caddyhttp.Error(http.StatusNotFound, nil)
}

// calculateEtag computes an entity tag using a strong validator
// without consuming the contents of the file. It requires the
// file info contain the correct size and modification time.
// It strives to implement the semantics regarding ETags as defined
// by RFC 9110 section 8.8.3 and 8.8.1. See
// https://www.rfc-editor.org/rfc/rfc9110.html#section-8.8.3.
//
// As our implementation uses file modification timestamp and size,
// note the following from RFC 9110 section 8.8.1: "A representation's
// modification time, if defined with only one-second resolution,
// might be a weak validator if it is possible for the representation to
// be modified twice during a single second and retrieved between those
// modifications." The ext4 file system, which underpins the vast majority
// of Caddy deployments, stores mod times with millisecond precision,
// which we consider precise enough to qualify as a strong validator.
func calculateEtag(d os.FileInfo) string {
	mtime := d.ModTime()
	if mtimeUnix := mtime.Unix(); mtimeUnix == 0 || mtimeUnix == 1 {
		return "" // not useful anyway; see issue #5548
	}
	// quoted base-36 concatenation of nanosecond mod time and size
	var sb strings.Builder
	sb.WriteRune('"')
	sb.WriteString(strconv.FormatInt(mtime.UnixNano(), 36))
	sb.WriteString(strconv.FormatInt(d.Size(), 36))
	sb.WriteRune('"')
	return sb.String()
}

// Finds the first corresponding etag file for a given file in the file system and return its content.
// Returns an empty string (and nil error) when no sidecar etag file exists.
func (fsrv *FileServer) getEtagFromFile(fileSystem fs.FS, filename string) (string, error) {
	for _, suffix := range fsrv.EtagFileExtensions {
		etagFilename := filename + suffix
		etag, err := fs.ReadFile(fileSystem, etagFilename)
		if errors.Is(err, fs.ErrNotExist) {
			continue
		}
		if err != nil {
			return "", fmt.Errorf("cannot read etag from file %s: %v", etagFilename, err)
		}

		// Etags should not contain newline characters
		etag = bytes.ReplaceAll(etag, []byte("\n"), []byte{})

		return string(etag), nil
	}
	return "", nil
}

// redirect performs a redirect to a given path. The 'toPath' parameter
// MUST be solely a path, and MUST NOT include a query.
func redirect(w http.ResponseWriter, r *http.Request, toPath string) error {
	for strings.HasPrefix(toPath, "//") {
		// prevent path-based open redirects
		toPath = strings.TrimPrefix(toPath, "/")
	}
	// preserve the query string if present
	if r.URL.RawQuery != "" {
		toPath += "?" + r.URL.RawQuery
	}
	http.Redirect(w, r, toPath, http.StatusPermanentRedirect)
	return nil
}

// statusOverrideResponseWriter intercepts WriteHeader calls
// to instead write the HTTP status code we want instead
// of the one http.ServeContent will use by default (usually 200)
type statusOverrideResponseWriter struct {
	http.ResponseWriter
	code int
}

// WriteHeader intercepts calls by the stdlib to WriteHeader
// to instead write the HTTP status code we want.
package fileserver

import (
	"path/filepath"
	"runtime"
	"strings"
	"testing"
)

// TestFileHidden exercises fileHidden with table-driven cases covering
// name-only entries, absolute prefixes, and glob patterns; on Windows the
// inputs are converted to absolute, OS-native paths first.
func TestFileHidden(t *testing.T) {
	for i, tc := range []struct {
		inputHide []string
		inputPath string
		expect    bool
	}{
		{
			inputHide: nil,
			inputPath: "",
			expect:    false,
		},
		{
			inputHide: []string{".gitignore"},
			inputPath: "/.gitignore",
			expect:    true,
		},
		{
			inputHide: []string{".git"},
			inputPath: "/.gitignore",
			expect:    false,
		},
		{
			inputHide: []string{"/.git"},
			inputPath: "/.gitignore",
			expect:    false,
		},
		{
			inputHide: []string{".git"},
			inputPath: "/.git",
			expect:    true,
		},
		{
			inputHide: []string{".git"},
			inputPath: "/.git/foo",
			expect:    true,
		},
		{
			inputHide: []string{".git"},
			inputPath: "/foo/.git/bar",
			expect:    true,
		},
		{
			inputHide: []string{"/prefix"},
			inputPath: "/prefix/foo",
			expect:    true,
		},
		{
			inputHide: []string{"/foo/*/bar"},
			inputPath: "/foo/asdf/bar",
			expect:    true,
		},
		{
			inputHide: []string{"*.txt"},
			inputPath: "/foo/bar.txt",
			expect:    true,
		},
		{
			inputHide: []string{"/foo/bar/*.txt"},
			inputPath: "/foo/bar/baz.txt",
			expect:    true,
		},
		{
			inputHide: []string{"/foo/bar/*.txt"},
			inputPath: "/foo/bar.txt",
			expect:    false,
		},
		{
			inputHide: []string{"/foo/bar/*.txt"},
			inputPath: "/foo/bar/index.html",
			expect:    false,
		},
		{
			inputHide: []string{"/foo"},
			inputPath: "/foo",
			expect:    true,
		},
		{
			inputHide: []string{"/foo"},
			inputPath: "/foobar",
			expect:    false,
		},
		{
			inputHide: []string{"first", "second"},
			inputPath: "/second",
			expect:    true,
		},
	} {
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(tc.inputPath, "/") {
				tc.inputPath, _ = filepath.Abs(tc.inputPath)
			}
			tc.inputPath = filepath.FromSlash(tc.inputPath)
			for i := range tc.inputHide {
				if strings.HasPrefix(tc.inputHide[i], "/") {
					tc.inputHide[i], _ = filepath.Abs(tc.inputHide[i])
				}
				tc.inputHide[i] = filepath.FromSlash(tc.inputHide[i])
			}
		}
		actual := fileHidden(tc.inputPath, tc.inputHide)
		if actual != tc.expect {
			t.Errorf("Test %d: Does %v hide %s? Got %t but expected %t",
				i, tc.inputHide, tc.inputPath, actual, tc.expect)
		}
	}
}



================================================
FILE: modules/caddyhttp/fileserver/testdata/%D9%85%D9%84%D9%81.txt
================================================
%D9%85%D9%84%D9%81.txt



================================================
FILE: modules/caddyhttp/fileserver/testdata/foo.php.php/index.php
================================================
foo.php.php/index.php



================================================
FILE: modules/caddyhttp/fileserver/testdata/foo.txt
================================================
foo.txt



================================================
FILE: modules/caddyhttp/fileserver/testdata/foodir/bar.txt
================================================
foodir/bar.txt



================================================
FILE: modules/caddyhttp/fileserver/testdata/foodir/foo.txt
================================================
foodir/foo.txt



================================================
FILE: modules/caddyhttp/fileserver/testdata/index.php
================================================
index.php



================================================
FILE: modules/caddyhttp/fileserver/testdata/large.txt
================================================
This is a file with more content than the other files in this directory
such that tests using the largest_size policy pick this file, or the
smallest_size policy avoids this file.
================================================ FILE: modules/caddyhttp/fileserver/testdata/notphp.php.txt ================================================ notphp.php.txt ================================================ FILE: modules/caddyhttp/fileserver/testdata/remote.php ================================================ remote.php ================================================ FILE: modules/caddyhttp/fileserver/testdata/ملف.txt ================================================ ملف.txt ================================================ FILE: modules/caddyhttp/headers/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package headers import ( "fmt" "net/http" "reflect" "strings" "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { httpcaddyfile.RegisterDirective("header", parseCaddyfile) httpcaddyfile.RegisterDirective("request_header", parseReqHdrCaddyfile) } // parseCaddyfile sets up the handler for response headers from // Caddyfile tokens. Syntax: // // header [] [[+|-|?|>] [] []] { // [+] [ []] // ? // - // > // [defer] // } // // Either a block can be opened or a single header field can be configured // in the first line, but not both in the same directive. Header operations // are deferred to write-time if any headers are being deleted or if the // 'defer' subdirective is used. 
// + appends a header value, - deletes a field,
// ? conditionally sets a value only if the header field is not already set,
// and > sets a field with defer enabled.
func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	h.Next() // consume directive name
	matcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}
	h.Next() // consume the directive name again (matcher parsing resets)

	makeHandler := func() Handler {
		return Handler{
			Response: &RespHeaderOps{
				HeaderOps: &HeaderOps{},
			},
		}
	}
	// two handlers are built in parallel: ops with a '?' prefix
	// (conditional set) go into handlerWithRequire so they become
	// a separate route with a response matcher
	handler, handlerWithRequire := makeHandler(), makeHandler()

	// first see if headers are in the initial line
	var hasArgs bool
	if h.NextArg() {
		hasArgs = true
		field := h.Val()
		var value string
		var replacement *string
		if h.NextArg() {
			value = h.Val()
		}
		if h.NextArg() {
			arg := h.Val()
			replacement = &arg
		}
		err := applyHeaderOp(
			handler.Response.HeaderOps,
			handler.Response,
			field,
			value,
			replacement,
		)
		if err != nil {
			return nil, h.Err(err.Error())
		}
		if len(handler.Response.HeaderOps.Delete) > 0 {
			handler.Response.Deferred = true
		}
	}

	// if not, they should be in a block
	for h.NextBlock(0) {
		field := h.Val()
		if field == "defer" {
			handler.Response.Deferred = true
			continue
		}
		if field == "match" {
			responseMatchers := make(map[string]caddyhttp.ResponseMatcher)
			err := caddyhttp.ParseNamedResponseMatcher(h.NewFromNextSegment(), responseMatchers)
			if err != nil {
				return nil, err
			}
			matcher := responseMatchers["match"]
			handler.Response.Require = &matcher
			continue
		}
		if hasArgs {
			return nil, h.Err("cannot specify headers in both arguments and block") // because it would be weird
		}

		// sometimes it is habitual for users to suffix a field name with a colon,
		// as if they were writing a curl command or something; see
		// https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
		field = strings.TrimSuffix(field, ":")

		var value string
		var replacement *string
		if h.NextArg() {
			value = h.Val()
		}
		if h.NextArg() {
			arg := h.Val()
			replacement = &arg
		}

		handlerToUse := handler
		if strings.HasPrefix(field, "?") {
			handlerToUse = handlerWithRequire
		}

		err := applyHeaderOp(
			handlerToUse.Response.HeaderOps,
			handlerToUse.Response,
			field,
			value,
			replacement,
		)
		if err != nil {
			return nil, h.Err(err.Error())
		}
	}

	// emit a route only for handlers that actually accumulated ops
	var configValues []httpcaddyfile.ConfigValue
	if !reflect.DeepEqual(handler, makeHandler()) {
		configValues = append(configValues, h.NewRoute(matcherSet, handler)...)
	}
	if !reflect.DeepEqual(handlerWithRequire, makeHandler()) {
		configValues = append(configValues, h.NewRoute(matcherSet, handlerWithRequire)...)
	}

	return configValues, nil
}

// parseReqHdrCaddyfile sets up the handler for request headers
// from Caddyfile tokens. Syntax:
//
//	request_header [<matcher>] [[+|-]<field> [<value>] [<replacement>]]
func parseReqHdrCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	h.Next() // consume directive name
	matcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}
	h.Next() // consume the directive name again (matcher parsing resets)

	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	field := h.Val()

	hdr := Handler{
		Request: &HeaderOps{},
	}

	// sometimes it is habitual for users to suffix a field name with a colon,
	// as if they were writing a curl command or something; see
	// https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19
	field = strings.TrimSuffix(field, ":")

	var value string
	var replacement *string
	if h.NextArg() {
		value = h.Val()
	}
	if h.NextArg() {
		arg := h.Val()
		replacement = &arg
		if h.NextArg() {
			return nil, h.ArgErr()
		}
	}

	if hdr.Request == nil {
		hdr.Request = new(HeaderOps)
	}
	if err := CaddyfileHeaderOp(hdr.Request, field, value, replacement); err != nil {
		return nil, h.Err(err.Error())
	}

	configValues := h.NewRoute(matcherSet, hdr)

	if h.NextArg() {
		return nil, h.ArgErr()
	}
	return configValues, nil
}

// CaddyfileHeaderOp applies a new header operation according to
The field can be prefixed with
// "+" or "-" to specify adding or removing; otherwise, the value
// will be set (overriding any previous value). If replacement is
// non-nil, value will be treated as a regular expression which
// will be used to search and then replacement will be used to
// complete the substring replacement; in that case, any + or -
// prefix to field will be ignored.
func CaddyfileHeaderOp(ops *HeaderOps, field, value string, replacement *string) error {
	// nil respHeaderOps: request-header context, so response-only
	// modifiers ('?', deferred delete) are rejected or skipped below
	return applyHeaderOp(ops, nil, field, value, replacement)
}

// applyHeaderOp dispatches one header operation into ops based on the
// field's prefix character. respHeaderOps is non-nil only for response
// headers; it carries the Deferred flag and the '?' Require matcher.
// Case order matters: '?' is checked before the replacement case, so a
// '?' field with a replacement is NOT a replace; '>' is checked after,
// so a '>' field with a replacement is a deferred replace.
func applyHeaderOp(ops *HeaderOps, respHeaderOps *RespHeaderOps, field, value string, replacement *string) error {
	switch {
	case strings.HasPrefix(field, "+"): // append
		if ops.Add == nil {
			ops.Add = make(http.Header)
		}
		ops.Add.Add(field[1:], value)

	case strings.HasPrefix(field, "-"): // delete
		ops.Delete = append(ops.Delete, field[1:])
		// deletions only make sense after upstream handlers have
		// written their headers, so defer the response ops
		if respHeaderOps != nil {
			respHeaderOps.Deferred = true
		}

	case strings.HasPrefix(field, "?"): // default (conditional on not existing) - response headers only
		if respHeaderOps == nil {
			return fmt.Errorf("%v: the default header modifier ('?') can only be used on response headers; for conditional manipulation of request headers, use matchers", field)
		}
		if respHeaderOps.Require == nil {
			respHeaderOps.Require = &caddyhttp.ResponseMatcher{
				Headers: make(http.Header),
			}
		}
		field = strings.TrimPrefix(field, "?")
		// a nil value in Require.Headers means "field must be absent"
		respHeaderOps.Require.Headers[field] = nil
		if respHeaderOps.Set == nil {
			respHeaderOps.Set = make(http.Header)
		}
		respHeaderOps.Set.Set(field, value)

	case replacement != nil: // replace
		// allow defer shortcut for replace syntax
		if strings.HasPrefix(field, ">") && respHeaderOps != nil {
			respHeaderOps.Deferred = true
		}
		if ops.Replace == nil {
			ops.Replace = make(map[string][]Replacement)
		}
		// any modifier prefix is stripped; replacement semantics win
		field = strings.TrimLeft(field, "+-?>")
		ops.Replace[field] = append(
			ops.Replace[field],
			Replacement{
				SearchRegexp: value,
				Replace:      *replacement,
			},
		)

	case strings.HasPrefix(field, ">"): // set (overwrite) with defer
		if ops.Set == nil {
			ops.Set = make(http.Header)
		}
		ops.Set.Set(field[1:], value)
		if respHeaderOps != nil {
			respHeaderOps.Deferred = true
		}

	default: // set (overwrite)
		if ops.Set == nil {
			ops.Set = make(http.Header)
		}
		ops.Set.Set(field, value)
	}
	return nil
}

================================================
FILE: modules/caddyhttp/headers/headers.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package headers

import (
	"fmt"
	"net/http"
	"regexp"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Handler{})
}

// Handler is a middleware which modifies request and response headers.
//
// Changes to headers are applied immediately, except for the response
// headers when Deferred is true or when Required is set. In those cases,
// the changes are applied when the headers are written to the response.
// Note that deferred changes do not take effect if an error occurs later
// in the middleware chain.
//
// Properties in this module accept placeholders.
//
// Response header operations can be conditioned upon response status code
// and/or other header values.
type Handler struct {
	// Operations to perform on request headers, applied before
	// the next handler runs.
	Request *HeaderOps `json:"request,omitempty"`

	// Operations to perform on response headers, applied either
	// immediately or deferred to write time (see Handler docs).
	Response *RespHeaderOps `json:"response,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.headers",
		New: func() caddy.Module { return new(Handler) },
	}
}

// Provision sets up h's configuration.
func (h *Handler) Provision(ctx caddy.Context) error {
	if h.Request != nil {
		err := h.Request.Provision(ctx)
		if err != nil {
			return err
		}
	}
	if h.Response != nil {
		err := h.Response.Provision(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

// Validate ensures h's configuration is valid.
func (h Handler) Validate() error {
	if h.Request != nil {
		err := h.Request.validate()
		if err != nil {
			return err
		}
	}
	// Response.validate is promoted from the embedded *HeaderOps, so
	// guard against a nil embedded pointer before calling it
	if h.Response != nil && h.Response.HeaderOps != nil {
		err := h.Response.validate()
		if err != nil {
			return err
		}
	}
	return nil
}

// ServeHTTP applies request header ops immediately; response header ops
// are applied immediately too, unless they must be deferred (Deferred or
// Require set), in which case the ResponseWriter is wrapped so the ops
// run when headers are actually written.
func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	if h.Request != nil {
		h.Request.ApplyToRequest(r)
	}

	if h.Response != nil {
		if h.Response.Deferred || h.Response.Require != nil {
			w = &responseWriterWrapper{
				ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w},
				replacer:              repl,
				require:               h.Response.Require,
				headerOps:             h.Response.HeaderOps,
			}
		} else {
			h.Response.ApplyTo(w.Header(), repl)
		}
	}

	return next.ServeHTTP(w, r)
}

// HeaderOps defines manipulations for HTTP headers.
type HeaderOps struct {
	// Adds HTTP headers; does not replace any existing header fields.
	Add http.Header `json:"add,omitempty"`

	// Sets HTTP headers; replaces existing header fields.
	Set http.Header `json:"set,omitempty"`

	// Names of HTTP header fields to delete. Basic wildcards are supported:
	//
	// - Start with `*` for all field names with the given suffix;
	// - End with `*` for all field names with the given prefix;
	// - Start and end with `*` for all field names containing a substring.
	Delete []string `json:"delete,omitempty"`

	// Performs in-situ substring replacements of HTTP headers.
	// Keys are the field names on which to perform the associated replacements.
	// If the field name is `*`, the replacements are performed on all header fields.
	Replace map[string][]Replacement `json:"replace,omitempty"`
}

// Provision sets up the header operations. Regex patterns that do not
// contain placeholders are compiled once here; patterns with placeholders
// are left for runtime compilation (after placeholder expansion).
func (ops *HeaderOps) Provision(_ caddy.Context) error {
	if ops == nil {
		return nil // it's possible no ops are configured; fix #6893
	}
	for fieldName, replacements := range ops.Replace {
		for i, r := range replacements {
			if r.SearchRegexp == "" {
				continue
			}
			// Check if it contains placeholders
			if containsPlaceholders(r.SearchRegexp) {
				// Contains placeholders, skips precompilation, and recompiles at runtime
				continue
			}
			// Does not contain placeholders, safe to precompile
			re, err := regexp.Compile(r.SearchRegexp)
			if err != nil {
				return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err)
			}
			// writes through the slice, mutating the entry in ops.Replace
			replacements[i].re = re
		}
	}
	return nil
}

// containsPlaceholders checks if the string contains Caddy placeholder syntax {key}.
// Only a non-empty "{...}" pair counts; "{}", "{unclosed", and "unopened}" do not.
func containsPlaceholders(s string) bool {
	_, after, ok := strings.Cut(s, "{")
	if !ok {
		return false
	}
	closeIdx := strings.Index(after, "}")
	if closeIdx == -1 {
		return false
	}
	// Make sure there is content between the brackets
	return closeIdx > 0
}

// validate rejects replacements that specify both a substring search
// and a regex search for the same field entry.
func (ops HeaderOps) validate() error {
	for fieldName, replacements := range ops.Replace {
		for _, r := range replacements {
			if r.Search != "" && r.SearchRegexp != "" {
				return fmt.Errorf("cannot specify both a substring search and a regular expression search for field '%s'", fieldName)
			}
		}
	}
	return nil
}

// Replacement describes a string replacement,
// either a simple and fast substring search
// or a slower but more powerful regex search.
type Replacement struct {
	// The substring to search for.
	Search string `json:"search,omitempty"`

	// The regular expression to search with.
	SearchRegexp string `json:"search_regexp,omitempty"`

	// The string with which to replace matches.
Replace string `json:"replace,omitempty"`

	// compiled SearchRegexp; nil when the pattern contains placeholders
	// (compiled at runtime instead) or no regex is configured
	re *regexp.Regexp
}

// RespHeaderOps defines manipulations for response headers.
type RespHeaderOps struct {
	*HeaderOps

	// If set, header operations will be deferred until
	// they are written out and only performed if the
	// response matches these criteria.
	Require *caddyhttp.ResponseMatcher `json:"require,omitempty"`

	// If true, header operations will be deferred until
	// they are written out. Superseded if Require is set.
	// Usually you will need to set this to true if any
	// fields are being deleted.
	Deferred bool `json:"deferred,omitempty"`
}

// ApplyTo applies ops to hdr using repl.
// Order is significant: delete-all runs first, then add, then set, then
// targeted deletes, then replacements.
func (ops *HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) {
	// nil check since the Provision method is not guaranteed to be called
	if ops == nil {
		return
	}

	// before manipulating headers in other ways, check if there
	// is configuration to delete all headers, and do that first
	// because if a header is to be added, we don't want to delete
	// it also
	for _, fieldName := range ops.Delete {
		fieldName = repl.ReplaceKnown(fieldName, "")
		if fieldName == "*" {
			clear(hdr)
		}
	}

	// add
	for fieldName, vals := range ops.Add {
		fieldName = repl.ReplaceKnown(fieldName, "")
		for _, v := range vals {
			hdr.Add(fieldName, repl.ReplaceKnown(v, ""))
		}
	}

	// set
	for fieldName, vals := range ops.Set {
		fieldName = repl.ReplaceKnown(fieldName, "")
		var newVals []string
		for i := range vals {
			// append to new slice so we don't overwrite
			// the original values in ops.Set
			newVals = append(newVals, repl.ReplaceKnown(vals[i], ""))
		}
		hdr.Set(fieldName, strings.Join(newVals, ","))
	}

	// delete
	for _, fieldName := range ops.Delete {
		fieldName = strings.ToLower(repl.ReplaceKnown(fieldName, ""))
		if fieldName == "*" {
			continue // handled above
		}
		switch {
		// wildcard on both ends: substring match
		case strings.HasPrefix(fieldName, "*") && strings.HasSuffix(fieldName, "*"):
			for existingField := range hdr {
				if strings.Contains(strings.ToLower(existingField), fieldName[1:len(fieldName)-1]) {
					delete(hdr, existingField)
				}
			}
		// leading wildcard: suffix match
		case strings.HasPrefix(fieldName, "*"):
			for existingField := range hdr {
				if strings.HasSuffix(strings.ToLower(existingField), fieldName[1:]) {
					delete(hdr, existingField)
				}
			}
		// trailing wildcard: prefix match
		case strings.HasSuffix(fieldName, "*"):
			for existingField := range hdr {
				if strings.HasPrefix(strings.ToLower(existingField), fieldName[:len(fieldName)-1]) {
					delete(hdr, existingField)
				}
			}
		default:
			hdr.Del(fieldName)
		}
	}

	// replace
	for fieldName, replacements := range ops.Replace {
		fieldName = http.CanonicalHeaderKey(repl.ReplaceKnown(fieldName, ""))

		// all fields...
		if fieldName == "*" {
			for _, r := range replacements {
				search := repl.ReplaceKnown(r.Search, "")
				replace := repl.ReplaceKnown(r.Replace, "")
				for fieldName, vals := range hdr {
					for i := range vals {
						if r.re != nil {
							// Use precompiled regular expressions
							hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace)
						} else if r.SearchRegexp != "" {
							// Runtime compilation of regular expressions
							searchRegexp := repl.ReplaceKnown(r.SearchRegexp, "")
							if re, err := regexp.Compile(searchRegexp); err == nil {
								hdr[fieldName][i] = re.ReplaceAllString(hdr[fieldName][i], replace)
							}
							// If compilation fails, skip this replacement
						} else {
							hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace)
						}
					}
				}
			}
			continue
		}

		// ...or only with the named field
		for _, r := range replacements {
			search := repl.ReplaceKnown(r.Search, "")
			replace := repl.ReplaceKnown(r.Replace, "")
			for hdrFieldName, vals := range hdr {
				// see issue #4330 for why we don't simply use hdr[fieldName]
				if http.CanonicalHeaderKey(hdrFieldName) != fieldName {
					continue
				}
				for i := range vals {
					if r.re != nil {
						hdr[hdrFieldName][i] = r.re.ReplaceAllString(hdr[hdrFieldName][i], replace)
					} else if r.SearchRegexp != "" {
						searchRegexp := repl.ReplaceKnown(r.SearchRegexp, "")
						if re, err := regexp.Compile(searchRegexp); err == nil {
							hdr[hdrFieldName][i] = re.ReplaceAllString(hdr[hdrFieldName][i], replace)
						}
					} else {
						hdr[hdrFieldName][i] = strings.ReplaceAll(hdr[hdrFieldName][i], search, replace)
					}
				}
			}
		}
	}
}

// ApplyToRequest applies ops to r, specially handling the Host
// header which the standard library does not include with the
// header map with all the others. This method mutates r.Host.
func (ops HeaderOps) ApplyToRequest(r *http.Request) {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// capture the current Host header so we can
	// reset to it when we're done
	origHost, hadHost := r.Header["Host"]

	// append r.Host; this way, we know that our value
	// was last in the list, and if an Add operation
	// appended something else after it, that's probably
	// fine because it's weird to have multiple Host
	// headers anyway and presumably the one they added
	// is the one they wanted
	r.Header["Host"] = append(r.Header["Host"], r.Host)

	// apply header operations
	ops.ApplyTo(r.Header, repl)

	// retrieve the last Host value (likely the one we appended)
	if len(r.Header["Host"]) > 0 {
		r.Host = r.Header["Host"][len(r.Header["Host"])-1]
	} else {
		r.Host = ""
	}

	// reset the Host header slice
	if hadHost {
		r.Header["Host"] = origHost
	} else {
		delete(r.Header, "Host")
	}
}

// responseWriterWrapper defers response header
// operations until WriteHeader is called.
type responseWriterWrapper struct {
	*caddyhttp.ResponseWriterWrapper
	replacer    *caddy.Replacer
	require     *caddyhttp.ResponseMatcher
	headerOps   *HeaderOps
	wroteHeader bool
}

// WriteHeader applies the deferred header ops (subject to require
// matching) just before the status is written. Informational 1xx
// responses do not mark the header as written, so the ops can still
// run again for the final response.
func (rww *responseWriterWrapper) WriteHeader(status int) {
	if rww.wroteHeader {
		return
	}

	// 1xx responses aren't final; just informational
	if status < 100 || status > 199 {
		rww.wroteHeader = true
	}

	if rww.require == nil || rww.require.Match(status, rww.ResponseWriterWrapper.Header()) {
		if rww.headerOps != nil {
			rww.headerOps.ApplyTo(rww.ResponseWriterWrapper.Header(), rww.replacer)
		}
	}
	rww.ResponseWriterWrapper.WriteHeader(status)
}

// Write ensures headers are flushed (with ops applied) before any body
// bytes go out, mirroring net/http's implicit WriteHeader(200).
func (rww *responseWriterWrapper) Write(d []byte) (int, error) {
	if !rww.wroteHeader {
		rww.WriteHeader(http.StatusOK)
	}
	return rww.ResponseWriterWrapper.Write(d)
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Handler)(nil)
	_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
	_ http.ResponseWriter         = (*responseWriterWrapper)(nil)
)

================================================
FILE: modules/caddyhttp/headers/headers_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package headers import ( "context" "fmt" "net/http" "net/http/httptest" "reflect" "testing" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func TestHandler(t *testing.T) { for i, tc := range []struct { handler Handler reqHeader http.Header respHeader http.Header respStatusCode int expectedReqHeader http.Header expectedRespHeader http.Header }{ { handler: Handler{ Request: &HeaderOps{ Add: http.Header{ "Expose-Secrets": []string{"always"}, }, }, }, reqHeader: http.Header{ "Expose-Secrets": []string{"i'm serious"}, }, expectedReqHeader: http.Header{ "Expose-Secrets": []string{"i'm serious", "always"}, }, }, { handler: Handler{ Request: &HeaderOps{ Set: http.Header{ "Who-Wins": []string{"batman"}, }, }, }, reqHeader: http.Header{ "Who-Wins": []string{"joker"}, }, expectedReqHeader: http.Header{ "Who-Wins": []string{"batman"}, }, }, { handler: Handler{ Request: &HeaderOps{ Delete: []string{"Kick-Me"}, }, }, reqHeader: http.Header{ "Kick-Me": []string{"if you can"}, "Keep-Me": []string{"i swear i'm innocent"}, }, expectedReqHeader: http.Header{ "Keep-Me": []string{"i swear i'm innocent"}, }, }, { handler: Handler{ Request: &HeaderOps{ Delete: []string{ "*-suffix", "prefix-*", "*_*", }, }, }, reqHeader: http.Header{ "Header-Suffix": []string{"lalala"}, "Prefix-Test": []string{"asdf"}, "Host_Header": []string{"silly django... 
sigh"}, // see issue #4830 "Keep-Me": []string{"foofoofoo"}, }, expectedReqHeader: http.Header{ "Keep-Me": []string{"foofoofoo"}, }, }, { handler: Handler{ Request: &HeaderOps{ Replace: map[string][]Replacement{ "Best-Server": { Replacement{ Search: "NGINX", Replace: "the Caddy web server", }, Replacement{ SearchRegexp: `Apache(\d+)`, Replace: "Caddy", }, }, }, }, }, reqHeader: http.Header{ "Best-Server": []string{"it's NGINX, undoubtedly", "I love Apache2"}, }, expectedReqHeader: http.Header{ "Best-Server": []string{"it's the Caddy web server, undoubtedly", "I love Caddy"}, }, }, { handler: Handler{ Response: &RespHeaderOps{ Require: &caddyhttp.ResponseMatcher{ Headers: http.Header{ "Cache-Control": nil, }, }, HeaderOps: &HeaderOps{ Add: http.Header{ "Cache-Control": []string{"no-cache"}, }, }, }, }, respHeader: http.Header{}, expectedRespHeader: http.Header{ "Cache-Control": []string{"no-cache"}, }, }, { // same as above, but checks that response headers are left alone when "Require" conditions are unmet handler: Handler{ Response: &RespHeaderOps{ Require: &caddyhttp.ResponseMatcher{ Headers: http.Header{ "Cache-Control": nil, }, }, HeaderOps: &HeaderOps{ Add: http.Header{ "Cache-Control": []string{"no-cache"}, }, }, }, }, respHeader: http.Header{ "Cache-Control": []string{"something"}, }, expectedRespHeader: http.Header{ "Cache-Control": []string{"something"}, }, }, { handler: Handler{ Response: &RespHeaderOps{ Require: &caddyhttp.ResponseMatcher{ Headers: http.Header{ "Cache-Control": []string{"no-cache"}, }, }, HeaderOps: &HeaderOps{ Delete: []string{"Cache-Control"}, }, }, }, respHeader: http.Header{ "Cache-Control": []string{"no-cache"}, }, expectedRespHeader: http.Header{}, }, { handler: Handler{ Response: &RespHeaderOps{ Require: &caddyhttp.ResponseMatcher{ StatusCode: []int{5}, }, HeaderOps: &HeaderOps{ Add: http.Header{ "Fail-5xx": []string{"true"}, }, }, }, }, respStatusCode: 503, respHeader: http.Header{}, expectedRespHeader: http.Header{ "Fail-5xx": 
[]string{"true"}, }, }, { handler: Handler{ Request: &HeaderOps{ Replace: map[string][]Replacement{ "Case-Insensitive": { Replacement{ Search: "issue4330", Replace: "issue #4330", }, }, }, }, }, reqHeader: http.Header{ "case-insensitive": []string{"issue4330"}, "Other-Header": []string{"issue4330"}, }, expectedReqHeader: http.Header{ "case-insensitive": []string{"issue #4330"}, "Other-Header": []string{"issue4330"}, }, }, } { rr := httptest.NewRecorder() req := &http.Request{Header: tc.reqHeader} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) tc.handler.Provision(caddy.Context{}) next := nextHandler(func(w http.ResponseWriter, r *http.Request) error { for k, hdrs := range tc.respHeader { for _, v := range hdrs { w.Header().Add(k, v) } } status := 200 if tc.respStatusCode != 0 { status = tc.respStatusCode } w.WriteHeader(status) if tc.expectedReqHeader != nil && !reflect.DeepEqual(r.Header, tc.expectedReqHeader) { return fmt.Errorf("expected request header %v, got %v", tc.expectedReqHeader, r.Header) } return nil }) if err := tc.handler.ServeHTTP(rr, req, next); err != nil { t.Errorf("Test %d: %v", i, err) continue } actual := rr.Header() if tc.expectedRespHeader != nil && !reflect.DeepEqual(actual, tc.expectedRespHeader) { t.Errorf("Test %d: expected response header %v, got %v", i, tc.expectedRespHeader, actual) continue } } } type nextHandler func(http.ResponseWriter, *http.Request) error func (f nextHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) error { return f(w, r) } func TestContainsPlaceholders(t *testing.T) { for i, tc := range []struct { input string expected bool }{ {"static", false}, {"{placeholder}", true}, {"prefix-{placeholder}-suffix", true}, {"{}", false}, {"no-braces", false}, {"{unclosed", false}, {"unopened}", false}, } { actual := containsPlaceholders(tc.input) if actual != tc.expected { t.Errorf("Test %d: containsPlaceholders(%q) = %v, expected %v", i, 
tc.input, actual, tc.expected) } } } func TestHeaderProvisionSkipsPlaceholders(t *testing.T) { ops := &HeaderOps{ Replace: map[string][]Replacement{ "Static": { Replacement{SearchRegexp: ":443", Replace: "STATIC"}, }, "Dynamic": { Replacement{SearchRegexp: ":{http.request.local.port}", Replace: "DYNAMIC"}, }, }, } err := ops.Provision(caddy.Context{}) if err != nil { t.Fatalf("Provision failed: %v", err) } // Static regex should be precompiled if ops.Replace["Static"][0].re == nil { t.Error("Expected static regex to be precompiled") } // Dynamic regex with placeholder should not be precompiled if ops.Replace["Dynamic"][0].re != nil { t.Error("Expected dynamic regex with placeholder to not be precompiled") } } func TestPlaceholderInSearchRegexp(t *testing.T) { handler := Handler{ Response: &RespHeaderOps{ HeaderOps: &HeaderOps{ Replace: map[string][]Replacement{ "Test-Header": { Replacement{ SearchRegexp: ":{http.request.local.port}", Replace: "PLACEHOLDER-WORKS", }, }, }, }, }, } // Provision the handler err := handler.Provision(caddy.Context{}) if err != nil { t.Fatalf("Provision failed: %v", err) } replacement := handler.Response.HeaderOps.Replace["Test-Header"][0] t.Logf("After provision - SearchRegexp: %q, re: %v", replacement.SearchRegexp, replacement.re) rr := httptest.NewRecorder() req := httptest.NewRequest("GET", "http://localhost:443/", nil) repl := caddy.NewReplacer() repl.Set("http.request.local.port", "443") ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) rr.Header().Set("Test-Header", "prefix:443suffix") t.Logf("Initial header: %v", rr.Header()) next := nextHandler(func(w http.ResponseWriter, r *http.Request) error { w.WriteHeader(200) return nil }) err = handler.ServeHTTP(rr, req, next) if err != nil { t.Fatalf("ServeHTTP failed: %v", err) } t.Logf("Final header: %v", rr.Header()) result := rr.Header().Get("Test-Header") expected := "prefixPLACEHOLDER-WORKSsuffix" if result != expected { 
t.Errorf("Expected header value %q, got %q", expected, result) } } ================================================ FILE: modules/caddyhttp/http2listener.go ================================================ package caddyhttp import ( "crypto/tls" "io" "net" "go.uber.org/zap" "golang.org/x/net/http2" ) type connectionStater interface { ConnectionState() tls.ConnectionState } // http2Listener wraps the listener to solve the following problems: // 1. prevent genuine h2c connections from succeeding if h2c is not enabled // and the connection doesn't implment connectionStater or the resulting NegotiatedProtocol // isn't http2. // This does allow a connection to pass as tls enabled even if it's not, listener wrappers // can do this. // 2. After wrapping the connection doesn't implement connectionStater, emit a warning so that listener // wrapper authors will hopefully implement it. // 3. check if the connection matches a specific http version. h2/h2c has a distinct preface. type http2Listener struct { useTLS bool useH1 bool useH2 bool net.Listener logger *zap.Logger } func (h *http2Listener) Accept() (net.Conn, error) { conn, err := h.Listener.Accept() if err != nil { return nil, err } // *tls.Conn doesn't need to be wrapped because we already removed unwanted alpns // and handshake won't succeed without mutually supported alpns if tlsConn, ok := conn.(*tls.Conn); ok { return tlsConn, nil } _, isConnectionStater := conn.(connectionStater) // emit a warning if h.useTLS && !isConnectionStater { h.logger.Warn("tls is enabled, but listener wrapper returns a connection that doesn't implement connectionStater") } else if !h.useTLS && isConnectionStater { h.logger.Warn("tls is disabled, but listener wrapper returns a connection that implements connectionStater") } // if both h1 and h2 are enabled, we don't need to check the preface if h.useH1 && h.useH2 { if isConnectionStater { return tlsStateConn{conn}, nil } return conn, nil } // impossible both are false, either useH1 or 
useH2 must be true, // or else the listener wouldn't be created h2Conn := &http2Conn{ h2Expected: h.useH2, logger: h.logger, Conn: conn, } if isConnectionStater { return tlsStateConn{http2StateConn{h2Conn}}, nil } return h2Conn, nil } // tlsStateConn wraps a net.Conn that implements connectionStater to hide that method // we can call netConn to get the original net.Conn and get the tls connection state // golang 1.25 will call that method, and it breaks h2 with connections other than *tls.Conn type tlsStateConn struct { net.Conn } func (conn tlsStateConn) tlsNetConn() net.Conn { return conn.Conn } type http2StateConn struct { *http2Conn } func (conn http2StateConn) ConnectionState() tls.ConnectionState { return conn.Conn.(connectionStater).ConnectionState() } type http2Conn struct { // current index where the preface should match, // no matching is done if idx is >= len(http2.ClientPreface) idx int // whether the connection is expected to be h2/h2c h2Expected bool // log if one such connection is detected logger *zap.Logger net.Conn } func (c *http2Conn) Read(p []byte) (int, error) { if c.idx >= len(http2.ClientPreface) { return c.Conn.Read(p) } n, err := c.Conn.Read(p) for i := range n { // first mismatch if p[i] != http2.ClientPreface[c.idx] { // close the connection if h2 is expected if c.h2Expected { c.logger.Debug("h1 connection detected, but h1 is not enabled") _ = c.Conn.Close() return 0, io.EOF } // no need to continue matching anymore c.idx = len(http2.ClientPreface) return n, err } c.idx++ // matching complete if c.idx == len(http2.ClientPreface) && !c.h2Expected { c.logger.Debug("h2/h2c connection detected, but h2/h2c is not enabled") _ = c.Conn.Close() return 0, io.EOF } } return n, err } ================================================ FILE: modules/caddyhttp/httpredirectlistener.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); 
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net"
	"net/http"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(HTTPRedirectListenerWrapper{})
}

// HTTPRedirectListenerWrapper provides HTTP->HTTPS redirects for
// connections that come on the TLS port as an HTTP request,
// by detecting using the first few bytes that it's not a TLS
// handshake, but instead an HTTP request.
//
// This is especially useful when using a non-standard HTTPS port.
// A user may simply type the address in their browser without the
// https:// scheme, which would cause the browser to attempt the
// connection over HTTP, but this would cause a "Client sent an
// HTTP request to an HTTPS server" error response.
//
// This listener wrapper must be placed BEFORE the "tls" listener
// wrapper, for it to work properly.
type HTTPRedirectListenerWrapper struct {
	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers. Default: 1 MB
	MaxHeaderBytes int64 `json:"max_header_bytes,omitempty"`
}

func (HTTPRedirectListenerWrapper) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.listeners.http_redirect",
		New: func() caddy.Module { return new(HTTPRedirectListenerWrapper) },
	}
}

// UnmarshalCaddyfile accepts the directive with no options.
func (h *HTTPRedirectListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	return nil
}

func (h *HTTPRedirectListenerWrapper) WrapListener(l net.Listener) net.Listener {
	return &httpRedirectListener{l, h.MaxHeaderBytes}
}

// httpRedirectListener is listener that checks the first few bytes
// of the request when the server is intended to accept HTTPS requests,
// to respond to an HTTP request with a redirect.
type httpRedirectListener struct {
	net.Listener
	maxHeaderBytes int64
}

// Accept waits for and returns the next connection to the listener,
// wrapping it with a httpRedirectConn.
func (l *httpRedirectListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}

	maxHeaderBytes := l.maxHeaderBytes
	if maxHeaderBytes == 0 {
		maxHeaderBytes = 1024 * 1024
	}

	return &httpRedirectConn{
		Conn:  c,
		limit: maxHeaderBytes,
		r:     bufio.NewReader(c),
	}, nil
}

type httpRedirectConn struct {
	net.Conn
	// once is true after the first Read has performed the HTTP sniff
	once  bool
	limit int64
	r     *bufio.Reader
}

// Read tries to peek at the first few bytes of the request, and if we get
// an error reading the headers, and that error was due to the bytes looking
// like an HTTP request, then we perform a HTTP->HTTPS redirect on the same
// port as the original connection.
func (c *httpRedirectConn) Read(p []byte) (int, error) {
	if c.once {
		return c.r.Read(p)
	}
	// no need to use sync.Once - net.Conn is not read from concurrently.
	c.once = true

	firstBytes, err := c.r.Peek(5)
	if err != nil {
		return 0, err
	}

	// If the request doesn't look like HTTP, then it's probably
	// TLS bytes, and we don't need to do anything.
	if !firstBytesLookLikeHTTP(firstBytes) {
		return c.r.Read(p)
	}

	// From now on, we can be almost certain the request is HTTP.
	// The returned error will be non nil and caller are expected to
	// close the connection.

	// Set the read limit, io.MultiReader is needed because
	// when resetting, *bufio.Reader discards buffered data.
	buffered, _ := c.r.Peek(c.r.Buffered())
	mr := io.MultiReader(bytes.NewReader(buffered), c.Conn)
	c.r.Reset(io.LimitReader(mr, c.limit))

	// Parse the HTTP request, so we can get the Host and URL to redirect to.
	req, err := http.ReadRequest(c.r)
	if err != nil {
		return 0, fmt.Errorf("couldn't read HTTP request")
	}

	// Build the redirect response, using the same Host and URL,
	// but replacing the scheme with https.
	headers := make(http.Header)
	headers.Add("Location", "https://"+req.Host+req.URL.String())
	resp := &http.Response{
		Proto:      "HTTP/1.0",
		Status:     "308 Permanent Redirect",
		StatusCode: 308,
		ProtoMajor: 1,
		ProtoMinor: 0,
		Header:     headers,
	}

	err = resp.Write(c.Conn)
	if err != nil {
		return 0, fmt.Errorf("couldn't write HTTP->HTTPS redirect")
	}

	return 0, fmt.Errorf("redirected HTTP request on HTTPS port")
}

// firstBytesLookLikeHTTP reports whether a TLS record header
// looks like it might've been a misdirected plaintext HTTP request.
// Only the five most common method prefixes are checked; hdr must be
// at least 5 bytes (guaranteed by the Peek(5) call above).
func firstBytesLookLikeHTTP(hdr []byte) bool {
	switch string(hdr[:5]) {
	case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
		return true
	}
	return false
}

var (
	_ caddy.ListenerWrapper = (*HTTPRedirectListenerWrapper)(nil)
	_ caddyfile.Unmarshaler = (*HTTPRedirectListenerWrapper)(nil)
)

================================================
FILE: modules/caddyhttp/intercept/intercept.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package intercept

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Intercept{})
	httpcaddyfile.RegisterHandlerDirective("intercept", parseCaddyfile)
}

// Intercept is a middleware that intercepts then replaces or modifies the original response.
// It can, for instance, be used to implement X-Sendfile/X-Accel-Redirect-like features
// when using modules like FrankenPHP or Caddy Snake.
//
// EXPERIMENTAL: Subject to change or removal.
type Intercept struct {
	// List of handlers and their associated matchers to evaluate
	// after successful response generation.
	// The first handler that matches the original response will
	// be invoked. The original response body will not be
	// written to the client;
	// it is up to the handler to finish handling the response.
	//
	// Three new placeholders are available in this handler chain:
	// - `{http.intercept.status_code}` The status code from the response
	// - `{http.intercept.header.*}` The headers from the response
	HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`

	// Holds the named response matchers from the Caddyfile while adapting
	responseMatchers map[string]caddyhttp.ResponseMatcher

	// Holds the handle_response Caddyfile tokens while adapting
	handleResponseSegments []*caddyfile.Dispenser

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
//
// EXPERIMENTAL: Subject to change or removal.
func (Intercept) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.intercept",
		New: func() caddy.Module { return new(Intercept) },
	}
}

// Provision ensures that i is set up properly before use.
//
// EXPERIMENTAL: Subject to change or removal.
func (irh *Intercept) Provision(ctx caddy.Context) error {
	// set up any response routes
	for i, rh := range irh.HandleResponse {
		err := rh.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning response handler %d: %w", i, err)
		}
	}

	irh.logger = ctx.Logger()

	return nil
}

// bufPool recycles response-body buffers across requests to reduce
// allocations while recording intercepted responses.
var bufPool = sync.Pool{
	New: func() any {
		return new(bytes.Buffer)
	},
}

// TODO: handle status code replacement
//
// EXPERIMENTAL: Subject to change or removal.
type interceptedResponseHandler struct {
	caddyhttp.ResponseRecorder
	replacer     *caddy.Replacer
	handler      caddyhttp.ResponseHandler
	handlerIndex int
	statusCode   int
}

// WriteHeader overrides the status with the configured replacement code,
// except for informational 1xx responses which pass through unchanged.
//
// EXPERIMENTAL: Subject to change or removal.
func (irh interceptedResponseHandler) WriteHeader(statusCode int) {
	if irh.statusCode != 0 && (statusCode < 100 || statusCode >= 200) {
		irh.ResponseRecorder.WriteHeader(irh.statusCode)
		return
	}

	irh.ResponseRecorder.WriteHeader(statusCode)
}

// EXPERIMENTAL: Subject to change or removal.
func (irh interceptedResponseHandler) Unwrap() http.ResponseWriter {
	return irh.ResponseRecorder
}

// EXPERIMENTAL: Subject to change or removal.
func (ir Intercept) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer bufPool.Put(buf)

	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	rec := interceptedResponseHandler{replacer: repl}
	rec.ResponseRecorder = caddyhttp.NewResponseRecorder(w, buf, func(status int, header http.Header) bool {
		// see if any response handler is configured for this original response
		for i, rh := range ir.HandleResponse {
			if rh.Match != nil && !rh.Match.Match(status, header) {
				continue
			}
			rec.handler = rh
			rec.handlerIndex = i

			// if configured to only change the status code,
			// do that then stream
			if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
				sc, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
				if err != nil {
					rec.statusCode = http.StatusInternalServerError
				} else {
					rec.statusCode = sc
				}
			}

			// returning true buffers the response body; when only the
			// status code is replaced (statusCode != 0), stream instead
			return rec.statusCode == 0
		}

		return false
	})

	if err := next.ServeHTTP(rec, r); err != nil {
		return err
	}
	// nothing buffered means no handler matched (or status-only
	// replacement already streamed); nothing more to do
	if !rec.Buffered() {
		return nil
	}

	// set up the replacer so that parts of the original response can be
	// used for routing decisions
	for field, value := range rec.Header() {
		repl.Set("http.intercept.header."+field, strings.Join(value, ","))
	}
	repl.Set("http.intercept.status_code", rec.Status())

	if c := ir.logger.Check(zapcore.DebugLevel, "handling response"); c != nil {
		c.Write(zap.Int("handler", rec.handlerIndex))
	}

	// response recorder doesn't create a new copy of the original headers, they're
	// present in the original response writer

	// create a new recorder to see if any response body from the new handler is present,
	// if not, use the already buffered response body
	recorder := caddyhttp.NewResponseRecorder(w, nil, nil)
	if err := rec.handler.Routes.Compile(emptyHandler).ServeHTTP(recorder, r); err != nil {
		return err
	}

	// no new response status and the status is not 0
	if recorder.Status() == 0 && rec.Status() != 0 {
		w.WriteHeader(rec.Status())
	}

	// no new response body and there is some in the original response
	// TODO: what if the new response doesn't have a body by design?
	// see: https://github.com/caddyserver/caddy/pull/6232#issue-2235224400
	if recorder.Size() == 0 && buf.Len() > 0 {
		_, err := io.Copy(w, buf)
		return err
	}

	return nil
}

// this handler does nothing because everything we need is already buffered
var emptyHandler caddyhttp.Handler = caddyhttp.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) error { return nil })

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	intercept [<matcher>] {
//		# intercept original responses
//		@name {
//			status <code...>
//			header <field> [<value>]
//		}
//		replace_status [<matcher>] <status_code>
//		handle_response [<matcher>] {
//			<directives...>
//		}
//	}
//
// The FinalizeUnmarshalCaddyfile method should be called after this
// to finalize parsing of "handle_response" blocks, if possible.
//
// EXPERIMENTAL: Subject to change or removal.
func (i *Intercept) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// collect the response matchers defined as subdirectives
	// prefixed with "@" for use with "handle_response" blocks
	i.responseMatchers = make(map[string]caddyhttp.ResponseMatcher)

	d.Next() // consume the directive name
	for d.NextBlock(0) {
		// if the subdirective has an "@" prefix then we
		// parse it as a response matcher for use with "handle_response"
		if strings.HasPrefix(d.Val(), matcherPrefix) {
			err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), i.responseMatchers)
			if err != nil {
				return err
			}
			continue
		}

		switch d.Val() {
		case "handle_response":
			// delegate the parsing of handle_response to the caller,
			// since we need the httpcaddyfile.Helper to parse subroutes.
			// See h.FinalizeUnmarshalCaddyfile
			i.handleResponseSegments = append(i.handleResponseSegments, d.NewFromNextSegment())

		case "replace_status":
			args := d.RemainingArgs()
			if len(args) != 1 && len(args) != 2 {
				return d.Errf("must have one or two arguments: an optional response matcher, and a status code")
			}

			responseHandler := caddyhttp.ResponseHandler{}

			if len(args) == 2 {
				if !strings.HasPrefix(args[0], matcherPrefix) {
					return d.Errf("must use a named response matcher, starting with '@'")
				}
				foundMatcher, ok := i.responseMatchers[args[0]]
				if !ok {
					return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
				}
				responseHandler.Match = &foundMatcher
				responseHandler.StatusCode = caddyhttp.WeakString(args[1])
			} else if len(args) == 1 {
				responseHandler.StatusCode = caddyhttp.WeakString(args[0])
			}

			// make sure there's no block, cause it doesn't make sense
			if nesting := d.Nesting(); d.NextBlock(nesting) {
				return d.Errf("cannot define routes for 'replace_status', use 'handle_response' instead.")
			}

			i.HandleResponse = append(
				i.HandleResponse,
				responseHandler,
			)

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}
	}

	return nil
}

// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
// requires having an httpcaddyfile.Helper to function, to parse subroutes.
//
// EXPERIMENTAL: Subject to change or removal.
func (i *Intercept) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
	for _, d := range i.handleResponseSegments {
		// consume the "handle_response" token
		d.Next()
		args := d.RemainingArgs()

		// TODO: Remove this check at some point in the future
		if len(args) == 2 {
			return d.Errf("configuring 'handle_response' for status code replacement is no longer supported. Use 'replace_status' instead.")
		}

		if len(args) > 1 {
			return d.Errf("too many arguments for 'handle_response': %s", args)
		}

		var matcher *caddyhttp.ResponseMatcher
		if len(args) == 1 {
			// the first arg should always be a matcher.
			if !strings.HasPrefix(args[0], matcherPrefix) {
				return d.Errf("must use a named response matcher, starting with '@'")
			}

			foundMatcher, ok := i.responseMatchers[args[0]]
			if !ok {
				return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
			}
			matcher = &foundMatcher
		}

		// parse the block as routes
		handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
		if err != nil {
			return err
		}
		subroute, ok := handler.(*caddyhttp.Subroute)
		if !ok {
			return helper.Errf("segment was not parsed as a subroute")
		}
		i.HandleResponse = append(
			i.HandleResponse,
			caddyhttp.ResponseHandler{
				Match:  matcher,
				Routes: subroute.Routes,
			},
		)
	}

	// move the handle_response entries without a matcher to the end.
	// we can't use sort.SliceStable because it will reorder the rest of the
	// entries which may be undesirable because we don't have a good
	// heuristic to use for sorting.
	withoutMatchers := []caddyhttp.ResponseHandler{}
	withMatchers := []caddyhttp.ResponseHandler{}
	for _, hr := range i.HandleResponse {
		if hr.Match == nil {
			withoutMatchers = append(withoutMatchers, hr)
		} else {
			withMatchers = append(withMatchers, hr)
		}
	}
	i.HandleResponse = append(withMatchers, withoutMatchers...)

	// clean up the bits we only needed for adapting
	i.handleResponseSegments = nil
	i.responseMatchers = nil

	return nil
}

// matcherPrefix marks a subdirective as a named response matcher.
const matcherPrefix = "@"

// parseCaddyfile adapts Caddyfile tokens into an Intercept handler,
// running both parsing phases.
func parseCaddyfile(helper httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	var ir Intercept
	if err := ir.UnmarshalCaddyfile(helper.Dispenser); err != nil {
		return nil, err
	}
	if err := ir.FinalizeUnmarshalCaddyfile(helper); err != nil {
		return nil, err
	}
	return ir, nil
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Intercept)(nil)
	_ caddyfile.Unmarshaler       = (*Intercept)(nil)
	_ caddyhttp.MiddlewareHandler = (*Intercept)(nil)
)



================================================
FILE: modules/caddyhttp/invoke.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"fmt"
	"net/http"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(Invoke{})
}

// Invoke implements a handler that compiles and executes a
// named route that was defined on the server.
//
// EXPERIMENTAL: Subject to change or removal.
type Invoke struct {
	// Name is the key of the named route to execute
	Name string `json:"name,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (Invoke) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.invoke",
		New: func() caddy.Module { return new(Invoke) },
	}
}

// ServeHTTP looks up the named route on the server and executes it,
// chaining the remaining middleware as the route's fallback. It is an
// error to invoke a route that was not defined on the server.
func (invoke *Invoke) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
	server := r.Context().Value(ServerCtxKey).(*Server)
	if route, ok := server.NamedRoutes[invoke.Name]; ok {
		return route.Compile(next).ServeHTTP(w, r)
	}
	return fmt.Errorf("invoke: route '%s' not found", invoke.Name)
}

// Interface guards
var (
	_ MiddlewareHandler = (*Invoke)(nil)
)



================================================
FILE: modules/caddyhttp/ip_matchers.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/netip"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types/ref"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/internal"
)

// MatchRemoteIP matches requests by the remote IP address,
// i.e. the IP address of the direct connection to Caddy.
type MatchRemoteIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones vars should aligned always in the same
	// length and indexes for matching later
	cidrs []*netip.Prefix
	zones []string

	logger *zap.Logger
}

// MatchClientIP matches requests by the client IP address,
// i.e. the resolved address, considering trusted proxies.
type MatchClientIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// cidrs and zones vars should aligned always in the same
	// length and indexes for matching later
	cidrs []*netip.Prefix
	zones []string

	logger *zap.Logger
}

func init() {
	caddy.RegisterModule(MatchRemoteIP{})
	caddy.RegisterModule(MatchClientIP{})
}

// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.remote_ip",
		New: func() caddy.Module { return new(MatchRemoteIP) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		for d.NextArg() {
			if d.Val() == "forwarded" {
				return d.Err("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
			}
			if d.Val() == "private_ranges" {
				m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...)
				continue
			}
			m.Ranges = append(m.Ranges, d.Val())
		}
		if d.NextBlock(0) {
			return d.Err("malformed remote_ip matcher: blocks are not supported")
		}
	}
	return nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression remote_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"remote_ip",
		// name of the function that the macro will be rewritten to call.
		"remote_ip_match_request_list",
		// internal data type of the MatchPath value.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchPath instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}

			m := MatchRemoteIP{}

			for _, input := range strList.([]string) {
				if input == "forwarded" {
					return nil, errors.New("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
				}
				m.Ranges = append(m.Ranges, input)
			}

			err = m.Provision(ctx)
			return m, err
		},
	)
}

// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
	m.logger = ctx.Logger()
	cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
	if err != nil {
		return err
	}
	m.cidrs = cidrs
	m.zones = zones

	return nil
}

// Match returns true if r matches m.
func (m MatchRemoteIP) Match(r *http.Request) bool {
	match, err := m.MatchWithError(r)
	if err != nil {
		SetVar(r.Context(), MatcherErrorVarKey, err)
	}
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchRemoteIP) MatchWithError(r *http.Request) (bool, error) { // if handshake is not finished, we infer 0-RTT that has // not verified remote IP; could be spoofed, so we throw // HTTP 425 status to tell the client to try again after // the handshake is complete if r.TLS != nil && !r.TLS.HandshakeComplete { return false, Error(http.StatusTooEarly, fmt.Errorf("TLS handshake not complete, remote IP cannot be verified")) } address := r.RemoteAddr clientIP, zoneID, err := parseIPZoneFromString(address) if err != nil { if c := m.logger.Check(zapcore.ErrorLevel, "getting remote "); c != nil { c.Write(zap.Error(err)) } return false, nil } matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones) if !matches && !zoneFilter { if c := m.logger.Check(zapcore.DebugLevel, "zone ID from remote IP did not match"); c != nil { c.Write(zap.String("zone", zoneID)) } } return matches, nil } // CaddyModule returns the Caddy module information. func (MatchClientIP) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.client_ip", New: func() caddy.Module { return new(MatchClientIP) }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (m *MatchClientIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // iterate to merge multiple matchers into one for d.Next() { for d.NextArg() { if d.Val() == "private_ranges" { m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...) continue } m.Ranges = append(m.Ranges, d.Val()) } if d.NextBlock(0) { return d.Err("malformed client_ip matcher: blocks are not supported") } } return nil } // CELLibrary produces options that expose this matcher for use in CEL // expression matchers. // // Example: // // expression client_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8') func (MatchClientIP) CELLibrary(ctx caddy.Context) (cel.Library, error) { return CELMatcherImpl( // name of the macro, this is the function name that users see when writing expressions. 
"client_ip", // name of the function that the macro will be rewritten to call. "client_ip_match_request_list", // internal data type of the MatchPath value. []*cel.Type{cel.ListType(cel.StringType)}, // function to convert a constant list of strings to a MatchPath instance. func(data ref.Val) (RequestMatcherWithError, error) { refStringList := stringSliceType strList, err := data.ConvertToNative(refStringList) if err != nil { return nil, err } m := MatchClientIP{ Ranges: strList.([]string), } err = m.Provision(ctx) return m, err }, ) } // Provision parses m's IP ranges, either from IP or CIDR expressions. func (m *MatchClientIP) Provision(ctx caddy.Context) error { m.logger = ctx.Logger() cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges) if err != nil { return err } m.cidrs = cidrs m.zones = zones return nil } // Match returns true if r matches m. func (m MatchClientIP) Match(r *http.Request) bool { match, err := m.MatchWithError(r) if err != nil { SetVar(r.Context(), MatcherErrorVarKey, err) } return match } // MatchWithError returns true if r matches m. 
func (m MatchClientIP) MatchWithError(r *http.Request) (bool, error) { // if handshake is not finished, we infer 0-RTT that has // not verified remote IP; could be spoofed, so we throw // HTTP 425 status to tell the client to try again after // the handshake is complete if r.TLS != nil && !r.TLS.HandshakeComplete { return false, Error(http.StatusTooEarly, fmt.Errorf("TLS handshake not complete, remote IP cannot be verified")) } address := GetVar(r.Context(), ClientIPVarKey).(string) clientIP, zoneID, err := parseIPZoneFromString(address) if err != nil { m.logger.Error("getting client IP", zap.Error(err)) return false, nil } matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones) if !matches && !zoneFilter { m.logger.Debug("zone ID from client IP did not match", zap.String("zone", zoneID)) } return matches, nil } func provisionCidrsZonesFromRanges(ranges []string) ([]*netip.Prefix, []string, error) { cidrs := []*netip.Prefix{} zones := []string{} repl := caddy.NewReplacer() for _, str := range ranges { str = repl.ReplaceAll(str, "") // Exclude the zone_id from the IP if strings.Contains(str, "%") { split := strings.Split(str, "%") str = split[0] // write zone identifiers in m.zones for matching later zones = append(zones, split[1]) } else { zones = append(zones, "") } if strings.Contains(str, "/") { ipNet, err := netip.ParsePrefix(str) if err != nil { return nil, nil, fmt.Errorf("parsing CIDR expression '%s': %v", str, err) } cidrs = append(cidrs, &ipNet) } else { ipAddr, err := netip.ParseAddr(str) if err != nil { return nil, nil, fmt.Errorf("invalid IP address: '%s': %v", str, err) } ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen()) cidrs = append(cidrs, &ipNew) } } return cidrs, zones, nil } func parseIPZoneFromString(address string) (netip.Addr, string, error) { ipStr, _, err := net.SplitHostPort(address) if err != nil { ipStr = address // OK; probably didn't have a port } // Some IPv6-Addresses can contain zone identifiers at the end, // 
which are separated with "%" zoneID := "" if strings.Contains(ipStr, "%") { split := strings.Split(ipStr, "%") ipStr = split[0] zoneID = split[1] } ipAddr, err := netip.ParseAddr(ipStr) if err != nil { return netip.IPv4Unspecified(), "", err } return ipAddr, zoneID, nil } func matchIPByCidrZones(clientIP netip.Addr, zoneID string, cidrs []*netip.Prefix, zones []string) (bool, bool) { zoneFilter := true for i, ipRange := range cidrs { if ipRange.Contains(clientIP) { // Check if there are zone filters assigned and if they match. if zones[i] == "" || zoneID == zones[i] { return true, false } zoneFilter = false } } return false, zoneFilter } // Interface guards var ( _ RequestMatcherWithError = (*MatchRemoteIP)(nil) _ caddy.Provisioner = (*MatchRemoteIP)(nil) _ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil) _ CELLibraryProducer = (*MatchRemoteIP)(nil) _ RequestMatcherWithError = (*MatchClientIP)(nil) _ caddy.Provisioner = (*MatchClientIP)(nil) _ caddyfile.Unmarshaler = (*MatchClientIP)(nil) _ CELLibraryProducer = (*MatchClientIP)(nil) ) ================================================ FILE: modules/caddyhttp/ip_range.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyhttp

import (
	"fmt"
	"net/http"
	"net/netip"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/internal"
)

func init() {
	caddy.RegisterModule(StaticIPRange{})
}

// IPRangeSource gets a list of IP ranges.
//
// The request is passed as an argument to allow plugin implementations
// to have more flexibility. But, a plugin MUST NOT modify the request.
// The caller will have read the `r.RemoteAddr` before getting IP ranges.
//
// This should be a very fast function -- instant if possible.
// The list of IP ranges should be sourced as soon as possible if loaded
// from an external source (i.e. initially loaded during Provisioning),
// so that it's ready to be used when requests start getting handled.
// A read lock should probably be used to get the cached value if the
// ranges can change at runtime (e.g. periodically refreshed).
// Using a `caddy.UsagePool` may be a good idea to avoid having refetch
// the values when a config reload occurs, which would waste time.
//
// If the list of IP ranges cannot be sourced, then provisioning SHOULD
// fail. Getting the IP ranges at runtime MUST NOT fail, because it would
// cancel incoming requests. If refreshing the list fails, then the
// previous list of IP ranges should continue to be returned so that the
// server can continue to operate normally.
type IPRangeSource interface {
	GetIPRanges(*http.Request) []netip.Prefix
}

// StaticIPRange provides a static range of IP address prefixes (CIDRs).
type StaticIPRange struct {
	// A static list of IP ranges (supports CIDR notation).
	Ranges []string `json:"ranges,omitempty"`

	// Holds the parsed CIDR ranges from Ranges.
	ranges []netip.Prefix
}

// CaddyModule returns the Caddy module information.
func (StaticIPRange) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.ip_sources.static", New: func() caddy.Module { return new(StaticIPRange) }, } } func (s *StaticIPRange) Provision(ctx caddy.Context) error { for _, str := range s.Ranges { prefix, err := CIDRExpressionToPrefix(str) if err != nil { return err } s.ranges = append(s.ranges, prefix) } return nil } func (s *StaticIPRange) GetIPRanges(_ *http.Request) []netip.Prefix { return s.ranges } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (m *StaticIPRange) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { if !d.Next() { return nil } for d.NextArg() { if d.Val() == "private_ranges" { m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...) continue } m.Ranges = append(m.Ranges, d.Val()) } return nil } // CIDRExpressionToPrefix takes a string which could be either a // CIDR expression or a single IP address, and returns a netip.Prefix. func CIDRExpressionToPrefix(expr string) (netip.Prefix, error) { // Having a slash means it should be a CIDR expression if strings.Contains(expr, "/") { prefix, err := netip.ParsePrefix(expr) if err != nil { return netip.Prefix{}, fmt.Errorf("parsing CIDR expression: '%s': %v", expr, err) } return prefix, nil } // Otherwise it's likely a single IP address parsed, err := netip.ParseAddr(expr) if err != nil { return netip.Prefix{}, fmt.Errorf("invalid IP address: '%s': %v", expr, err) } prefix := netip.PrefixFrom(parsed, parsed.BitLen()) return prefix, nil } // Interface guards var ( _ caddy.Provisioner = (*StaticIPRange)(nil) _ caddyfile.Unmarshaler = (*StaticIPRange)(nil) _ IPRangeSource = (*StaticIPRange)(nil) ) // PrivateRangesCIDR returns a list of private CIDR range // strings, which can be used as a configuration shortcut. // Note: this function is used at least by mholt/caddy-l4. 
func PrivateRangesCIDR() []string {
	return internal.PrivateRangesCIDR()
}



================================================
FILE: modules/caddyhttp/logging/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"strings"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("log_append", parseCaddyfile)
}

// parseCaddyfile sets up the log_append handler from Caddyfile tokens. Syntax:
//
//	log_append [<matcher>] [<]<key> <value>
//
// (a "<" prefix on the key marks the field as "early"; see LogAppend.Early)
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	handler := new(LogAppend)
	err := handler.UnmarshalCaddyfile(h.Dispenser)
	return handler, err
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (h *LogAppend) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume directive name if !d.NextArg() { return d.ArgErr() } h.Key = d.Val() if !d.NextArg() { return d.ArgErr() } if strings.HasPrefix(h.Key, "<") && len(h.Key) > 1 { h.Early = true h.Key = h.Key[1:] } h.Value = d.Val() return nil } // Interface guards var ( _ caddyfile.Unmarshaler = (*LogAppend)(nil) ) ================================================ FILE: modules/caddyhttp/logging/logappend.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "bytes" "encoding/base64" "net/http" "strings" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { caddy.RegisterModule(LogAppend{}) } // LogAppend implements a middleware that takes a key and value, where // the key is the name of a log field and the value is a placeholder, // or variable key, or constant value to use for that field. type LogAppend struct { // Key is the name of the log field. Key string `json:"key,omitempty"` // Value is the value to use for the log field. // If it is a placeholder (with surrounding `{}`), // it will be evaluated when the log is written. // If the value is a key that exists in the `vars` // map, the value of that key will be used. Otherwise // the value will be used as-is as a constant string. 
Value string `json:"value,omitempty"` // Early, if true, adds the log field before calling // the next handler in the chain. By default, the log // field is added on the way back up the middleware chain, // after all subsequent handlers have completed. Early bool `json:"early,omitempty"` } // CaddyModule returns the Caddy module information. func (LogAppend) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.log_append", New: func() caddy.Module { return new(LogAppend) }, } } func (h LogAppend) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { // Determine if we need to add the log field early. // We do if the Early flag is set, or for convenience, // if the value is a special placeholder for the request body. needsEarly := h.Early || h.Value == placeholderRequestBody || h.Value == placeholderRequestBodyBase64 // Check if we need to buffer the response for special placeholders needsResponseBody := h.Value == placeholderResponseBody || h.Value == placeholderResponseBodyBase64 if needsEarly && !needsResponseBody { // Add the log field before calling the next handler // (but not if we need the response body, which isn't available yet) h.addLogField(r, nil) } var rec caddyhttp.ResponseRecorder var buf *bytes.Buffer if needsResponseBody { // Wrap the response writer with a recorder to capture the response body buf = new(bytes.Buffer) rec = caddyhttp.NewResponseRecorder(w, buf, func(status int, header http.Header) bool { // Always buffer the response when we need to log the body return true }) w = rec } // Run the next handler in the chain. // If an error occurs, we still want to add // any extra log fields that we can, so we // hold onto the error and return it later. 
handlerErr := next.ServeHTTP(w, r) if needsResponseBody { // Write the buffered response to the client if rec.Buffered() { h.addLogField(r, buf) err := rec.WriteResponse() if err != nil { return err } } return handlerErr } if !h.Early { // Add the log field after the handler completes h.addLogField(r, buf) } return handlerErr } // addLogField adds the log field to the request's extra log fields. // If buf is not nil, it contains the buffered response body for special // response body placeholders. func (h LogAppend) addLogField(r *http.Request, buf *bytes.Buffer) { ctx := r.Context() vars := ctx.Value(caddyhttp.VarsCtxKey).(map[string]any) repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) extra := ctx.Value(caddyhttp.ExtraLogFieldsCtxKey).(*caddyhttp.ExtraLogFields) var varValue any // Handle special case placeholders for response body if h.Value == placeholderResponseBody { if buf != nil { varValue = buf.String() } else { varValue = "" } } else if h.Value == placeholderResponseBodyBase64 { if buf != nil { varValue = base64.StdEncoding.EncodeToString(buf.Bytes()) } else { varValue = "" } } else if strings.HasPrefix(h.Value, "{") && strings.HasSuffix(h.Value, "}") && strings.Count(h.Value, "{") == 1 { // the value looks like a placeholder, so get its value varValue, _ = repl.Get(strings.Trim(h.Value, "{}")) } else if val, ok := vars[h.Value]; ok { // the value is a key in the vars map varValue = val } else { // the value is a constant string varValue = h.Value } // Add the field to the extra log fields. // We use zap.Any because it will reflect // to the correct type for us. extra.Add(zap.Any(h.Key, varValue)) } const ( // Special placeholder values that are handled by log_append // rather than by the replacer. 
	placeholderRequestBody        = "{http.request.body}"
	placeholderRequestBodyBase64  = "{http.request.body_base64}"
	placeholderResponseBody       = "{http.response.body}"
	placeholderResponseBodyBase64 = "{http.response.body_base64}"
)

// Interface guards
var (
	_ caddyhttp.MiddlewareHandler = (*LogAppend)(nil)
)


================================================
FILE: modules/caddyhttp/logging.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"context"
	"encoding/json"
	"errors"
	"log/slog"
	"net"
	"net/http"
	"strings"
	"sync"

	"go.uber.org/zap"
	"go.uber.org/zap/exp/zapslog"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	// Register a slog handler factory so that per-request extra log
	// fields (ExtraLogFields) also appear in slog-based log output.
	caddy.RegisterSlogHandlerFactory(func(handler slog.Handler, core zapcore.Core, moduleID string) slog.Handler {
		return &extraFieldsSlogHandler{defaultHandler: handler, core: core, moduleID: moduleID}
	})
}

// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the logger_names map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to one or more custom logger
	// names. For example, a mapping of `"example.com": ["example"]` would
	// cause access logs from requests with a Host of example.com to be
	// emitted by a logger named "http.log.access.example". If there are
	// multiple logger names, then the log will be emitted to all of them.
	// If the logger name is empty, the default logger is used, i.e.
	// the logger "http.log.access".
	//
	// Keys must be hostnames (without ports), and may contain wildcards
	// to match subdomains. The value is an array of logger names.
	//
	// For backwards compatibility, if the value is a string, it is treated
	// as a single-element array.
	LoggerNames map[string]StringArray `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// logger_names map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`

	// If true, credentials that are otherwise omitted, will be logged.
	// The definition of credentials is defined by https://fetch.spec.whatwg.org/#credentials,
	// and this includes some request and response headers, i.e `Cookie`,
	// `Set-Cookie`, `Authorization`, and `Proxy-Authorization`.
	ShouldLogCredentials bool `json:"should_log_credentials,omitempty"`

	// Log each individual handler that is invoked.
	// Requires that the log emit at DEBUG level.
	//
	// NOTE: This may log the configuration of your
	// HTTP handler modules; do not enable this in
	// insecure contexts when there is sensitive
	// data in the configuration.
	//
	// EXPERIMENTAL: Subject to change or removal.
	Trace bool `json:"trace,omitempty"`
}

// wrapLogger wraps logger in one or more loggers named
// according to user preferences for the given host.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, req *http.Request) []*zap.Logger { // using the `log_name` directive or the `access_logger_names` variable, // the logger names can be overridden for the current request if names := GetVar(req.Context(), AccessLoggerNameVarKey); names != nil { if namesSlice, ok := names.([]any); ok { loggers := make([]*zap.Logger, 0, len(namesSlice)) for _, loggerName := range namesSlice { // no name, use the default logger if loggerName == "" { loggers = append(loggers, logger) continue } // make a logger with the given name loggers = append(loggers, logger.Named(loggerName.(string))) } return loggers } } // get the hostname from the request, with the port number stripped host, _, err := net.SplitHostPort(req.Host) if err != nil { host = req.Host } // get the logger names for this host from the config hosts := slc.getLoggerHosts(host) // make a list of named loggers, or the default logger loggers := make([]*zap.Logger, 0, len(hosts)) for _, loggerName := range hosts { // no name, use the default logger if loggerName == "" { loggers = append(loggers, logger) continue } // make a logger with the given name loggers = append(loggers, logger.Named(loggerName)) } return loggers } func (slc ServerLogConfig) getLoggerHosts(host string) []string { // try the exact hostname first if hosts, ok := slc.LoggerNames[host]; ok { return hosts } // try matching wildcard domains if other non-specific loggers exist labels := strings.Split(host, ".") for i := range labels { if labels[i] == "" { continue } labels[i] = "*" wildcardHost := strings.Join(labels, ".") if hosts, ok := slc.LoggerNames[wildcardHost]; ok { return hosts } } return []string{slc.DefaultLoggerName} } func (slc *ServerLogConfig) clone() *ServerLogConfig { clone := &ServerLogConfig{ DefaultLoggerName: slc.DefaultLoggerName, LoggerNames: make(map[string]StringArray), SkipHosts: append([]string{}, slc.SkipHosts...), SkipUnmappedHosts: slc.SkipUnmappedHosts, ShouldLogCredentials: 
slc.ShouldLogCredentials, } for k, v := range slc.LoggerNames { clone.LoggerNames[k] = append([]string{}, v...) } return clone } // StringArray is a slices of strings, but also accepts // a single string as a value when JSON unmarshaling, // converting it to a slice of one string. type StringArray []string // UnmarshalJSON satisfies json.Unmarshaler. func (sa *StringArray) UnmarshalJSON(b []byte) error { var jsonObj any err := json.Unmarshal(b, &jsonObj) if err != nil { return err } switch obj := jsonObj.(type) { case string: *sa = StringArray([]string{obj}) return nil case []any: s := make([]string, 0, len(obj)) for _, v := range obj { value, ok := v.(string) if !ok { return errors.New("unsupported type") } s = append(s, value) } *sa = StringArray(s) return nil } return errors.New("unsupported type") } // errLogValues inspects err and returns the status code // to use, the error log message, and any extra fields. // If err is a HandlerError, the returned values will // have richer information. func errLogValues(err error) (status int, msg string, fields func() []zapcore.Field) { var handlerErr HandlerError if errors.As(err, &handlerErr) { status = handlerErr.StatusCode if handlerErr.Err == nil { msg = err.Error() } else { msg = handlerErr.Err.Error() } fields = func() []zapcore.Field { return []zapcore.Field{ zap.Int("status", handlerErr.StatusCode), zap.String("err_id", handlerErr.ID), zap.String("err_trace", handlerErr.Trace), } } return status, msg, fields } fields = func() []zapcore.Field { return []zapcore.Field{ zap.Error(err), } } status = http.StatusInternalServerError msg = err.Error() return status, msg, fields } // ExtraLogFields is a list of extra fields to log with every request. type ExtraLogFields struct { fields []zapcore.Field handlers sync.Map } // Add adds a field to the list of extra fields to log. 
func (e *ExtraLogFields) Add(field zap.Field) { e.handlers.Clear() e.fields = append(e.fields, field) } // Set sets a field in the list of extra fields to log. // If the field already exists, it is replaced. func (e *ExtraLogFields) Set(field zap.Field) { e.handlers.Clear() for i := range e.fields { if e.fields[i].Key == field.Key { e.fields[i] = field return } } e.fields = append(e.fields, field) } func (e *ExtraLogFields) getSloggerHandler(handler *extraFieldsSlogHandler) (h slog.Handler) { if existing, ok := e.handlers.Load(handler); ok { return existing.(slog.Handler) } if handler.moduleID == "" { h = zapslog.NewHandler(handler.core.With(e.fields)) } else { h = zapslog.NewHandler(handler.core.With(e.fields), zapslog.WithName(handler.moduleID)) } if handler.group != "" { h = h.WithGroup(handler.group) } if handler.attrs != nil { h = h.WithAttrs(handler.attrs) } e.handlers.Store(handler, h) return h } const ( // Variable name used to indicate that this request // should be omitted from the access logs LogSkipVar string = "log_skip" // For adding additional fields to the access logs ExtraLogFieldsCtxKey caddy.CtxKey = "extra_log_fields" // Variable name used to indicate the logger to be used AccessLoggerNameVarKey string = "access_logger_names" ) type extraFieldsSlogHandler struct { defaultHandler slog.Handler core zapcore.Core moduleID string group string attrs []slog.Attr } func (e *extraFieldsSlogHandler) Enabled(ctx context.Context, level slog.Level) bool { return e.defaultHandler.Enabled(ctx, level) } func (e *extraFieldsSlogHandler) Handle(ctx context.Context, record slog.Record) error { if elf, ok := ctx.Value(ExtraLogFieldsCtxKey).(*ExtraLogFields); ok { return elf.getSloggerHandler(e).Handle(ctx, record) } return e.defaultHandler.Handle(ctx, record) } func (e *extraFieldsSlogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return &extraFieldsSlogHandler{ e.defaultHandler.WithAttrs(attrs), e.core, e.moduleID, e.group, append(e.attrs, attrs...), } } 
func (e *extraFieldsSlogHandler) WithGroup(name string) slog.Handler { return &extraFieldsSlogHandler{ e.defaultHandler.WithGroup(name), e.core, e.moduleID, name, e.attrs, } } ================================================ FILE: modules/caddyhttp/map/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package maphandler import ( "strings" "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { httpcaddyfile.RegisterHandlerDirective("map", parseCaddyfile) } // parseCaddyfile sets up the map handler from Caddyfile tokens. Syntax: // // map [] { // [~] // default // } // // If the input value is prefixed with a tilde (~), then the input will be parsed as a // regular expression. // // The Caddyfile adapter treats outputs that are a literal hyphen (-) as a null/nil // value. This is useful if you want to fall back to default for that particular output. // // The number of outputs for each mapping must not be more than the number of destinations. // However, for convenience, there may be fewer outputs than destinations and any missing // outputs will be filled in implicitly. 
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { h.Next() // consume directive name var handler Handler // source if !h.NextArg() { return nil, h.ArgErr() } handler.Source = h.Val() // destinations handler.Destinations = h.RemainingArgs() if len(handler.Destinations) == 0 { return nil, h.Err("missing destination argument(s)") } for _, dest := range handler.Destinations { if shorthand := httpcaddyfile.WasReplacedPlaceholderShorthand(dest); shorthand != "" { return nil, h.Errf("destination %s conflicts with a Caddyfile placeholder shorthand", shorthand) } } // mappings for h.NextBlock(0) { // defaults are a special case if h.Val() == "default" { if len(handler.Defaults) > 0 { return nil, h.Err("defaults already defined") } handler.Defaults = h.RemainingArgs() for len(handler.Defaults) < len(handler.Destinations) { handler.Defaults = append(handler.Defaults, "") } continue } // every line maps an input value to one or more outputs in := h.Val() var outs []any for h.NextArg() { val := h.ScalarVal() if val == "-" { outs = append(outs, nil) } else { outs = append(outs, val) } } // cannot have more outputs than destinations if len(outs) > len(handler.Destinations) { return nil, h.Err("too many outputs") } // for convenience, can have fewer outputs than destinations, but the // underlying handler won't accept that, so we fill in nil values for len(outs) < len(handler.Destinations) { outs = append(outs, nil) } // create the mapping mapping := Mapping{Outputs: outs} if strings.HasPrefix(in, "~") { mapping.InputRegexp = in[1:] } else { mapping.Input = in } handler.Mappings = append(handler.Mappings, mapping) } return handler, nil } ================================================ FILE: modules/caddyhttp/map/map.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the 
License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package maphandler import ( "fmt" "net/http" "regexp" "slices" "strings" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { caddy.RegisterModule(Handler{}) } // Handler implements a middleware that maps inputs to outputs. Specifically, it // compares a source value against the map inputs, and for one that matches, it // applies the output values to each destination. Destinations become placeholder // names. // // Mapped placeholders are not evaluated until they are used, so even for very // large mappings, this handler is quite efficient. type Handler struct { // Source is the placeholder from which to get the input value. Source string `json:"source,omitempty"` // Destinations are the names of placeholders in which to store the outputs. // Destination values should be wrapped in braces, for example, {my_placeholder}. Destinations []string `json:"destinations,omitempty"` // Mappings from source values (inputs) to destination values (outputs). // The first matching, non-nil mapping will be applied. Mappings []Mapping `json:"mappings,omitempty"` // If no mappings match or if the mapped output is null/nil, the associated // default output will be applied (optional). Defaults []string `json:"defaults,omitempty"` } // CaddyModule returns the Caddy module information. func (Handler) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.map", New: func() caddy.Module { return new(Handler) }, } } // Provision sets up h. 
func (h *Handler) Provision(_ caddy.Context) error { for j, dest := range h.Destinations { if strings.Count(dest, "{") != 1 || !strings.HasPrefix(dest, "{") { return fmt.Errorf("destination must be a placeholder and only a placeholder") } h.Destinations[j] = strings.Trim(dest, "{}") } for i, m := range h.Mappings { if m.InputRegexp == "" { continue } var err error h.Mappings[i].re, err = regexp.Compile(m.InputRegexp) if err != nil { return fmt.Errorf("compiling regexp for mapping %d: %v", i, err) } } // TODO: improve efficiency even further by using an actual map type // for the non-regexp mappings, OR sort them and do a binary search return nil } // Validate ensures that h is configured properly. func (h *Handler) Validate() error { nDest, nDef := len(h.Destinations), len(h.Defaults) if nDef > 0 && nDef != nDest { return fmt.Errorf("%d destinations != %d defaults", nDest, nDef) } seen := make(map[string]int) for i, m := range h.Mappings { // prevent confusing/ambiguous mappings if m.Input != "" && m.InputRegexp != "" { return fmt.Errorf("mapping %d has both input and input_regexp fields specified, which is confusing", i) } // prevent duplicate mappings input := m.Input if m.InputRegexp != "" { input = m.InputRegexp } if prev, ok := seen[input]; ok { return fmt.Errorf("mapping %d has a duplicate input '%s' previously used with mapping %d", i, input, prev) } seen[input] = i // ensure mappings have 1:1 output-to-destination correspondence nOut := len(m.Outputs) if nOut != nDest { return fmt.Errorf("mapping %d has %d outputs but there are %d destinations defined", i, nOut, nDest) } } return nil } func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) // defer work until a variable is actually evaluated by using replacer's Map callback repl.Map(func(key string) (any, bool) { // return early if the variable is not even a configured destination destIdx := 
slices.Index(h.Destinations, key) if destIdx < 0 { return nil, false } input := repl.ReplaceAll(h.Source, "") // find the first mapping matching the input and return // the requested destination/output value for _, m := range h.Mappings { output := m.Outputs[destIdx] if output == nil { continue } outputStr := caddy.ToString(output) // evaluate regular expression if configured if m.re != nil { var result []byte matches := m.re.FindStringSubmatchIndex(input) if matches == nil { continue } result = m.re.ExpandString(result, outputStr, input, matches) return string(result), true } // otherwise simple string comparison if input == m.Input { return repl.ReplaceAll(outputStr, ""), true } } // fall back to default if no match or if matched nil value if len(h.Defaults) > destIdx { return repl.ReplaceAll(h.Defaults[destIdx], ""), true } return nil, true }) return next.ServeHTTP(w, r) } // Mapping describes a mapping from input to outputs. type Mapping struct { // The input value to match. Must be distinct from other mappings. // Mutually exclusive to input_regexp. Input string `json:"input,omitempty"` // The input regular expression to match. Mutually exclusive to input. InputRegexp string `json:"input_regexp,omitempty"` // Upon a match with the input, each output is positionally correlated // with each destination of the parent handler. An output that is null // (nil) will be treated as if it was not mapped at all. 
	Outputs []any `json:"outputs,omitempty"`

	// compiled form of InputRegexp; set during Provision
	re *regexp.Regexp
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Handler)(nil)
	_ caddy.Validator             = (*Handler)(nil)
	_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
)


================================================
FILE: modules/caddyhttp/map/map_test.go
================================================
package maphandler

import (
	"context"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func TestHandler(t *testing.T) {
	for i, tc := range []struct {
		handler Handler
		reqURI  string
		expect  map[string]any
	}{
		{
			// exact string input match
			reqURI: "/foo",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Mappings: []Mapping{
					{
						Input:   "/foo",
						Outputs: []any{"FOO"},
					},
				},
			},
			expect: map[string]any{
				"output": "FOO",
			},
		},
		{
			// regexp input matching a prefix of the path
			reqURI: "/abcdef",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Mappings: []Mapping{
					{
						InputRegexp: "(/abc)",
						Outputs:     []any{"ABC"},
					},
				},
			},
			expect: map[string]any{
				"output": "ABC",
			},
		},
		{
			// regexp capture group expanded into the output
			reqURI: "/ABCxyzDEF",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Mappings: []Mapping{
					{
						InputRegexp: "(xyz)",
						Outputs:     []any{"...${1}..."},
					},
				},
			},
			expect: map[string]any{
				"output": "...xyz...",
			},
		},
		{
			// Test case from https://caddy.community/t/map-directive-and-regular-expressions/13866/14?u=matt
			reqURI: "/?s=0%27+AND+%28SELECT+0+FROM+%28SELECT+count%28%2A%29%2C+CONCAT%28%28SELECT+%40%40version%29%2C+0x23%2C+FLOOR%28RAND%280%29%2A2%29%29+AS+x+FROM+information_schema.columns+GROUP+BY+x%29+y%29+-+-+%27",
			handler: Handler{
				Source:       "{http.request.uri}",
				Destinations: []string{"{output}"},
				Mappings: []Mapping{
					{
						InputRegexp: "(?i)(\\^|`|<|>|%|\\\\|\\{|\\}|\\|)",
						Outputs:     []any{"3"},
					},
				},
			},
			expect: map[string]any{
				"output": "3",
			},
		},
		{
			// output that is itself a placeholder gets replaced
			reqURI: "/foo",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Mappings: []Mapping{
					{
						Input:   "/foo",
						Outputs: []any{"{testvar}"},
					},
				},
			},
			expect: map[string]any{
				"output": "testing",
			},
		},
		{
			// no mappings: the default value is used
			reqURI: "/foo",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Defaults:     []string{"default"},
			},
			expect: map[string]any{
				"output": "default",
			},
		},
		{
			// default that is a placeholder gets replaced
			reqURI: "/foo",
			handler: Handler{
				Source:       "{http.request.uri.path}",
				Destinations: []string{"{output}"},
				Defaults:     []string{"{testvar}"},
			},
			expect: map[string]any{
				"output": "testing",
			},
		},
	} {
		if err := tc.handler.Provision(caddy.Context{}); err != nil {
			t.Fatalf("Test %d: Provisioning handler: %v", i, err)
		}

		req, err := http.NewRequest(http.MethodGet, tc.reqURI, nil)
		if err != nil {
			t.Fatalf("Test %d: Creating request: %v", i, err)
		}
		repl := caddyhttp.NewTestReplacer(req)
		repl.Set("testvar", "testing")
		ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
		req = req.WithContext(ctx)

		rr := httptest.NewRecorder()
		noop := caddyhttp.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) error { return nil })
		if err := tc.handler.ServeHTTP(rr, req, noop); err != nil {
			t.Errorf("Test %d: Handler returned error: %v", i, err)
			continue
		}

		for key, expected := range tc.expect {
			actual, _ := repl.Get(key)
			if !reflect.DeepEqual(actual, expected) {
				t.Errorf("Test %d: Expected %#v but got %#v for {%s}", i, expected, actual, key)
			}
		}
	}
}


================================================
FILE: modules/caddyhttp/marshalers.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "crypto/tls" "net" "net/http" "strings" "go.uber.org/zap/zapcore" ) // LoggableHTTPRequest makes an HTTP request loggable with zap.Object(). type LoggableHTTPRequest struct { *http.Request ShouldLogCredentials bool } // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error { ip, port, err := net.SplitHostPort(r.RemoteAddr) if err != nil { ip = r.RemoteAddr port = "" } enc.AddString("remote_ip", ip) enc.AddString("remote_port", port) if ip, ok := GetVar(r.Context(), ClientIPVarKey).(string); ok { enc.AddString("client_ip", ip) } enc.AddString("proto", r.Proto) enc.AddString("method", r.Method) enc.AddString("host", r.Host) enc.AddString("uri", r.RequestURI) enc.AddObject("headers", LoggableHTTPHeader{ Header: r.Header, ShouldLogCredentials: r.ShouldLogCredentials, }) if r.TransferEncoding != nil { enc.AddArray("transfer_encoding", LoggableStringArray(r.TransferEncoding)) } if r.TLS != nil { enc.AddObject("tls", LoggableTLSConnState(*r.TLS)) } return nil } // LoggableHTTPHeader makes an HTTP header loggable with zap.Object(). // Headers with potentially sensitive information (Cookie, Set-Cookie, // Authorization, and Proxy-Authorization) are logged with empty values. type LoggableHTTPHeader struct { http.Header ShouldLogCredentials bool } // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. 
func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error { if h.Header == nil { return nil } for key, val := range h.Header { if !h.ShouldLogCredentials { switch strings.ToLower(key) { case "cookie", "set-cookie", "authorization", "proxy-authorization": val = []string{"REDACTED"} // see #5669. I still think ▒▒▒▒ would be cool. } } enc.AddArray(key, LoggableStringArray(val)) } return nil } // LoggableStringArray makes a slice of strings marshalable for logging. type LoggableStringArray []string // MarshalLogArray satisfies the zapcore.ArrayMarshaler interface. func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error { if sa == nil { return nil } for _, s := range sa { enc.AppendString(s) } return nil } // LoggableTLSConnState makes a TLS connection state loggable with zap.Object(). type LoggableTLSConnState tls.ConnectionState // MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error { enc.AddBool("resumed", t.DidResume) enc.AddUint16("version", t.Version) enc.AddUint16("cipher_suite", t.CipherSuite) enc.AddString("proto", t.NegotiatedProtocol) enc.AddString("server_name", t.ServerName) enc.AddBool("ech", t.ECHAccepted) if len(t.PeerCertificates) > 0 { enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName) enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String()) } return nil } // Interface guards var ( _ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil) _ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil) _ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil) _ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil) ) ================================================ FILE: modules/caddyhttp/matchers.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use 
// this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/textproto"
	"net/url"
	"path"
	"regexp"
	"runtime"
	"slices"
	"sort"
	"strconv"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"golang.org/x/net/idna"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

type (
	// MatchHost matches requests by the Host value (case-insensitive).
	//
	// When used in a top-level HTTP route,
	// [qualifying domain names](/docs/automatic-https#hostname-requirements)
	// may trigger [automatic HTTPS](/docs/automatic-https), which automatically
	// provisions and renews certificates for you. Before doing this, you
	// should ensure that DNS records for these domains are properly configured,
	// especially A/AAAA pointed at your server.
	//
	// Automatic HTTPS can be
	// [customized or disabled](/docs/modules/http#servers/automatic_https).
	//
	// Wildcards (`*`) may be used to represent exactly one label of the
	// hostname, in accordance with RFC 1034 (because host matchers are also
	// used for automatic HTTPS which influences TLS certificates). Thus,
	// a host of `*` matches hosts like `localhost` or `internal` but not
	// `example.com`. To catch all hosts, omit the host matcher entirely.
	//
	// The wildcard can be useful for matching all subdomains, for example:
	// `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
	//
	// Duplicate entries will return an error.
	MatchHost []string

	// MatchPath case-insensitively matches requests by the URI's path. Path
	// matching is exact, not prefix-based, giving you more control and clarity
	// over matching. Wildcards (`*`) may be used:
	//
	// - At the end only, for a prefix match (`/prefix/*`)
	// - At the beginning only, for a suffix match (`*.suffix`)
	// - On both sides only, for a substring match (`*/contains/*`)
	// - In the middle, for a globular match (`/accounts/*/info`)
	//
	// Slashes are significant; i.e. `/foo*` matches `/foo`, `/foo/`, `/foo/bar`,
	// and `/foobar`; but `/foo/*` does not match `/foo` or `/foobar`. Valid
	// paths start with a slash `/`.
	//
	// Because there are, in general, multiple possible escaped forms of any
	// path, path matchers operate in unescaped space; that is, path matchers
	// should be written in their unescaped form to prevent ambiguities and
	// possible security issues, as all request paths will be normalized to
	// their unescaped forms before matcher evaluation.
	//
	// However, escape sequences in a match pattern are supported; they are
	// compared with the request's raw/escaped path for those bytes only.
	// In other words, a matcher of `/foo%2Fbar` will match a request path
	// of precisely `/foo%2Fbar`, but not `/foo/bar`. It follows that matching
	// the literal percent sign (%) in normalized space can be done using the
	// escaped form, `%25`.
	//
	// Even though wildcards (`*`) operate in the normalized space, the special
	// escaped wildcard (`%*`), which is not a valid escape sequence, may be
	// used in place of a span that should NOT be decoded; that is, `/bands/%*`
	// will match `/bands/AC%2fDC` whereas `/bands/*` will not.
	//
	// This matcher is fast, so it does not support regular expressions or
	// capture groups. For slower but more powerful matching, use the
	// path_regexp matcher. (Note that due to the special treatment of
	// escape sequences in matcher patterns, they may perform slightly slower
	// in high-traffic environments.)
	MatchPath []string

	// MatchPathRE matches requests by a regular expression on the URI's path.
	// Path matching is performed in the unescaped (decoded) form of the path.
	//
	// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
	// where `name` is the regular expression's name, and `capture_group` is either
	// the named or positional capture group from the expression itself. If no name
	// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
	// (potentially leading to collisions).
	MatchPathRE struct{ MatchRegexp }

	// MatchMethod matches requests by the method.
	MatchMethod []string

	// MatchQuery matches requests by the URI's query string. It takes a JSON object
	// keyed by the query keys, with an array of string values to match for that key.
	// Query key matches are exact, but wildcards may be used for value matches. Both
	// keys and values may be placeholders.
	//
	// An example of the structure to match `?key=value&topic=api&query=something` is:
	//
	// ```json
	// {
	// 	"key": ["value"],
	// 	"topic": ["api"],
	// 	"query": ["*"]
	// }
	// ```
	//
	// Invalid query strings, including those with bad escapings or illegal characters
	// like semicolons, will fail to parse and thus fail to match.
	//
	// **NOTE:** Notice that query string values are arrays, not singular values. This is
	// because repeated keys are valid in query strings, and each one may have a
	// different value. This matcher will match for a key if any one of its configured
	// values is assigned in the query string. Backend applications relying on query
	// strings MUST take into consideration that query string values are arrays and can
	// have multiple values.
	MatchQuery url.Values

	// MatchHeader matches requests by header fields. The key is the field
	// name and the array is the list of field values. It performs fast,
	// exact string comparisons of the field values. Fast prefix, suffix,
	// and substring matches can also be done by suffixing, prefixing, or
	// surrounding the value with the wildcard `*` character, respectively.
	// If a list is null, the header must not exist. If the list is empty,
	// the field must simply exist, regardless of its value.
	//
	// **NOTE:** Notice that header values are arrays, not singular values. This is
	// because repeated fields are valid in headers, and each one may have a
	// different value. This matcher will match for a field if any one of its configured
	// values matches in the header. Backend applications relying on headers MUST take
	// into consideration that header field values are arrays and can have multiple
	// values.
	MatchHeader http.Header

	// MatchHeaderRE matches requests by a regular expression on header fields.
	//
	// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
	// where `name` is the regular expression's name, and `capture_group` is either
	// the named or positional capture group from the expression itself. If no name
	// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
	// (potentially leading to collisions).
	MatchHeaderRE map[string]*MatchRegexp

	// MatchProtocol matches requests by protocol. Recognized values are
	// "http", "https", and "grpc" for broad protocol matches, or specific
	// HTTP versions can be specified like so: "http/1", "http/1.1",
	// "http/2", "http/3", or minimum versions: "http/2+", etc.
	MatchProtocol string

	// MatchTLS matches HTTP requests based on the underlying
	// TLS connection state. If this matcher is specified but
	// the request did not come over TLS, it will never match.
	// If this matcher is specified but is empty and the request
	// did come in over TLS, it will always match.
	MatchTLS struct {
		// Matches if the TLS handshake has completed. QUIC 0-RTT early
		// data may arrive before the handshake completes. Generally, it
		// is unsafe to replay these requests if they are not idempotent;
		// additionally, the remote IP of early data packets can more
		// easily be spoofed. It is conventional to respond with HTTP 425
		// Too Early if the request cannot risk being processed in this
		// state.
		HandshakeComplete *bool `json:"handshake_complete,omitempty"`
	}

	// MatchNot matches requests by negating the results of its matcher
	// sets. A single "not" matcher takes one or more matcher sets. Each
	// matcher set is OR'ed; in other words, if any matcher set returns
	// true, the final result of the "not" matcher is false. Individual
	// matchers within a set work the same (i.e. different matchers in
	// the same set are AND'ed).
	//
	// NOTE: The generated docs which describe the structure of this
	// module are wrong because of how this type unmarshals JSON in a
	// custom way. The correct structure is:
	//
	// ```json
	// [
	// 	{},
	// 	{}
	// ]
	// ```
	//
	// where each of the array elements is a matcher set, i.e. an
	// object keyed by matcher name.
	MatchNot struct {
		MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"`
		MatcherSets    []MatcherSet      `json:"-"`
	}
)

func init() {
	caddy.RegisterModule(MatchHost{})
	caddy.RegisterModule(MatchPath{})
	caddy.RegisterModule(MatchPathRE{})
	caddy.RegisterModule(MatchMethod{})
	caddy.RegisterModule(MatchQuery{})
	caddy.RegisterModule(MatchHeader{})
	caddy.RegisterModule(MatchHeaderRE{})
	caddy.RegisterModule(new(MatchProtocol))
	caddy.RegisterModule(MatchTLS{})
	caddy.RegisterModule(MatchNot{})
}

// CaddyModule returns the Caddy module information.
func (MatchHost) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.host",
		New: func() caddy.Module { return new(MatchHost) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // iterate to merge multiple matchers into one for d.Next() { *m = append(*m, d.RemainingArgs()...) if d.NextBlock(0) { return d.Err("malformed host matcher: blocks are not supported") } } return nil } // Provision sets up and validates m, including making it more efficient for large lists. func (m MatchHost) Provision(_ caddy.Context) error { // check for duplicates; they are nonsensical and reduce efficiency // (we could just remove them, but the user should know their config is erroneous) seen := make(map[string]int, len(m)) for i, host := range m { asciiHost, err := idna.ToASCII(host) if err != nil { return fmt.Errorf("converting hostname '%s' to ASCII: %v", host, err) } normalizedHost := strings.ToLower(asciiHost) if firstI, ok := seen[normalizedHost]; ok { return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, host) } // Normalize exact hosts for standardized comparison in large-list fastpath later on. // Keep wildcards/placeholders untouched. if m.fuzzy(asciiHost) { m[i] = asciiHost } else { m[i] = normalizedHost } seen[normalizedHost] = i } if m.large() { // sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders) // at the front of the list; this allows us to use binary search for exact matches, which // we have seen from experience is the most common kind of value in large lists; and any // other kinds of values (wildcards and placeholders) are grouped in front so the linear // search should find a match fairly quickly sort.Slice(m, func(i, j int) bool { iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j]) if iInexact && !jInexact { return true } if !iInexact && jInexact { return false } return m[i] < m[j] }) } return nil } // Match returns true if r matches m. func (m MatchHost) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. 
func (m MatchHost) MatchWithError(r *http.Request) (bool, error) { reqHost, _, err := net.SplitHostPort(r.Host) if err != nil { // OK; probably didn't have a port reqHost = r.Host // make sure we strip the brackets from IPv6 addresses reqHost = strings.TrimPrefix(reqHost, "[") reqHost = strings.TrimSuffix(reqHost, "]") } if m.large() { reqHostLower := strings.ToLower(reqHost) // fast path: locate exact match using binary search (about 100-1000x faster for large lists) pos := sort.Search(len(m), func(i int) bool { if m.fuzzy(m[i]) { return false } return m[i] >= reqHostLower }) if pos < len(m) && m[pos] == reqHostLower { return true, nil } } repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) outer: for _, host := range m { // fast path: if matcher is large, we already know we don't have an exact // match, so we're only looking for fuzzy match now, which should be at the // front of the list; if we have reached a value that is not fuzzy, there // will be no match and we can short-circuit for efficiency if m.large() && !m.fuzzy(host) { break } host = repl.ReplaceAll(host, "") if strings.Contains(host, "*") { patternParts := strings.Split(host, ".") incomingParts := strings.Split(reqHost, ".") if len(patternParts) != len(incomingParts) { continue } for i := range patternParts { if patternParts[i] == "*" { continue } if !strings.EqualFold(patternParts[i], incomingParts[i]) { continue outer } } return true, nil } else if strings.EqualFold(reqHost, host) { return true, nil } } return false, nil } // CELLibrary produces options that expose this matcher for use in CEL // expression matchers. 
// // Example: // // expression host('localhost') func (MatchHost) CELLibrary(ctx caddy.Context) (cel.Library, error) { return CELMatcherImpl( "host", "host_match_request_list", []*cel.Type{cel.ListType(cel.StringType)}, func(data ref.Val) (RequestMatcherWithError, error) { refStringList := stringSliceType strList, err := data.ConvertToNative(refStringList) if err != nil { return nil, err } matcher := MatchHost(strList.([]string)) err = matcher.Provision(ctx) return matcher, err }, ) } // fuzzy returns true if the given hostname h is not a specific // hostname, e.g. has placeholders or wildcards. func (MatchHost) fuzzy(h string) bool { return strings.ContainsAny(h, "{*") } // large returns true if m is considered to be large. Optimizing // the matcher for smaller lists has diminishing returns. // See related benchmark function in test file to conduct experiments. func (m MatchHost) large() bool { return len(m) > 100 } // CaddyModule returns the Caddy module information. func (MatchPath) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.path", New: func() caddy.Module { return new(MatchPath) }, } } // Provision lower-cases the paths in m to ensure case-insensitive matching. func (m MatchPath) Provision(_ caddy.Context) error { for i := range m { if m[i] == "*" && i > 0 { // will always match, so just put it first m[0] = m[i] break } m[i] = strings.ToLower(m[i]) } return nil } // Match returns true if r matches m. func (m MatchPath) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. 
func (m MatchPath) MatchWithError(r *http.Request) (bool, error) {
	// Even though RFC 9110 says that path matching is case-sensitive
	// (https://www.rfc-editor.org/rfc/rfc9110.html#section-4.2.3),
	// we do case-insensitive matching to mitigate security issues
	// related to differences between operating systems, applications,
	// etc; if case-sensitive matching is needed, the regex matcher
	// can be used instead.
	reqPath := strings.ToLower(r.URL.Path)

	// See #2917; Windows ignores trailing dots and spaces
	// when accessing files (sigh), potentially causing a
	// security risk (cry) if PHP files end up being served
	// as static files, exposing the source code, instead of
	// being matched by *.php to be treated as PHP scripts.
	if runtime.GOOS == "windows" { // issue #5613
		reqPath = strings.TrimRight(reqPath, ". ")
	}

	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	for _, matchPattern := range m {
		matchPattern = repl.ReplaceAll(matchPattern, "")

		// special case: whole path is wildcard; this is unnecessary
		// as it matches all requests, which is the same as no matcher
		if matchPattern == "*" {
			return true, nil
		}

		// Clean the path, merge doubled slashes, etc.
		// This ensures maliciously crafted requests can't bypass
		// the path matcher. See #4407. Good security posture
		// requires that we should do all we can to reduce any
		// funny-looking paths into "normalized" forms such that
		// weird variants can't sneak by.
		//
		// How we clean the path depends on the kind of pattern:
		// we either merge slashes or we don't. If the pattern
		// has double slashes, we preserve them in the path.
		//
		// TODO: Despite the fact that the *vast* majority of path
		// matchers have only 1 pattern, a possible optimization is
		// to remember the cleaned form of the path for future
		// iterations; it's just that the way we clean depends on
		// the kind of pattern.
		mergeSlashes := !strings.Contains(matchPattern, "//")

		// if '%' appears in the match pattern, we interpret that to mean
		// the intent is to compare that part of the path in raw/escaped
		// space; i.e. "%40"=="%40", not "@", and "%2F"=="%2F", not "/"
		if strings.Contains(matchPattern, "%") {
			reqPathForPattern := CleanPath(r.URL.EscapedPath(), mergeSlashes)
			if m.matchPatternWithEscapeSequence(reqPathForPattern, matchPattern) {
				return true, nil
			}

			// doing prefix/suffix/substring matches doesn't make sense
			continue
		}

		reqPathForPattern := CleanPath(reqPath, mergeSlashes)

		// for substring, prefix, and suffix matching, only perform those
		// special, fast matches if they are the only wildcards in the pattern;
		// otherwise we assume a globular match if any * appears in the middle

		// special case: first and last characters are wildcard,
		// treat it as a fast substring match
		if strings.Count(matchPattern, "*") == 2 &&
			strings.HasPrefix(matchPattern, "*") &&
			strings.HasSuffix(matchPattern, "*") {
			if strings.Contains(reqPathForPattern, matchPattern[1:len(matchPattern)-1]) {
				return true, nil
			}
			continue
		}

		// only perform prefix/suffix match if it is the only wildcard...
		// I think that is more correct most of the time
		if strings.Count(matchPattern, "*") == 1 {
			// special case: first character is a wildcard,
			// treat it as a fast suffix match
			if strings.HasPrefix(matchPattern, "*") {
				if strings.HasSuffix(reqPathForPattern, matchPattern[1:]) {
					return true, nil
				}
				continue
			}

			// special case: last character is a wildcard,
			// treat it as a fast prefix match
			if strings.HasSuffix(matchPattern, "*") {
				if strings.HasPrefix(reqPathForPattern, matchPattern[:len(matchPattern)-1]) {
					return true, nil
				}
				continue
			}
		}

		// at last, use globular matching, which also is exact matching
		// if there are no glob/wildcard chars; we ignore the error here
		// because we can't handle it anyway
		matches, _ := path.Match(matchPattern, reqPathForPattern)
		if matches {
			return true, nil
		}
	}
	return false, nil
}

// matchPatternWithEscapeSequence reports whether escapedPath matches the
// pattern matchPath, where escape sequences (%XX) in the pattern are compared
// against the raw/escaped request path rather than its decoded form.
func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) bool {
	escapedPath = strings.ToLower(escapedPath)

	// We would just compare the pattern against r.URL.Path,
	// but the pattern contains %, indicating that we should
	// compare at least some part of the path in raw/escaped
	// space, not normalized space; so we build the string we
	// will compare against by adding the normalized parts
	// of the path, then switching to the escaped parts where
	// the pattern hints to us wherever % is present.
	var sb strings.Builder

	// iterate the pattern and escaped path in lock-step;
	// increment iPattern every time we consume a char from the pattern,
	// increment iPath every time we consume a char from the path;
	// iPattern and iPath are our cursors/iterator positions for each string
	var iPattern, iPath int
	for {
		if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
			break
		}

		// get the next character from the request path

		pathCh := string(escapedPath[iPath])
		var escapedPathCh string

		// normalize (decode) escape sequences
		if pathCh == "%" && len(escapedPath) >= iPath+3 {
			// hold onto this in case we find out the intent is to match in escaped space here;
			// we lowercase it even though technically the spec says: "For consistency, URI
			// producers and normalizers should use uppercase hexadecimal digits for all percent-
			// encodings" (RFC 3986 section 2.1) - we lowercased the matcher pattern earlier in
			// provisioning so we do the same here to gain case-insensitivity in equivalence;
			// besides, this string is never shown visibly
			escapedPathCh = strings.ToLower(escapedPath[iPath : iPath+3])

			var err error
			pathCh, err = url.PathUnescape(escapedPathCh)
			if err != nil {
				// should be impossible unless EscapedPath() is giving us an invalid sequence!
				return false
			}
			iPath += 2 // escape sequence is 2 bytes longer than normal char
		}

		// now get the next character from the pattern

		normalize := true
		switch matchPath[iPattern] {
		case '%':
			// escape sequence

			// if not a wildcard ("%*"), compare literally; consume next two bytes of pattern
			// (note: if the corresponding request byte was not an escape sequence,
			// escapedPathCh is empty, so nothing is appended here)
			if len(matchPath) >= iPattern+3 && matchPath[iPattern+1] != '*' {
				sb.WriteString(escapedPathCh)
				iPath++
				iPattern += 2
				break
			}

			// escaped wildcard sequence; consume next byte only ('*')
			iPattern++
			normalize = false

			fallthrough
		case '*':
			// wildcard, so consume until next matching character
			remaining := escapedPath[iPath:]
			until := len(escapedPath) - iPath // go until end of string...
			if iPattern < len(matchPath)-1 {  // ...unless the * is not at the end
				nextCh := matchPath[iPattern+1]
				until = strings.IndexByte(remaining, nextCh)
				if until == -1 {
					// terminating char of wildcard span not found, so definitely no match
					return false
				}
			}

			if until == 0 {
				// empty span; nothing to add on this iteration
				break
			}
			next := remaining[:until]
			if normalize {
				var err error
				next, err = url.PathUnescape(next)
				if err != nil {
					return false // should be impossible anyway
				}
			}
			sb.WriteString(next)
			iPath += until
		default:
			sb.WriteString(pathCh)
			iPath++
		}

		iPattern++
	}

	// we can now treat rawpath globs (%*) as regular globs (*)
	matchPath = strings.ReplaceAll(matchPath, "%*", "*")

	// ignore error here because we can't handle it anyway
	matches, _ := path.Match(matchPath, strings.ToLower(sb.String()))
	return matches
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression path('*substring*', '*suffix')
func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		// name of the macro, this is the function name that users see when writing expressions.
		"path",
		// name of the function that the macro will be rewritten to call.
		"path_match_request_list",
		// internal data type of the MatchPath value.
		[]*cel.Type{cel.ListType(cel.StringType)},
		// function to convert a constant list of strings to a MatchPath instance.
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			matcher := MatchPath(strList.([]string))
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		*m = append(*m, d.RemainingArgs()...)
		if d.NextBlock(0) {
			return d.Err("malformed path matcher: blocks are not supported")
		}
	}
	return nil
}

// CaddyModule returns the Caddy module information.
func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.path_regexp",
		New: func() caddy.Module { return new(MatchPathRE) },
	}
}

// Match returns true if r matches m.
func (m MatchPathRE) Match(r *http.Request) bool {
	match, _ := m.MatchWithError(r)
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchPathRE) MatchWithError(r *http.Request) (bool, error) {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// Clean the path, merges doubled slashes, etc.
	// This ensures maliciously crafted requests can't bypass
	// the path matcher. See #4407
	cleanedPath := cleanPath(r.URL.Path)

	return m.MatchRegexp.Match(cleanedPath, repl), nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression path_regexp('^/bar')
func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	unnamedPattern, err := CELMatcherImpl(
		"path_regexp",
		"path_regexp_request_string",
		[]*cel.Type{cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			pattern := data.(types.String)
			matcher := MatchPathRE{MatchRegexp{
				Name:    ctx.Value(MatcherNameCtxKey).(string),
				Pattern: string(pattern),
			}}
			err := matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	namedPattern, err := CELMatcherImpl(
		"path_regexp",
		"path_regexp_request_string_string",
		[]*cel.Type{cel.StringType, cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			params, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			strParams := params.([]string)
			name := strParams[0]
			if name == "" {
				name = ctx.Value(MatcherNameCtxKey).(string)
			}
			matcher := MatchPathRE{MatchRegexp{
				Name:    name,
				Pattern: strParams[1],
			}}
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
	prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
	return NewMatcherCELLibrary(envOpts, prgOpts), nil
}

// CaddyModule returns the Caddy module information.
func (MatchMethod) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.method",
		New: func() caddy.Module { return new(MatchMethod) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		*m = append(*m, d.RemainingArgs()...)
		if d.NextBlock(0) {
			return d.Err("malformed method matcher: blocks are not supported")
		}
	}
	return nil
}

// Match returns true if r matches m.
func (m MatchMethod) Match(r *http.Request) bool {
	match, _ := m.MatchWithError(r)
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchMethod) MatchWithError(r *http.Request) (bool, error) {
	return slices.Contains(m, r.Method), nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression method('PUT', 'POST')
func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		"method",
		"method_request_list",
		[]*cel.Type{cel.ListType(cel.StringType)},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			strList, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			return MatchMethod(strList.([]string)), nil
		},
	)
}

// CaddyModule returns the Caddy module information.
func (MatchQuery) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.query",
		New: func() caddy.Module { return new(MatchQuery) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { if *m == nil { *m = make(map[string][]string) } // iterate to merge multiple matchers into one for d.Next() { for _, query := range d.RemainingArgs() { if query == "" { continue } before, after, found := strings.Cut(query, "=") if !found { return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val()) } url.Values(*m).Add(before, after) } if d.NextBlock(0) { return d.Err("malformed query matcher: blocks are not supported") } } return nil } // Match returns true if r matches m. An empty m matches an empty query string. func (m MatchQuery) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. // An empty m matches an empty query string. func (m MatchQuery) MatchWithError(r *http.Request) (bool, error) { // If no query keys are configured, this only // matches an empty query string. if len(m) == 0 { return len(r.URL.Query()) == 0, nil } repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) // parse query string just once, for efficiency parsed, err := url.ParseQuery(r.URL.RawQuery) if err != nil { // Illegal query string. Likely bad escape sequence or unescaped literals. // Note that semicolons in query string have a controversial history. Summaries: // - https://github.com/golang/go/issues/50034 // - https://github.com/golang/go/issues/25192 // Despite the URL WHATWG spec mandating the use of & separators for query strings, // every URL parser implementation is different, and Filippo Valsorda rightly wrote: // "Relying on parser alignment for security is doomed." Overall conclusion is that // splitting on & and rejecting ; in key=value pairs is safer than accepting raw ;. // We regard the Go team's decision as sound and thus reject malformed query strings. 
return false, nil } // Count the amount of matched keys, to ensure we AND // between all configured query keys; all keys must // match at least one value. matchedKeys := 0 for param, vals := range m { param = repl.ReplaceAll(param, "") paramVal, found := parsed[param] if !found { return false, nil } for _, v := range vals { v = repl.ReplaceAll(v, "") if slices.Contains(paramVal, v) || v == "*" { matchedKeys++ break } } } return matchedKeys == len(m), nil } // CELLibrary produces options that expose this matcher for use in CEL // expression matchers. // // Example: // // expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']}) func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) { return CELMatcherImpl( "query", "query_matcher_request_map", []*cel.Type{CELTypeJSON}, func(data ref.Val) (RequestMatcherWithError, error) { mapStrListStr, err := CELValueToMapStrList(data) if err != nil { return nil, err } return MatchQuery(url.Values(mapStrListStr)), nil }, ) } // CaddyModule returns the Caddy module information. func (MatchHeader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.header", New: func() caddy.Module { return new(MatchHeader) }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { if *m == nil { *m = make(map[string][]string) } // iterate to merge multiple matchers into one for d.Next() { var field, val string if !d.Args(&field) { return d.Errf("malformed header matcher: expected field") } if strings.HasPrefix(field, "!") { if len(field) == 1 { return d.Errf("malformed header matcher: must have field name following ! 
character") } field = field[1:] headers := *m headers[field] = nil m = &headers if d.NextArg() { return d.Errf("malformed header matcher: null matching headers cannot have a field value") } } else { if !d.NextArg() { return d.Errf("malformed header matcher: expected both field and value") } // If multiple header matchers with the same header field are defined, // we want to add the existing to the list of headers (will be OR'ed) val = d.Val() http.Header(*m).Add(field, val) } if d.NextBlock(0) { return d.Err("malformed header matcher: blocks are not supported") } } return nil } // Match returns true if r matches m. func (m MatchHeader) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. func (m MatchHeader) MatchWithError(r *http.Request) (bool, error) { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) return matchHeaders(r.Header, http.Header(m), r.Host, r.TransferEncoding, repl), nil } // CELLibrary produces options that expose this matcher for use in CEL // expression matchers. // // Example: // // expression header({'content-type': 'image/png'}) // expression header({'foo': ['bar', 'baz']}) // match bar or baz func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) { return CELMatcherImpl( "header", "header_matcher_request_map", []*cel.Type{CELTypeJSON}, func(data ref.Val) (RequestMatcherWithError, error) { mapStrListStr, err := CELValueToMapStrList(data) if err != nil { return nil, err } return MatchHeader(http.Header(mapStrListStr)), nil }, ) } // getHeaderFieldVals returns the field values for the given fieldName from input. // The host parameter should be obtained from the http.Request.Host field, and the // transferEncoding from http.Request.TransferEncoding, since net/http removes them // from the header map. 
func getHeaderFieldVals(input http.Header, fieldName, host string, transferEncoding []string) []string { fieldName = textproto.CanonicalMIMEHeaderKey(fieldName) if fieldName == "Host" && host != "" { return []string{host} } if fieldName == "Transfer-Encoding" && input[fieldName] == nil { return transferEncoding } return input[fieldName] } // matchHeaders returns true if input matches the criteria in against without regex. // The host parameter should be obtained from the http.Request.Host field since // net/http removes it from the header map. func matchHeaders(input, against http.Header, host string, transferEncoding []string, repl *caddy.Replacer) bool { for field, allowedFieldVals := range against { actualFieldVals := getHeaderFieldVals(input, field, host, transferEncoding) if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil { // a non-nil but empty list of allowed values means // match if the header field exists at all continue } if allowedFieldVals == nil && actualFieldVals == nil { // a nil list means match if the header does not exist at all continue } var match bool fieldVals: for _, actualFieldVal := range actualFieldVals { for _, allowedFieldVal := range allowedFieldVals { if repl != nil { allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "") } switch { case allowedFieldVal == "*": match = true case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"): match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1]) case strings.HasPrefix(allowedFieldVal, "*"): match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:]) case strings.HasSuffix(allowedFieldVal, "*"): match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1]) default: match = actualFieldVal == allowedFieldVal } if match { break fieldVals } } } if !match { return false } } return true } // CaddyModule returns the Caddy module information. 
func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.header_regexp", New: func() caddy.Module { return new(MatchHeaderRE) }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { if *m == nil { *m = make(map[string]*MatchRegexp) } // iterate to merge multiple matchers into one for d.Next() { var first, second, third string if !d.Args(&first, &second) { return d.ArgErr() } var name, field, val string if d.Args(&third) { name = first field = second val = third } else { field = first val = second } // Default to the named matcher's name, if no regexp name is provided if name == "" { name = d.GetContextString(caddyfile.MatcherNameCtxKey) } // If there's already a pattern for this field // then we would end up overwriting the old one if (*m)[field] != nil { return d.Errf("header_regexp matcher can only be used once per named matcher, per header field: %s", field) } (*m)[field] = &MatchRegexp{Pattern: val, Name: name} if d.NextBlock(0) { return d.Err("malformed header_regexp matcher: blocks are not supported") } } return nil } // Match returns true if r matches m. func (m MatchHeaderRE) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. func (m MatchHeaderRE) MatchWithError(r *http.Request) (bool, error) { for field, rm := range m { actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host, r.TransferEncoding) match := false fieldVal: for _, actualFieldVal := range actualFieldVals { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if rm.Match(actualFieldVal, repl) { match = true break fieldVal } } if !match { return false, nil } } return true, nil } // Provision compiles m's regular expressions. 
func (m MatchHeaderRE) Provision(ctx caddy.Context) error {
	for _, rm := range m {
		err := rm.Provision(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

// Validate validates m's regular expressions.
func (m MatchHeaderRE) Validate() error {
	for _, rm := range m {
		err := rm.Validate()
		if err != nil {
			return err
		}
	}
	return nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression header_regexp('foo', 'Field', 'fo+')
func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	// two-argument form: header_regexp(field, pattern); the regexp name
	// defaults to the enclosing named matcher's name
	unnamedPattern, err := CELMatcherImpl(
		"header_regexp",
		"header_regexp_request_string_string",
		[]*cel.Type{cel.StringType, cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			params, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			strParams := params.([]string)
			matcher := MatchHeaderRE{}
			matcher[strParams[0]] = &MatchRegexp{
				Pattern: strParams[1],
				Name:    ctx.Value(MatcherNameCtxKey).(string),
			}
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	// three-argument form: header_regexp(name, field, pattern)
	namedPattern, err := CELMatcherImpl(
		"header_regexp",
		"header_regexp_request_string_string_string",
		[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			params, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			strParams := params.([]string)
			name := strParams[0]
			if name == "" {
				// fall back to the enclosing named matcher's name
				name = ctx.Value(MatcherNameCtxKey).(string)
			}
			matcher := MatchHeaderRE{}
			matcher[strParams[1]] = &MatchRegexp{
				Pattern: strParams[2],
				Name:    name,
			}
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	// combine both overloads into a single library
	envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
	prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
	return NewMatcherCELLibrary(envOpts, prgOpts), nil
}

// CaddyModule returns the Caddy module information.
func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.protocol",
		New: func() caddy.Module { return new(MatchProtocol) },
	}
}

// Match returns true if r matches m.
func (m MatchProtocol) Match(r *http.Request) bool {
	match, _ := m.MatchWithError(r)
	return match
}

// MatchWithError returns true if r matches m. Exact protocol versions
// ("http/1.1") require an exact major/minor match, while the "+" variants
// ("http/1.1+") match that version or any newer one. An unrecognized
// protocol string matches nothing.
func (m MatchProtocol) MatchWithError(r *http.Request) (bool, error) {
	switch string(m) {
	case "grpc":
		return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc"), nil
	case "https":
		return r.TLS != nil, nil
	case "http":
		return r.TLS == nil, nil
	case "http/1.0":
		return r.ProtoMajor == 1 && r.ProtoMinor == 0, nil
	case "http/1.0+":
		return r.ProtoAtLeast(1, 0), nil
	case "http/1.1":
		return r.ProtoMajor == 1 && r.ProtoMinor == 1, nil
	case "http/1.1+":
		return r.ProtoAtLeast(1, 1), nil
	case "http/2":
		return r.ProtoMajor == 2, nil
	case "http/2+":
		return r.ProtoAtLeast(2, 0), nil
	case "http/3":
		return r.ProtoMajor == 3, nil
	case "http/3+":
		return r.ProtoAtLeast(3, 0), nil
	}
	return false, nil
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		var proto string
		if !d.Args(&proto) {
			return d.Err("expected exactly one protocol")
		}
		*m = MatchProtocol(proto)
	}
	return nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
// // Example: // // expression protocol('https') func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) { return CELMatcherImpl( "protocol", "protocol_request_string", []*cel.Type{cel.StringType}, func(data ref.Val) (RequestMatcherWithError, error) { protocolStr, ok := data.(types.String) if !ok { return nil, errors.New("protocol argument was not a string") } return MatchProtocol(strings.ToLower(string(protocolStr))), nil }, ) } // CaddyModule returns the Caddy module information. func (MatchTLS) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.tls", New: func() caddy.Module { return new(MatchTLS) }, } } // Match returns true if r matches m. func (m MatchTLS) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. func (m MatchTLS) MatchWithError(r *http.Request) (bool, error) { if r.TLS == nil { return false, nil } if m.HandshakeComplete != nil { if (!*m.HandshakeComplete && r.TLS.HandshakeComplete) || (*m.HandshakeComplete && !r.TLS.HandshakeComplete) { return false, nil } } return true, nil } // UnmarshalCaddyfile parses Caddyfile tokens for this matcher. Syntax: // // ... tls [early_data] // // EXPERIMENTAL SYNTAX: Subject to change. func (m *MatchTLS) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // iterate to merge multiple matchers into one for d.Next() { if d.NextArg() { switch d.Val() { case "early_data": var false bool m.HandshakeComplete = &false default: return d.Errf("unrecognized option '%s'", d.Val()) } } if d.NextArg() { return d.ArgErr() } if d.NextBlock(0) { return d.Err("malformed tls matcher: blocks are not supported yet") } } return nil } // CaddyModule returns the Caddy module information. func (MatchNot) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.matchers.not", New: func() caddy.Module { return new(MatchNot) }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. 
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // iterate to merge multiple matchers into one for d.Next() { matcherSet, err := ParseCaddyfileNestedMatcherSet(d) if err != nil { return err } m.MatcherSetsRaw = append(m.MatcherSetsRaw, matcherSet) } return nil } // UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON // bytes directly into m's MatcherSetsRaw field. func (m *MatchNot) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &m.MatcherSetsRaw) } // MarshalJSON satisfies json.Marshaler by marshaling // m's raw matcher sets. func (m MatchNot) MarshalJSON() ([]byte, error) { return json.Marshal(m.MatcherSetsRaw) } // Provision loads the matcher modules to be negated. func (m *MatchNot) Provision(ctx caddy.Context) error { matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw") if err != nil { return fmt.Errorf("loading matcher sets: %v", err) } for _, modMap := range matcherSets.([]map[string]any) { var ms MatcherSet for _, modIface := range modMap { if mod, ok := modIface.(RequestMatcherWithError); ok { ms = append(ms, mod) continue } if mod, ok := modIface.(RequestMatcher); ok { ms = append(ms, mod) continue } return fmt.Errorf("module is not a request matcher: %T", modIface) } m.MatcherSets = append(m.MatcherSets, ms) } return nil } // Match returns true if r matches m. Since this matcher negates // the embedded matchers, false is returned if any of its matcher // sets return true. func (m MatchNot) Match(r *http.Request) bool { match, _ := m.MatchWithError(r) return match } // MatchWithError returns true if r matches m. Since this matcher // negates the embedded matchers, false is returned if any of its // matcher sets return true. 
func (m MatchNot) MatchWithError(r *http.Request) (bool, error) {
	for _, ms := range m.MatcherSets {
		matches, err := ms.MatchWithError(r)
		if err != nil {
			return false, err
		}
		if matches {
			// any inner set matching means the negation fails
			return false, nil
		}
	}
	return true, nil
}

// MatchRegexp is an embedable type for matching
// using regular expressions. It adds placeholders
// to the request's replacer.
type MatchRegexp struct {
	// A unique name for this regular expression. Optional,
	// but useful to prevent overwriting captures from other
	// regexp matchers.
	Name string `json:"name,omitempty"`

	// The regular expression to evaluate, in RE2 syntax,
	// which is the same general syntax used by Go, Perl,
	// and Python. For details, see
	// [Go's regexp package](https://golang.org/pkg/regexp/).
	// Captures are accessible via placeholders. Unnamed
	// capture groups are exposed as their numeric, 1-based
	// index, while named capture groups are available by
	// the capture group name.
	Pattern string `json:"pattern"`

	// the compiled form of Pattern, set by Provision
	compiled *regexp.Regexp
}

// Provision compiles the regular expression.
func (mre *MatchRegexp) Provision(caddy.Context) error {
	re, err := regexp.Compile(mre.Pattern)
	if err != nil {
		return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
	}
	mre.compiled = re
	return nil
}

// Validate ensures mre is set up correctly.
func (mre *MatchRegexp) Validate() error {
	if mre.Name != "" && !wordRE.MatchString(mre.Name) {
		return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
	}
	return nil
}

// Match returns true if input matches the compiled regular
// expression in mre. It sets values on the replacer repl
// associated with capture groups, using the given scope
// (namespace).
func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
	matches := mre.compiled.FindStringSubmatch(input)
	if matches == nil {
		return false
	}

	// save all capture groups, first by index
	for i, match := range matches {
		keySuffix := "." + strconv.Itoa(i)
		if mre.Name != "" {
			// namespaced variant, e.g. {http.regexp.<name>.<i>}
			repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, match)
		}
		repl.Set(regexpPlaceholderPrefix+keySuffix, match)
	}

	// then by name
	for i, name := range mre.compiled.SubexpNames() {
		// skip the first element (the full match), and empty names
		if i == 0 || name == "" {
			continue
		}
		keySuffix := "." + name
		if mre.Name != "" {
			repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, matches[i])
		}
		repl.Set(regexpPlaceholderPrefix+keySuffix, matches[i])
	}

	return true
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		// If this is the second iteration of the loop
		// then there's more than one path_regexp matcher
		// and we would end up overwriting the old one
		if mre.Pattern != "" {
			return d.Err("regular expression can only be used once per named matcher")
		}
		args := d.RemainingArgs()
		switch len(args) {
		case 1:
			mre.Pattern = args[0]
		case 2:
			mre.Name = args[0]
			mre.Pattern = args[1]
		default:
			return d.ArgErr()
		}

		// Default to the named matcher's name, if no regexp name is provided
		if mre.Name == "" {
			mre.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
		}

		if d.NextBlock(0) {
			return d.Err("malformed path_regexp matcher: blocks are not supported")
		}
	}
	return nil
}

// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
// matcher set, and returns its raw module map value.
func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
	matcherMap := make(map[string]any)

	// in case there are multiple instances of the same matcher, concatenate
	// their tokens (we expect that UnmarshalCaddyfile should be able to
	// handle more than one segment); otherwise, we'd overwrite other
	// instances of the matcher in this set
	tokensByMatcherName := make(map[string][]caddyfile.Token)
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		matcherName := d.Val()
		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
	}

	// instantiate each referenced matcher module and let it
	// unmarshal its own tokens
	for matcherName, tokens := range tokensByMatcherName {
		mod, err := caddy.GetModule("http.matchers." + matcherName)
		if err != nil {
			return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
		}
		unm, ok := mod.New().(caddyfile.Unmarshaler)
		if !ok {
			return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
		}
		err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
		if err != nil {
			return nil, err
		}
		// accept either matcher interface, preferring the error-aware one
		if rm, ok := unm.(RequestMatcherWithError); ok {
			matcherMap[matcherName] = rm
			continue
		}
		if rm, ok := unm.(RequestMatcher); ok {
			matcherMap[matcherName] = rm
			continue
		}
		return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
	}

	// we should now have a functional matcher, but we also
	// need to be able to marshal as JSON, otherwise config
	// adaptation will be missing the matchers!
	matcherSet := make(caddy.ModuleMap)
	for name, matcher := range matcherMap {
		jsonBytes, err := json.Marshal(matcher)
		if err != nil {
			return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
		}
		matcherSet[name] = jsonBytes
	}

	return matcherSet, nil
}

// wordRE matches word characters; used to validate regexp names in Validate.
var wordRE = regexp.MustCompile(`\w+`)

// regexpPlaceholderPrefix is the namespace under which regexp capture-group
// placeholders are set on the replacer.
const regexpPlaceholderPrefix = "http.regexp"

// MatcherErrorVarKey is the key used for the variable that
// holds an optional error emitted from a request matcher,
// to short-circuit the handler chain, since matchers cannot
// return errors via the RequestMatcher interface.
//
// Deprecated: Matchers should implement RequestMatcherWithError
// which can return an error directly, instead of smuggling it
// through the vars map.
const MatcherErrorVarKey = "matchers.error"

// Interface guards
var (
	_ RequestMatcherWithError = (*MatchHost)(nil)
	_ caddy.Provisioner       = (*MatchHost)(nil)
	_ RequestMatcherWithError = (*MatchPath)(nil)
	_ RequestMatcherWithError = (*MatchPathRE)(nil)
	_ caddy.Provisioner       = (*MatchPathRE)(nil)
	_ RequestMatcherWithError = (*MatchMethod)(nil)
	_ RequestMatcherWithError = (*MatchQuery)(nil)
	_ RequestMatcherWithError = (*MatchHeader)(nil)
	_ RequestMatcherWithError = (*MatchHeaderRE)(nil)
	_ caddy.Provisioner       = (*MatchHeaderRE)(nil)
	_ RequestMatcherWithError = (*MatchProtocol)(nil)
	_ RequestMatcherWithError = (*MatchNot)(nil)
	_ caddy.Provisioner       = (*MatchNot)(nil)
	_ caddy.Provisioner       = (*MatchRegexp)(nil)

	_ caddyfile.Unmarshaler = (*MatchHost)(nil)
	_ caddyfile.Unmarshaler = (*MatchPath)(nil)
	_ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
	_ caddyfile.Unmarshaler = (*MatchMethod)(nil)
	_ caddyfile.Unmarshaler = (*MatchQuery)(nil)
	_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
	_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
	_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
	_ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
	_ caddyfile.Unmarshaler = (*MatchVarsRE)(nil)

	_ CELLibraryProducer = (*MatchHost)(nil)
	_ CELLibraryProducer = (*MatchPath)(nil)
	_ CELLibraryProducer = (*MatchPathRE)(nil)
	_ CELLibraryProducer = (*MatchMethod)(nil)
	_ CELLibraryProducer = (*MatchQuery)(nil)
	_ CELLibraryProducer = (*MatchHeader)(nil)
	_ CELLibraryProducer = (*MatchHeaderRE)(nil)
	_ CELLibraryProducer = (*MatchProtocol)(nil)
	_ CELLibraryProducer = (*VarsMatcher)(nil)
	_ CELLibraryProducer = (*MatchVarsRE)(nil)

	_ json.Marshaler   = (*MatchNot)(nil)
	_ json.Unmarshaler = (*MatchNot)(nil)
)



================================================
FILE: modules/caddyhttp/matchers_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp import ( "context" "fmt" "net/http" "net/http/httptest" "net/url" "os" "runtime" "testing" "github.com/caddyserver/caddy/v2" ) func TestHostMatcher(t *testing.T) { err := os.Setenv("GO_BENCHMARK_DOMAIN", "localhost") if err != nil { t.Errorf("error while setting up environment: %v", err) } for i, tc := range []struct { match MatchHost input string expect bool }{ { match: MatchHost{}, input: "example.com", expect: false, }, { match: MatchHost{"example.com"}, input: "example.com", expect: true, }, { match: MatchHost{"EXAMPLE.COM"}, input: "example.com", expect: true, }, { match: MatchHost{"example.com"}, input: "EXAMPLE.COM", expect: true, }, { match: MatchHost{"example.com"}, input: "foo.example.com", expect: false, }, { match: MatchHost{"example.com"}, input: "EXAMPLE.COM", expect: true, }, { match: MatchHost{"foo.example.com"}, input: "foo.example.com", expect: true, }, { match: MatchHost{"foo.example.com"}, input: "bar.example.com", expect: false, }, { match: MatchHost{"éxàmplê.com"}, input: "xn--xmpl-0na6cm.com", expect: true, }, { match: MatchHost{"*.example.com"}, input: "example.com", expect: false, }, { match: MatchHost{"*.example.com"}, input: "SUB.EXAMPLE.COM", expect: true, }, { match: MatchHost{"*.example.com"}, input: "foo.example.com", expect: true, }, { match: MatchHost{"*.example.com"}, input: "foo.bar.example.com", expect: false, }, { match: MatchHost{"*.example.com", "example.net"}, input: "example.net", expect: true, }, { match: MatchHost{"example.net", "*.example.com"}, input: "foo.example.com", expect: true, }, { match: MatchHost{"*.example.net", "*.*.example.com"}, input: "foo.bar.example.com", expect: true, }, { match: MatchHost{"*.example.net", "sub.*.example.com"}, input: "sub.foo.example.com", expect: true, }, { match: MatchHost{"*.example.net", "sub.*.example.com"}, input: "sub.foo.example.net", expect: false, }, { match: MatchHost{"www.*.*"}, input: "www.example.com", expect: true, }, { match: MatchHost{"example.com"}, 
input: "example.com:5555", expect: true, }, { match: MatchHost{"{env.GO_BENCHMARK_DOMAIN}"}, input: "localhost", expect: true, }, { match: MatchHost{"{env.GO_NONEXISTENT}"}, input: "localhost", expect: false, }, } { req := &http.Request{Host: tc.input} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) if err := tc.match.Provision(caddy.Context{}); err != nil { t.Errorf("Test %d %v: provisioning failed: %v", i, tc.match, err) } actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input) continue } } } func TestPathMatcher(t *testing.T) { for i, tc := range []struct { match MatchPath // not URI-encoded because not parsing from a URI input string // should be valid URI encoding (escaped) since it will become part of a request expect bool provisionErr bool }{ { match: MatchPath{}, input: "/", expect: false, }, { match: MatchPath{"/"}, input: "/", expect: true, }, { match: MatchPath{"/foo/bar"}, input: "/", expect: false, }, { match: MatchPath{"/foo/bar"}, input: "/foo/bar", expect: true, }, { match: MatchPath{"/foo/bar/"}, input: "/foo/bar", expect: false, }, { match: MatchPath{"/foo/bar/"}, input: "/foo/bar/", expect: true, }, { match: MatchPath{"/foo/bar/", "/other"}, input: "/other/", expect: false, }, { match: MatchPath{"/foo/bar/", "/other"}, input: "/other", expect: true, }, { match: MatchPath{"*.ext"}, input: "/foo/bar.ext", expect: true, }, { match: MatchPath{"*.php"}, input: "/index.PHP", expect: true, }, { match: MatchPath{"*.ext"}, input: "/foo/bar.ext", expect: true, }, { match: MatchPath{"/foo/*/baz"}, input: "/foo/bar/baz", expect: true, }, { match: MatchPath{"/foo/*/baz/bam"}, input: "/foo/bar/bam", expect: false, }, { match: MatchPath{"*substring*"}, input: "/foo/substring/bar.txt", expect: true, }, { 
match: MatchPath{"/foo"}, input: "/foo/bar", expect: false, }, { match: MatchPath{"/foo"}, input: "/foo/bar", expect: false, }, { match: MatchPath{"/foo"}, input: "/FOO", expect: true, }, { match: MatchPath{"/foo*"}, input: "/FOOOO", expect: true, }, { match: MatchPath{"/foo/bar.txt"}, input: "/foo/BAR.txt", expect: true, }, { match: MatchPath{"/foo*"}, input: "//foo/bar", expect: true, }, { match: MatchPath{"/foo"}, input: "//foo", expect: true, }, { match: MatchPath{"//foo"}, input: "/foo", expect: false, }, { match: MatchPath{"//foo"}, input: "//foo", expect: true, }, { match: MatchPath{"/foo//*"}, input: "/foo//bar", expect: true, }, { match: MatchPath{"/foo//*"}, input: "/foo/%2Fbar", expect: true, }, { match: MatchPath{"/foo/%2F*"}, input: "/foo/%2Fbar", expect: true, }, { match: MatchPath{"/foo/%2F*"}, input: "/foo//bar", expect: false, }, { match: MatchPath{"/foo//bar"}, input: "/foo//bar", expect: true, }, { match: MatchPath{"/foo/*//bar"}, input: "/foo///bar", expect: true, }, { match: MatchPath{"/foo/%*//bar"}, input: "/foo///bar", expect: true, }, { match: MatchPath{"/foo/%*//bar"}, input: "/foo//%2Fbar", expect: true, }, { match: MatchPath{"/foo*"}, input: "/%2F/foo", expect: true, }, { match: MatchPath{"*"}, input: "/", expect: true, }, { match: MatchPath{"*"}, input: "/foo/bar", expect: true, }, { match: MatchPath{"**"}, input: "/", expect: true, }, { match: MatchPath{"**"}, input: "/foo/bar", expect: true, }, // notice these next three test cases are the same normalized path but are written differently { match: MatchPath{"/%25@.txt"}, input: "/%25@.txt", expect: true, }, { match: MatchPath{"/%25@.txt"}, input: "/%25%40.txt", expect: true, }, { match: MatchPath{"/%25%40.txt"}, input: "/%25%40.txt", expect: true, }, { match: MatchPath{"/bands/*/*"}, input: "/bands/AC%2FDC/T.N.T", expect: false, // because * operates in normalized space }, { match: MatchPath{"/bands/%*/%*"}, input: "/bands/AC%2FDC/T.N.T", expect: true, }, { match: 
MatchPath{"/bands/%*/%*"}, input: "/bands/AC/DC/T.N.T", expect: false, }, { match: MatchPath{"/bands/%*"}, input: "/bands/AC/DC", expect: false, // not a suffix match }, { match: MatchPath{"/bands/%*"}, input: "/bands/AC%2FDC", expect: true, }, { match: MatchPath{"/foo%2fbar/baz"}, input: "/foo%2Fbar/baz", expect: true, }, { match: MatchPath{"/foo%2fbar/baz"}, input: "/foo/bar/baz", expect: false, }, { match: MatchPath{"/foo/bar/baz"}, input: "/foo%2fbar/baz", expect: true, }, { match: MatchPath{"/admin%2fpanel"}, input: "/ADMIN%2fpanel", expect: true, }, { match: MatchPath{"/admin%2fpa*el"}, input: "/ADMIN%2fPaAzZLm123NEL", expect: true, }, } { err := tc.match.Provision(caddy.Context{}) if err == nil && tc.provisionErr { t.Errorf("Test %d %v: Expected error provisioning, but there was no error", i, tc.match) } if err != nil && !tc.provisionErr { t.Errorf("Test %d %v: Expected no error provisioning, but there was an error: %v", i, tc.match, err) } if tc.provisionErr { continue // if it's not supposed to provision properly, pointless to test it } u, err := url.ParseRequestURI(tc.input) if err != nil { t.Fatalf("Test %d (%v): Invalid request URI (should be rejected by Go's HTTP server): %v", i, tc.input, err) } req := &http.Request{URL: u} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input) continue } } } func TestPathMatcherWindows(t *testing.T) { // only Windows has this bug where it will ignore // trailing dots and spaces in a filename if runtime.GOOS != "windows" { return } req := &http.Request{URL: &url.URL{Path: "/index.php . . 
.."}} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) match := MatchPath{"*.php"} matched, err := match.MatchWithError(req) if err != nil { t.Errorf("Expected no error, but got: %v", err) } if !matched { t.Errorf("Expected to match; should ignore trailing dots and spaces") } } func TestPathREMatcher(t *testing.T) { for i, tc := range []struct { match MatchPathRE input string expect bool expectRepl map[string]string }{ { match: MatchPathRE{}, input: "/", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "/"}}, input: "/", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}}, input: "/foo", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}}, input: "/foo/", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}}, input: "//foo", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}}, input: "//foo/", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo"}}, input: "/%2F/foo/", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "/bar"}}, input: "/foo/", expect: false, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/bar"}}, input: "/foo/bar", expect: false, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo/(.*)/baz$", Name: "name"}}, input: "/foo/bar/baz", expect: true, expectRepl: map[string]string{"name.1": "bar"}, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/foo/(?P.*)/baz$", Name: "name"}}, input: "/foo/bar/baz", expect: true, expectRepl: map[string]string{"name.myparam": "bar"}, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/%@.txt"}}, input: "/%25@.txt", expect: true, }, { match: MatchPathRE{MatchRegexp{Pattern: "^/%25@.txt"}}, input: "/%25@.txt", expect: false, }, } { // compile the regexp and validate its name err := tc.match.Provision(caddy.Context{}) if err != nil { t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err) continue } err = tc.match.Validate() if err != nil { t.Errorf("Test 
%d %v: Validating: %v", i, tc.match, err) continue } // set up the fake request and its Replacer u, err := url.ParseRequestURI(tc.input) if err != nil { t.Fatalf("Test %d: Bad input URI: %v", i, err) } req := &http.Request{URL: u} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'", i, tc.match.Pattern, tc.expect, actual, tc.input) continue } for key, expectVal := range tc.expectRepl { placeholder := fmt.Sprintf("{http.regexp.%s}", key) actualVal := repl.ReplaceAll(placeholder, "") if actualVal != expectVal { t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'", i, tc.match.Pattern, key, expectVal, actualVal) continue } } } } func TestHeaderMatcher(t *testing.T) { repl := caddy.NewReplacer() repl.Set("a", "foobar") for i, tc := range []struct { match MatchHeader input http.Header // make sure these are canonical cased (std lib will do that in a real request) host string expect bool }{ { match: MatchHeader{"Field": []string{"foo"}}, input: http.Header{"Field": []string{"foo"}}, expect: true, }, { match: MatchHeader{"Field": []string{"foo", "bar"}}, input: http.Header{"Field": []string{"bar"}}, expect: true, }, { match: MatchHeader{"Field": []string{"foo", "bar"}}, input: http.Header{"Alakazam": []string{"kapow"}}, expect: false, }, { match: MatchHeader{"Field": []string{"foo", "bar"}}, input: http.Header{"Field": []string{"kapow"}}, expect: false, }, { match: MatchHeader{"Field": []string{"foo", "bar"}}, input: http.Header{"Field": []string{"kapow", "foo"}}, expect: true, }, { match: MatchHeader{"Field1": []string{"foo"}, "Field2": []string{"bar"}}, input: http.Header{"Field1": []string{"foo"}, 
"Field2": []string{"bar"}}, expect: true, }, { match: MatchHeader{"field1": []string{"foo"}, "field2": []string{"bar"}}, input: http.Header{"Field1": []string{"foo"}, "Field2": []string{"bar"}}, expect: true, }, { match: MatchHeader{"field1": []string{"foo"}, "field2": []string{"bar"}}, input: http.Header{"Field1": []string{"foo"}, "Field2": []string{"kapow"}}, expect: false, }, { match: MatchHeader{"field1": []string{"*"}}, input: http.Header{"Field1": []string{"foo"}}, expect: true, }, { match: MatchHeader{"field1": []string{"*"}}, input: http.Header{"Field2": []string{"foo"}}, expect: false, }, { match: MatchHeader{"Field1": []string{"foo*"}}, input: http.Header{"Field1": []string{"foo"}}, expect: true, }, { match: MatchHeader{"Field1": []string{"foo*"}}, input: http.Header{"Field1": []string{"asdf", "foobar"}}, expect: true, }, { match: MatchHeader{"Field1": []string{"*bar"}}, input: http.Header{"Field1": []string{"asdf", "foobar"}}, expect: true, }, { match: MatchHeader{"host": []string{"localhost"}}, input: http.Header{}, host: "localhost", expect: true, }, { match: MatchHeader{"host": []string{"localhost"}}, input: http.Header{}, host: "caddyserver.com", expect: false, }, { match: MatchHeader{"Must-Not-Exist": nil}, input: http.Header{}, expect: true, }, { match: MatchHeader{"Must-Not-Exist": nil}, input: http.Header{"Must-Not-Exist": []string{"do not match"}}, expect: false, }, { match: MatchHeader{"Foo": []string{"{a}"}}, input: http.Header{"Foo": []string{"foobar"}}, expect: true, }, { match: MatchHeader{"Foo": []string{"{a}"}}, input: http.Header{"Foo": []string{"asdf"}}, expect: false, }, { match: MatchHeader{"Foo": []string{"{a}*"}}, input: http.Header{"Foo": []string{"foobar-baz"}}, expect: true, }, } { req := &http.Request{Header: tc.input, Host: tc.host} ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", 
i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input) continue } } } func TestQueryMatcher(t *testing.T) { for i, tc := range []struct { scenario string match MatchQuery input string expect bool }{ { scenario: "non match against a specific value", match: MatchQuery{"debug": []string{"1"}}, input: "/", expect: false, }, { scenario: "match against a specific value", match: MatchQuery{"debug": []string{"1"}}, input: "/?debug=1", expect: true, }, { scenario: "match against a wildcard", match: MatchQuery{"debug": []string{"*"}}, input: "/?debug=something", expect: true, }, { scenario: "non match against a wildcarded", match: MatchQuery{"debug": []string{"*"}}, input: "/?other=something", expect: false, }, { scenario: "match against an empty value", match: MatchQuery{"debug": []string{""}}, input: "/?debug", expect: true, }, { scenario: "non match against an empty value", match: MatchQuery{"debug": []string{""}}, input: "/?someparam", expect: false, }, { scenario: "empty matcher value should match empty query", match: MatchQuery{}, input: "/?", expect: true, }, { scenario: "nil matcher value should NOT match a non-empty query", match: MatchQuery{}, input: "/?foo=bar", expect: false, }, { scenario: "non-nil matcher should NOT match an empty query", match: MatchQuery{"": nil}, input: "/?", expect: false, }, { scenario: "match against a placeholder value", match: MatchQuery{"debug": []string{"{http.vars.debug}"}}, input: "/?debug=1", expect: true, }, { scenario: "match against a placeholder key", match: MatchQuery{"{http.vars.key}": []string{"1"}}, input: "/?somekey=1", expect: true, }, { scenario: "do not match when not all query params are present", match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}}, input: "/?debug=1", expect: false, }, { scenario: "match when all query params are present", match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}}, input: 
"/?debug=1&foo=bar", expect: true, }, { scenario: "do not match when the value of a query param does not match", match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}}, input: "/?debug=2&foo=bar", expect: false, }, { scenario: "do not match when all the values the query params do not match", match: MatchQuery{"debug": []string{"1"}, "foo": []string{"bar"}}, input: "/?debug=2&foo=baz", expect: false, }, { scenario: "match against two values for the same key", match: MatchQuery{"debug": []string{"1"}}, input: "/?debug=1&debug=2", expect: true, }, { scenario: "match against two values for the same key", match: MatchQuery{"debug": []string{"2", "1"}}, input: "/?debug=2&debug=1", expect: true, }, } { u, _ := url.Parse(tc.input) req := &http.Request{URL: u} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) repl.Set("http.vars.debug", "1") repl.Set("http.vars.key", "somekey") req = req.WithContext(ctx) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d %v: Expected %t, got %t for '%s'", i, tc.match, tc.expect, actual, tc.input) continue } } } func TestHeaderREMatcher(t *testing.T) { for i, tc := range []struct { match MatchHeaderRE input http.Header // make sure these are canonical cased (std lib will do that in a real request) host string expect bool expectRepl map[string]string }{ { match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "foo"}}, input: http.Header{"Field": []string{"foo"}}, expect: true, }, { match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "$foo^"}}, input: http.Header{"Field": []string{"foobar"}}, expect: false, }, { match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}}, input: http.Header{"Field": []string{"foobar"}}, expect: true, expectRepl: map[string]string{"name.1": "bar"}, }, { match: MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo.*$", Name: 
"name"}}, input: http.Header{"Field": []string{"barfoo", "foobar"}}, expect: true, }, { match: MatchHeaderRE{"host": &MatchRegexp{Pattern: "^localhost$", Name: "name"}}, input: http.Header{}, host: "localhost", expect: true, }, { match: MatchHeaderRE{"host": &MatchRegexp{Pattern: "^local$", Name: "name"}}, input: http.Header{}, host: "localhost", expect: false, }, } { // compile the regexp and validate its name err := tc.match.Provision(caddy.Context{}) if err != nil { t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err) continue } err = tc.match.Validate() if err != nil { t.Errorf("Test %d %v: Validating: %v", i, tc.match, err) continue } // set up the fake request and its Replacer req := &http.Request{Header: tc.input, URL: new(url.URL), Host: tc.host} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'", i, tc.match, tc.expect, actual, tc.input) continue } for key, expectVal := range tc.expectRepl { placeholder := fmt.Sprintf("{http.regexp.%s}", key) actualVal := repl.ReplaceAll(placeholder, "") if actualVal != expectVal { t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'", i, tc.match, key, expectVal, actualVal) continue } } } } func BenchmarkHeaderREMatcher(b *testing.B) { i := 0 match := MatchHeaderRE{"Field": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}} input := http.Header{"Field": []string{"foobar"}} var host string err := match.Provision(caddy.Context{}) if err != nil { b.Errorf("Test %d %v: Provisioning: %v", i, match, err) } err = match.Validate() if err != nil { b.Errorf("Test %d %v: Validating: %v", i, match, err) } // set up the fake request and its Replacer req := 
&http.Request{Header: input, URL: new(url.URL), Host: host} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) for b.Loop() { match.MatchWithError(req) } } func TestVarREMatcher(t *testing.T) { for i, tc := range []struct { desc string match MatchVarsRE input VarsMiddleware headers http.Header expect bool expectRepl map[string]string }{ { desc: "match static value within var set by the VarsMiddleware succeeds", match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "foo"}}, input: VarsMiddleware{"Var1": "here is foo val"}, expect: true, }, { desc: "value set by VarsMiddleware not satisfying regexp matcher fails to match", match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "$foo^"}}, input: VarsMiddleware{"Var1": "foobar"}, expect: false, }, { desc: "successfully matched value is captured and its placeholder is added to replacer", match: MatchVarsRE{"Var1": &MatchRegexp{Pattern: "^foo(.*)$", Name: "name"}}, input: VarsMiddleware{"Var1": "foobar"}, expect: true, expectRepl: map[string]string{"name.1": "bar"}, }, { desc: "matching against a value of standard variables succeeds", match: MatchVarsRE{"{http.request.method}": &MatchRegexp{Pattern: "^G.[tT]$"}}, input: VarsMiddleware{}, expect: true, }, { desc: "matching against value of var set by the VarsMiddleware and referenced by its placeholder succeeds", match: MatchVarsRE{"{http.vars.Var1}": &MatchRegexp{Pattern: "[vV]ar[0-9]"}}, input: VarsMiddleware{"Var1": "var1Value"}, expect: true, }, { desc: "placeholder key value containing braces is not double-expanded", match: MatchVarsRE{"{http.request.header.X-Input}": &MatchRegexp{Pattern: ".+", Name: "val"}}, input: VarsMiddleware{}, headers: http.Header{"X-Input": []string{"{env.HOME}"}}, expect: true, expectRepl: map[string]string{"val.0": "{env.HOME}"}, }, } { t.Run(tc.desc, func(t *testing.T) { t.Parallel() // compile the regexp and validate its 
name err := tc.match.Provision(caddy.Context{}) if err != nil { t.Errorf("Test %d %v: Provisioning: %v", i, tc.match, err) return } err = tc.match.Validate() if err != nil { t.Errorf("Test %d %v: Validating: %v", i, tc.match, err) return } // set up the fake request and its Replacer req := &http.Request{URL: new(url.URL), Method: http.MethodGet, Header: tc.headers} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]any)) req = req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, httptest.NewRecorder()) tc.input.ServeHTTP(httptest.NewRecorder(), req, emptyHandler) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d [%v]: Expected %t, got %t for input '%s'", i, tc.match, tc.expect, actual, tc.input) return } for key, expectVal := range tc.expectRepl { placeholder := fmt.Sprintf("{http.regexp.%s}", key) actualVal := repl.ReplaceAll(placeholder, "") if actualVal != expectVal { t.Errorf("Test %d [%v]: Expected placeholder {http.regexp.%s} to be '%s' but got '%s'", i, tc.match, key, expectVal, actualVal) return } } }) } } func TestNotMatcher(t *testing.T) { for i, tc := range []struct { host, path string match MatchNot expect bool }{ { host: "example.com", path: "/", match: MatchNot{}, expect: true, }, { host: "example.com", path: "/foo", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/foo"}, }, }, }, expect: false, }, { host: "example.com", path: "/bar", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/foo"}, }, }, }, expect: true, }, { host: "example.com", path: "/bar", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/foo"}, }, { MatchHost{"example.com"}, }, }, }, expect: false, }, { host: "example.com", path: "/bar", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/bar"}, }, { MatchHost{"example.com"}, }, }, }, 
expect: false, }, { host: "example.com", path: "/foo", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/bar"}, }, { MatchHost{"sub.example.com"}, }, }, }, expect: true, }, { host: "example.com", path: "/foo", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/foo"}, MatchHost{"example.com"}, }, }, }, expect: false, }, { host: "example.com", path: "/foo", match: MatchNot{ MatcherSets: []MatcherSet{ { MatchPath{"/bar"}, MatchHost{"example.com"}, }, }, }, expect: true, }, } { req := &http.Request{Host: tc.host, URL: &url.URL{Path: tc.path}} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) actual, err := tc.match.MatchWithError(req) if err != nil { t.Errorf("Test %d %v: matching failed: %v", i, tc.match, err) } if actual != tc.expect { t.Errorf("Test %d %+v: Expected %t, got %t for: host=%s path=%s'", i, tc.match, tc.expect, actual, tc.host, tc.path) continue } } } func BenchmarkLargeHostMatcher(b *testing.B) { // this benchmark simulates a large host matcher (thousands of entries) where each // value is an exact hostname (not a placeholder or wildcard) - compare the results // of this with and without the binary search (comment out the various fast path // sections in Match) to conduct experiments const n = 10000 lastHost := fmt.Sprintf("%d.example.com", n-1) req := &http.Request{Host: lastHost} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) matcher := make(MatchHost, n) for i := 0; i < n; i++ { matcher[i] = fmt.Sprintf("%d.example.com", i) } err := matcher.Provision(caddy.Context{}) if err != nil { b.Fatal(err) } for b.Loop() { matcher.MatchWithError(req) } } func BenchmarkHostMatcherWithoutPlaceholder(b *testing.B) { req := &http.Request{Host: "localhost"} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) match := 
MatchHost{"localhost"} for b.Loop() { match.MatchWithError(req) } } func BenchmarkHostMatcherWithPlaceholder(b *testing.B) { err := os.Setenv("GO_BENCHMARK_DOMAIN", "localhost") if err != nil { b.Errorf("error while setting up environment: %v", err) } req := &http.Request{Host: "localhost"} repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) req = req.WithContext(ctx) match := MatchHost{"{env.GO_BENCHMARK_DOMAIN}"} for b.Loop() { match.MatchWithError(req) } } ================================================ FILE: modules/caddyhttp/metrics.go ================================================ package caddyhttp import ( "context" "errors" "net/http" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/internal/metrics" ) // Metrics configures metrics observations. // EXPERIMENTAL and subject to change or removal. // // Example configuration: // // { // "apps": { // "http": { // "metrics": { // "per_host": true, // "observe_catchall_hosts": false // }, // "servers": { // "srv0": { // "routes": [{ // "match": [{"host": ["example.com", "www.example.com"]}], // "handle": [{"handler": "static_response", "body": "Hello"}] // }] // } // } // } // } // } // // In this configuration: // - Requests to example.com and www.example.com get individual host labels // - All other hosts (e.g., attacker.com) are aggregated under "_other" label // - This prevents unlimited cardinality from arbitrary Host headers type Metrics struct { // Enable per-host metrics. Enabling this option may // incur high-memory consumption, depending on the number of hosts // managed by Caddy. // // CARDINALITY PROTECTION: To prevent unbounded cardinality attacks, // only explicitly configured hosts (via host matchers) are allowed // by default. Other hosts are aggregated under the "_other" label. 
// See AllowCatchAllHosts to change this behavior. PerHost bool `json:"per_host,omitempty"` // Allow metrics for catch-all hosts (hosts without explicit configuration). // When false (default), only hosts explicitly configured via host matchers // will get individual metrics labels. All other hosts will be aggregated // under the "_other" label to prevent cardinality explosion. // // This is automatically enabled for HTTPS servers (since certificates provide // some protection against unbounded cardinality), but disabled for HTTP servers // by default to prevent cardinality attacks from arbitrary Host headers. // // Set to true to allow all hosts to get individual metrics (NOT RECOMMENDED // for production environments exposed to the internet). ObserveCatchallHosts bool `json:"observe_catchall_hosts,omitempty"` init sync.Once httpMetrics *httpMetrics allowedHosts map[string]struct{} hasHTTPSServer bool } type httpMetrics struct { requestInFlight *prometheus.GaugeVec requestCount *prometheus.CounterVec requestErrors *prometheus.CounterVec requestDuration *prometheus.HistogramVec requestSize *prometheus.HistogramVec responseSize *prometheus.HistogramVec responseDuration *prometheus.HistogramVec } func initHTTPMetrics(ctx caddy.Context, metrics *Metrics) { const ns, sub = "caddy", "http" registry := ctx.GetMetricsRegistry() basicLabels := []string{"server", "handler"} if metrics.PerHost { basicLabels = append(basicLabels, "host") } metrics.httpMetrics.requestInFlight = promauto.With(registry).NewGaugeVec(prometheus.GaugeOpts{ Namespace: ns, Subsystem: sub, Name: "requests_in_flight", Help: "Number of requests currently handled by this server.", }, basicLabels) metrics.httpMetrics.requestErrors = promauto.With(registry).NewCounterVec(prometheus.CounterOpts{ Namespace: ns, Subsystem: sub, Name: "request_errors_total", Help: "Number of requests resulting in middleware errors.", }, basicLabels) metrics.httpMetrics.requestCount = 
promauto.With(registry).NewCounterVec(prometheus.CounterOpts{ Namespace: ns, Subsystem: sub, Name: "requests_total", Help: "Counter of HTTP(S) requests made.", }, basicLabels) // TODO: allow these to be customized in the config durationBuckets := prometheus.DefBuckets sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8) httpLabels := []string{"server", "handler", "code", "method"} if metrics.PerHost { httpLabels = append(httpLabels, "host") } metrics.httpMetrics.requestDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{ Namespace: ns, Subsystem: sub, Name: "request_duration_seconds", Help: "Histogram of round-trip request durations.", Buckets: durationBuckets, }, httpLabels) metrics.httpMetrics.requestSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{ Namespace: ns, Subsystem: sub, Name: "request_size_bytes", Help: "Total size of the request. Includes body", Buckets: sizeBuckets, }, httpLabels) metrics.httpMetrics.responseSize = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{ Namespace: ns, Subsystem: sub, Name: "response_size_bytes", Help: "Size of the returned response.", Buckets: sizeBuckets, }, httpLabels) metrics.httpMetrics.responseDuration = promauto.With(registry).NewHistogramVec(prometheus.HistogramOpts{ Namespace: ns, Subsystem: sub, Name: "response_duration_seconds", Help: "Histogram of times to first byte in response bodies.", Buckets: durationBuckets, }, httpLabels) } // scanConfigForHosts scans the HTTP app configuration to build a set of allowed hosts // for metrics collection, similar to how auto-HTTPS scans for domain names. 
func (m *Metrics) scanConfigForHosts(app *App) {
	// Per-host labels are only emitted when PerHost is enabled; otherwise
	// there is nothing to collect and the allow-list stays nil.
	if !m.PerHost {
		return
	}
	m.allowedHosts = make(map[string]struct{})
	m.hasHTTPSServer = false
	for _, srv := range app.Servers {
		// Check if this server has TLS enabled; any TLS connection policy
		// marks the whole config as having an HTTPS server, which
		// shouldAllowHostMetrics uses to auto-allow catch-all HTTPS hosts.
		serverHasTLS := len(srv.TLSConnPolicies) > 0
		if serverHasTLS {
			m.hasHTTPSServer = true
		}
		// Collect hosts from route matchers
		for _, route := range srv.Routes {
			for _, matcherSet := range route.MatcherSets {
				for _, matcher := range matcherSet {
					if hm, ok := matcher.(*MatchHost); ok {
						for _, host := range *hm {
							// Only allow non-fuzzy hosts to prevent unbounded cardinality
							// (fuzzy hosts contain wildcards/placeholders and could match
							// arbitrarily many request hostnames).
							if !hm.fuzzy(host) {
								// Stored lowercased; lookups in shouldAllowHostMetrics
								// lowercase the request host to match.
								m.allowedHosts[strings.ToLower(host)] = struct{}{}
							}
						}
					}
				}
			}
		}
	}
}

// shouldAllowHostMetrics determines if metrics should be collected for the given host.
// This implements the cardinality protection by only allowing metrics for:
// 1. Explicitly configured hosts (collected by scanConfigForHosts)
// 2. Catch-all requests on HTTPS servers (auto-enabled when any server has TLS)
// 3. Catch-all requests on HTTP servers only if ObserveCatchallHosts is set
func (m *Metrics) shouldAllowHostMetrics(host string, isHTTPS bool) bool {
	if !m.PerHost {
		return true // host won't be used in labels anyway
	}
	// NOTE(review): host comes from r.Host and may include a port
	// (e.g. "example.com:8080"), while allowedHosts holds bare hostnames
	// from host matchers — confirm whether the port is stripped upstream.
	normalizedHost := strings.ToLower(host)
	// Always allow explicitly configured hosts
	if _, exists := m.allowedHosts[normalizedHost]; exists {
		return true
	}
	// For catch-all requests (not in allowed hosts): allowed either by
	// explicit opt-in, or automatically when the request is HTTPS and the
	// config actually has an HTTPS server.
	allowCatchAll := m.ObserveCatchallHosts || (isHTTPS && m.hasHTTPSServer)
	return allowCatchAll
}

// serverNameFromContext extracts the current server name from the context.
// Returns "UNKNOWN" if none is available (should probably never happen).
func serverNameFromContext(ctx context.Context) string {
	srv, ok := ctx.Value(ServerCtxKey).(*Server)
	if !ok || srv == nil || srv.name == "" {
		return "UNKNOWN"
	}
	return srv.name
}

// metricsInstrumentedRoute wraps a compiled route Handler with metrics
// instrumentation. It wraps the entire compiled route chain once,
// collecting metrics only once per route match.
type metricsInstrumentedRoute struct { handler string next Handler metrics *Metrics } func newMetricsInstrumentedRoute(ctx caddy.Context, handler string, next Handler, m *Metrics) *metricsInstrumentedRoute { m.init.Do(func() { initHTTPMetrics(ctx, m) }) return &metricsInstrumentedRoute{handler: handler, next: next, metrics: m} } func (h *metricsInstrumentedRoute) ServeHTTP(w http.ResponseWriter, r *http.Request) error { server := serverNameFromContext(r.Context()) labels := prometheus.Labels{"server": server, "handler": h.handler} method := metrics.SanitizeMethod(r.Method) // the "code" value is set later, but initialized here to eliminate the possibility // of a panic statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""} // Determine if this is an HTTPS request isHTTPS := r.TLS != nil if h.metrics.PerHost { // Apply cardinality protection for host metrics if h.metrics.shouldAllowHostMetrics(r.Host, isHTTPS) { labels["host"] = strings.ToLower(r.Host) statusLabels["host"] = strings.ToLower(r.Host) } else { // Use a catch-all label for unallowed hosts to prevent cardinality explosion labels["host"] = "_other" statusLabels["host"] = "_other" } } inFlight := h.metrics.httpMetrics.requestInFlight.With(labels) inFlight.Inc() defer inFlight.Dec() start := time.Now() // This is a _bit_ of a hack - it depends on the ShouldBufferFunc always // being called when the headers are written. // Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader. 
writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool { statusLabels["code"] = metrics.SanitizeCode(status) ttfb := time.Since(start).Seconds() h.metrics.httpMetrics.responseDuration.With(statusLabels).Observe(ttfb) return false }) wrec := NewResponseRecorder(w, nil, writeHeaderRecorder) err := h.next.ServeHTTP(wrec, r) dur := time.Since(start).Seconds() h.metrics.httpMetrics.requestCount.With(labels).Inc() observeRequest := func(status int) { // If the code hasn't been set yet, and we didn't encounter an error, we're // probably falling through with an empty handler. if statusLabels["code"] == "" { // we still sanitize it, even though it's likely to be 0. A 200 is // returned on fallthrough so we want to reflect that. statusLabels["code"] = metrics.SanitizeCode(status) } h.metrics.httpMetrics.requestDuration.With(statusLabels).Observe(dur) h.metrics.httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r))) h.metrics.httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size())) } if err != nil { var handlerErr HandlerError if errors.As(err, &handlerErr) { observeRequest(handlerErr.StatusCode) } h.metrics.httpMetrics.requestErrors.With(labels).Inc() return err } observeRequest(wrec.Status()) return nil } // taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298 func computeApproximateRequestSize(r *http.Request) int { s := 0 if r.URL != nil { s += len(r.URL.String()) } s += len(r.Method) s += len(r.Proto) for name, values := range r.Header { s += len(name) for _, value := range values { s += len(value) } } s += len(r.Host) // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
if r.ContentLength != -1 { s += int(r.ContentLength) } return s } ================================================ FILE: modules/caddyhttp/metrics_test.go ================================================ package caddyhttp import ( "context" "crypto/tls" "errors" "net/http" "net/http/httptest" "strings" "sync" "testing" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/caddyserver/caddy/v2" ) func TestServerNameFromContext(t *testing.T) { ctx := context.Background() expected := "UNKNOWN" if actual := serverNameFromContext(ctx); actual != expected { t.Errorf("Not equal: expected %q, but got %q", expected, actual) } in := "foo" ctx = context.WithValue(ctx, ServerCtxKey, &Server{name: in}) if actual := serverNameFromContext(ctx); actual != in { t.Errorf("Not equal: expected %q, but got %q", in, actual) } } func TestMetricsInstrumentedHandler(t *testing.T) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) metrics := &Metrics{ init: sync.Once{}, httpMetrics: &httpMetrics{}, } handlerErr := errors.New("oh noes") response := []byte("hello world!") h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 { t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual) } if handlerErr == nil { w.Write(response) } return handlerErr }) ih := newMetricsInstrumentedRoute(ctx, "bar", h, metrics) r := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() if actual := ih.ServeHTTP(w, r); actual != handlerErr { t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual) } if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 { t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual) } handlerErr = nil if err := ih.ServeHTTP(w, r); err != nil { t.Errorf("Received unexpected error: %v", err) } // an empty handler - no errors, no header written emptyHandler := HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) error { return nil }) ih = newMetricsInstrumentedRoute(ctx, "empty", emptyHandler, metrics) r = httptest.NewRequest("GET", "/", nil) w = httptest.NewRecorder() if err := ih.ServeHTTP(w, r); err != nil { t.Errorf("Received unexpected error: %v", err) } if actual := w.Result().StatusCode; actual != 200 { t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual) } if actual := w.Result().Header; len(actual) != 0 { t.Errorf("Not empty: expected headers to be empty, but got %#v", actual) } // handler returning an error with an HTTP status errHandler := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return Error(http.StatusTooManyRequests, nil) }) ih = newMetricsInstrumentedRoute(ctx, "foo", errHandler, metrics) r = httptest.NewRequest("GET", "/", nil) w = httptest.NewRecorder() if err := ih.ServeHTTP(w, r); err == nil { t.Errorf("expected error to be propagated") } expected := ` # HELP caddy_http_request_duration_seconds Histogram of round-trip request durations. 
# TYPE caddy_http_request_duration_seconds histogram caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.005"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.01"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.025"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.05"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.1"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.25"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="0.5"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="2.5"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="5"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="10"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_duration_seconds_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_request_size_bytes Total size of the request. 
Includes body # TYPE caddy_http_request_size_bytes histogram caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 
caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_response_size_bytes Size of the returned response. 
# TYPE caddy_http_response_size_bytes histogram caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="200",handler="bar",method="GET",server="UNKNOWN"} 12 caddy_http_response_size_bytes_count{code="200",handler="bar",method="GET",server="UNKNOWN"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 
caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="200",handler="empty",method="GET",server="UNKNOWN"} 0 caddy_http_response_size_bytes_count{code="200",handler="empty",method="GET",server="UNKNOWN"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="429",handler="foo",method="GET",server="UNKNOWN"} 0 caddy_http_response_size_bytes_count{code="429",handler="foo",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_request_errors_total Number of requests resulting in middleware errors. 
# TYPE caddy_http_request_errors_total counter caddy_http_request_errors_total{handler="bar",server="UNKNOWN"} 1 caddy_http_request_errors_total{handler="foo",server="UNKNOWN"} 1 ` if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected), "caddy_http_request_size_bytes", "caddy_http_response_size_bytes", // caddy_http_request_duration_seconds_sum will vary based on how long the test took to run, // so we check just the _bucket and _count metrics "caddy_http_request_duration_seconds_bucket", "caddy_http_request_duration_seconds_count", "caddy_http_request_errors_total", ); err != nil { t.Errorf("received unexpected error: %s", err) } } func TestMetricsInstrumentedHandlerPerHost(t *testing.T) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) metrics := &Metrics{ PerHost: true, ObserveCatchallHosts: true, // Allow all hosts for testing init: sync.Once{}, httpMetrics: &httpMetrics{}, allowedHosts: make(map[string]struct{}), } handlerErr := errors.New("oh noes") response := []byte("hello world!") h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 1.0 { t.Errorf("Not same: expected %#v, but got %#v", 1.0, actual) } if handlerErr == nil { w.Write(response) } return handlerErr }) ih := newMetricsInstrumentedRoute(ctx, "bar", h, metrics) r := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() if actual := ih.ServeHTTP(w, r); actual != handlerErr { t.Errorf("Not same: expected %#v, but got %#v", handlerErr, actual) } if actual := testutil.ToFloat64(metrics.httpMetrics.requestInFlight); actual != 0.0 { t.Errorf("Not same: expected %#v, but got %#v", 0.0, actual) } handlerErr = nil if err := ih.ServeHTTP(w, r); err != nil { t.Errorf("Received unexpected error: %v", err) } // an empty handler - no errors, no header written emptyHandler := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return 
nil }) ih = newMetricsInstrumentedRoute(ctx, "empty", emptyHandler, metrics) r = httptest.NewRequest("GET", "/", nil) w = httptest.NewRecorder() if err := ih.ServeHTTP(w, r); err != nil { t.Errorf("Received unexpected error: %v", err) } if actual := w.Result().StatusCode; actual != 200 { t.Errorf("Not same: expected status code %#v, but got %#v", 200, actual) } if actual := w.Result().Header; len(actual) != 0 { t.Errorf("Not empty: expected headers to be empty, but got %#v", actual) } // handler returning an error with an HTTP status errHandler := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return Error(http.StatusTooManyRequests, nil) }) ih = newMetricsInstrumentedRoute(ctx, "foo", errHandler, metrics) r = httptest.NewRequest("GET", "/", nil) w = httptest.NewRecorder() if err := ih.ServeHTTP(w, r); err == nil { t.Errorf("expected error to be propagated") } expected := ` # HELP caddy_http_request_duration_seconds Histogram of round-trip request durations. # TYPE caddy_http_request_duration_seconds histogram caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.005"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.01"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.025"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.05"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.1"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.25"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="0.5"} 1 
caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="2.5"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="5"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="10"} 1 caddy_http_request_duration_seconds_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_duration_seconds_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_request_size_bytes Total size of the request. Includes body # TYPE caddy_http_request_size_bytes histogram caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 
caddy_http_request_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_request_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 
caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_request_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_request_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 23 caddy_http_request_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_response_size_bytes Size of the returned response. 
# TYPE caddy_http_response_size_bytes histogram caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 12 caddy_http_response_size_bytes_count{code="200",handler="bar",host="example.com",method="GET",server="UNKNOWN"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 
caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 caddy_http_response_size_bytes_bucket{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 0 caddy_http_response_size_bytes_count{code="200",handler="empty",host="example.com",method="GET",server="UNKNOWN"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="256"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1024"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4096"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="16384"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="65536"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="262144"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="1.048576e+06"} 1 caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="4.194304e+06"} 1 
caddy_http_response_size_bytes_bucket{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN",le="+Inf"} 1 caddy_http_response_size_bytes_sum{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 0 caddy_http_response_size_bytes_count{code="429",handler="foo",host="example.com",method="GET",server="UNKNOWN"} 1 # HELP caddy_http_request_errors_total Number of requests resulting in middleware errors. # TYPE caddy_http_request_errors_total counter caddy_http_request_errors_total{handler="bar",host="example.com",server="UNKNOWN"} 1 caddy_http_request_errors_total{handler="foo",host="example.com",server="UNKNOWN"} 1 ` if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected), "caddy_http_request_size_bytes", "caddy_http_response_size_bytes", // caddy_http_request_duration_seconds_sum will vary based on how long the test took to run, // so we check just the _bucket and _count metrics "caddy_http_request_duration_seconds_bucket", "caddy_http_request_duration_seconds_count", "caddy_http_request_errors_total", ); err != nil { t.Errorf("received unexpected error: %s", err) } } func TestMetricsCardinalityProtection(t *testing.T) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) // Test 1: Without AllowCatchAllHosts, arbitrary hosts should be mapped to "_other" metrics := &Metrics{ PerHost: true, ObserveCatchallHosts: false, // Default - should map unknown hosts to "_other" init: sync.Once{}, httpMetrics: &httpMetrics{}, allowedHosts: make(map[string]struct{}), } // Add one allowed host metrics.allowedHosts["allowed.com"] = struct{}{} h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { w.Write([]byte("hello")) return nil }) ih := newMetricsInstrumentedRoute(ctx, "test", h, metrics) // Test request to allowed host r1 := httptest.NewRequest("GET", "http://allowed.com/", nil) r1.Host = "allowed.com" w1 := httptest.NewRecorder() ih.ServeHTTP(w1, r1) // Test request to 
unknown host (should be mapped to "_other") r2 := httptest.NewRequest("GET", "http://attacker.com/", nil) r2.Host = "attacker.com" w2 := httptest.NewRecorder() ih.ServeHTTP(w2, r2) // Test request to another unknown host (should also be mapped to "_other") r3 := httptest.NewRequest("GET", "http://evil.com/", nil) r3.Host = "evil.com" w3 := httptest.NewRecorder() ih.ServeHTTP(w3, r3) // Check that metrics contain: // - One entry for "allowed.com" // - One entry for "_other" (aggregating attacker.com and evil.com) expected := ` # HELP caddy_http_requests_total Counter of HTTP(S) requests made. # TYPE caddy_http_requests_total counter caddy_http_requests_total{handler="test",host="_other",server="UNKNOWN"} 2 caddy_http_requests_total{handler="test",host="allowed.com",server="UNKNOWN"} 1 ` if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected), "caddy_http_requests_total", ); err != nil { t.Errorf("Cardinality protection test failed: %s", err) } } func TestMetricsHTTPSCatchAll(t *testing.T) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) // Test that HTTPS requests allow catch-all even when AllowCatchAllHosts is false metrics := &Metrics{ PerHost: true, ObserveCatchallHosts: false, hasHTTPSServer: true, // Simulate having HTTPS servers init: sync.Once{}, httpMetrics: &httpMetrics{}, allowedHosts: make(map[string]struct{}), // Empty - no explicitly allowed hosts } h := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { w.Write([]byte("hello")) return nil }) ih := newMetricsInstrumentedRoute(ctx, "test", h, metrics) // Test HTTPS request (should be allowed even though not in allowedHosts) r1 := httptest.NewRequest("GET", "https://unknown.com/", nil) r1.Host = "unknown.com" r1.TLS = &tls.ConnectionState{} // Mark as TLS/HTTPS w1 := httptest.NewRecorder() ih.ServeHTTP(w1, r1) // Test HTTP request (should be mapped to "_other") r2 := httptest.NewRequest("GET", "http://unknown.com/", nil) r2.Host = 
"unknown.com" // No TLS field = HTTP request w2 := httptest.NewRecorder() ih.ServeHTTP(w2, r2) // Check that HTTPS request gets real host, HTTP gets "_other" expected := ` # HELP caddy_http_requests_total Counter of HTTP(S) requests made. # TYPE caddy_http_requests_total counter caddy_http_requests_total{handler="test",host="_other",server="UNKNOWN"} 1 caddy_http_requests_total{handler="test",host="unknown.com",server="UNKNOWN"} 1 ` if err := testutil.GatherAndCompare(ctx.GetMetricsRegistry(), strings.NewReader(expected), "caddy_http_requests_total", ); err != nil { t.Errorf("HTTPS catch-all test failed: %s", err) } } func TestMetricsInstrumentedRoute(t *testing.T) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) m := &Metrics{ init: sync.Once{}, httpMetrics: &httpMetrics{}, } handlerErr := errors.New("oh noes") response := []byte("hello world!") innerHandler := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if actual := testutil.ToFloat64(m.httpMetrics.requestInFlight); actual != 1.0 { t.Errorf("Expected requestInFlight to be 1.0, got %v", actual) } if handlerErr == nil { w.Write(response) } return handlerErr }) ih := newMetricsInstrumentedRoute(ctx, "test_handler", innerHandler, m) r := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() // Test with error if actual := ih.ServeHTTP(w, r); actual != handlerErr { t.Errorf("Expected error %v, got %v", handlerErr, actual) } if actual := testutil.ToFloat64(m.httpMetrics.requestInFlight); actual != 0.0 { t.Errorf("Expected requestInFlight to be 0.0 after request, got %v", actual) } if actual := testutil.ToFloat64(m.httpMetrics.requestErrors); actual != 1.0 { t.Errorf("Expected requestErrors to be 1.0, got %v", actual) } // Test without error handlerErr = nil w = httptest.NewRecorder() if err := ih.ServeHTTP(w, r); err != nil { t.Errorf("Unexpected error: %v", err) } } func BenchmarkMetricsInstrumentedRoute(b *testing.B) { ctx, _ := 
caddy.NewContext(caddy.Context{Context: context.Background()}) m := &Metrics{ init: sync.Once{}, httpMetrics: &httpMetrics{}, } noopHandler := HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { w.Write([]byte("ok")) return nil }) ih := newMetricsInstrumentedRoute(ctx, "bench_handler", noopHandler, m) r := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { ih.ServeHTTP(w, r) } } // BenchmarkSingleRouteMetrics simulates the new behavior where metrics // are collected once for the entire route. func BenchmarkSingleRouteMetrics(b *testing.B) { ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()}) m := &Metrics{ init: sync.Once{}, httpMetrics: &httpMetrics{}, } // Build a chain of 5 plain middleware handlers (no per-handler metrics) var next Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return nil }) for i := 0; i < 5; i++ { capturedNext := next next = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return capturedNext.ServeHTTP(w, r) }) } // Wrap the entire chain with a single route-level metrics handler ih := newMetricsInstrumentedRoute(ctx, "handler", next, m) r := httptest.NewRequest("GET", "/", nil) w := httptest.NewRecorder() b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { ih.ServeHTTP(w, r) } } ================================================ FILE: modules/caddyhttp/proxyprotocol/listenerwrapper.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxyprotocol

import (
	"net"
	"net/netip"
	"time"

	goproxy "github.com/pires/go-proxyproto"

	"github.com/caddyserver/caddy/v2"
)

// ListenerWrapper provides PROXY protocol support to Caddy by implementing
// the caddy.ListenerWrapper interface. If a connection is received via Unix
// socket, it's trusted. Otherwise, it's checked against the Allow/Deny lists,
// then it's handled by the FallbackPolicy.
//
// It must be loaded before the `tls` listener because the PROXY protocol
// encapsulates the TLS data.
//
// Credit goes to https://github.com/mastercactapus/caddy2-proxyprotocol for having
// initially implemented this as a plugin.
type ListenerWrapper struct {
	// Timeout specifies an optional maximum time for
	// the PROXY header to be received.
	// If zero, timeout is disabled. Default is 5s.
	Timeout caddy.Duration `json:"timeout,omitempty"`

	// Allow is an optional list of CIDR ranges to
	// allow/require PROXY headers from.
	Allow []string `json:"allow,omitempty"`
	// allow is the parsed form of Allow, populated during Provision.
	allow []netip.Prefix

	// Deny is an optional list of CIDR ranges to
	// deny PROXY headers from.
	Deny []string `json:"deny,omitempty"`
	// deny is the parsed form of Deny, populated during Provision.
	deny []netip.Prefix

	// FallbackPolicy specifies the policy to use if the downstream
	// IP address is not in the Allow list nor is in the Deny list.
	//
	// NOTE: The generated docs which describe the value of this
	// field are wrong because of how this type unmarshals JSON in a
	// custom way. The field expects a string, not a number.
	//
	// Accepted values are: IGNORE, USE, REJECT, REQUIRE, SKIP
	//
	// - IGNORE: address from PROXY header, but accept connection
	//
	// - USE: address from PROXY header
	//
	// - REJECT: connection when PROXY header is sent
	// Note: even though the first read on the connection returns an error if
	// a PROXY header is present, subsequent reads do not. It is the task of
	// the code using the connection to handle that case properly.
	//
	// - REQUIRE: connection to send PROXY header, reject if not present
	// Note: even though the first read on the connection returns an error if
	// a PROXY header is not present, subsequent reads do not. It is the task
	// of the code using the connection to handle that case properly.
	//
	// - SKIP: accepts a connection without requiring the PROXY header.
	// Note: an example usage can be found in the SkipProxyHeaderForCIDR
	// function.
	//
	// Default: IGNORE
	//
	// Policy definitions are here: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
	FallbackPolicy Policy `json:"fallback_policy,omitempty"`

	// policy is the connection-policy callback built in Provision
	// and handed to the go-proxyproto listener in WrapListener.
	policy goproxy.ConnPolicyFunc
}

// Provision sets up the listener wrapper.
func (pp *ListenerWrapper) Provision(ctx caddy.Context) error { for _, cidr := range pp.Allow { ipnet, err := netip.ParsePrefix(cidr) if err != nil { return err } pp.allow = append(pp.allow, ipnet) } for _, cidr := range pp.Deny { ipnet, err := netip.ParsePrefix(cidr) if err != nil { return err } pp.deny = append(pp.deny, ipnet) } pp.policy = func(options goproxy.ConnPolicyOptions) (goproxy.Policy, error) { // trust unix sockets if network := options.Upstream.Network(); caddy.IsUnixNetwork(network) || caddy.IsFdNetwork(network) { return goproxy.USE, nil } ret := pp.FallbackPolicy host, _, err := net.SplitHostPort(options.Upstream.String()) if err != nil { return goproxy.REJECT, err } ip, err := netip.ParseAddr(host) if err != nil { return goproxy.REJECT, err } for _, ipnet := range pp.deny { if ipnet.Contains(ip) { return goproxy.REJECT, nil } } for _, ipnet := range pp.allow { if ipnet.Contains(ip) { ret = PolicyUSE break } } return policyToGoProxyPolicy[ret], nil } return nil } // WrapListener adds PROXY protocol support to the listener. func (pp *ListenerWrapper) WrapListener(l net.Listener) net.Listener { pl := &goproxy.Listener{ Listener: l, ReadHeaderTimeout: time.Duration(pp.Timeout), } pl.ConnPolicy = pp.policy return pl } ================================================ FILE: modules/caddyhttp/proxyprotocol/module.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package proxyprotocol

import (
	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(ListenerWrapper{})
}

// CaddyModule returns the Caddy module information.
func (ListenerWrapper) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.listeners.proxy_protocol",
		New: func() caddy.Module { return new(ListenerWrapper) },
	}
}

// UnmarshalCaddyfile sets up the listener wrapper from Caddyfile tokens. Syntax:
//
//	proxy_protocol {
//		timeout <duration>
//		allow <cidrs...>
//		deny <cidrs...>
//		fallback_policy <policy>
//	}
func (w *ListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume wrapper name

	// No same-line options are supported
	if d.NextArg() {
		return d.ArgErr()
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "timeout":
			// expects exactly one duration argument
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("parsing proxy_protocol timeout duration: %v", err)
			}
			w.Timeout = caddy.Duration(dur)

		case "allow":
			// all remaining same-line arguments are CIDR ranges;
			// parsing/validation happens later, in Provision
			w.Allow = append(w.Allow, d.RemainingArgs()...)

		case "deny":
			w.Deny = append(w.Deny, d.RemainingArgs()...)

		case "fallback_policy":
			if !d.NextArg() {
				return d.ArgErr()
			}
			p, err := parsePolicy(d.Val())
			if err != nil {
				return d.WrapErr(err)
			}
			w.FallbackPolicy = p

		default:
			return d.ArgErr()
		}
	}
	return nil
}

// Interface guards
var (
	_ caddy.Provisioner     = (*ListenerWrapper)(nil)
	_ caddy.Module          = (*ListenerWrapper)(nil)
	_ caddy.ListenerWrapper = (*ListenerWrapper)(nil)
	_ caddyfile.Unmarshaler = (*ListenerWrapper)(nil)
)

================================================
FILE: modules/caddyhttp/proxyprotocol/policy.go
================================================
package proxyprotocol

import (
	"errors"
	"fmt"
	"strings"

	goproxy "github.com/pires/go-proxyproto"
)

// Policy is this package's representation of a PROXY protocol
// connection policy.
type Policy int

// as defined in: https://pkg.go.dev/github.com/pires/go-proxyproto@v0.7.0#Policy
const (
	// IGNORE address from PROXY header, but accept connection
	PolicyIGNORE Policy = iota
	// USE address from PROXY header
	PolicyUSE
	// REJECT connection when PROXY header is sent
	// Note: even though the first read on the connection returns an error if
	// a PROXY header is present, subsequent reads do not. It is the task of
	// the code using the connection to handle that case properly.
	PolicyREJECT
	// REQUIRE connection to send PROXY header, reject if not present
	// Note: even though the first read on the connection returns an error if
	// a PROXY header is not present, subsequent reads do not. It is the task
	// of the code using the connection to handle that case properly.
	PolicyREQUIRE
	// SKIP accepts a connection without requiring the PROXY header
	// Note: an example usage can be found in the SkipProxyHeaderForCIDR
	// function.
PolicySKIP ) var policyToGoProxyPolicy = map[Policy]goproxy.Policy{ PolicyUSE: goproxy.USE, PolicyIGNORE: goproxy.IGNORE, PolicyREJECT: goproxy.REJECT, PolicyREQUIRE: goproxy.REQUIRE, PolicySKIP: goproxy.SKIP, } var policyMap = map[Policy]string{ PolicyUSE: "USE", PolicyIGNORE: "IGNORE", PolicyREJECT: "REJECT", PolicyREQUIRE: "REQUIRE", PolicySKIP: "SKIP", } var policyMapRev = map[string]Policy{ "USE": PolicyUSE, "IGNORE": PolicyIGNORE, "REJECT": PolicyREJECT, "REQUIRE": PolicyREQUIRE, "SKIP": PolicySKIP, } // MarshalText implements the text marshaller method. func (x Policy) MarshalText() ([]byte, error) { return []byte(policyMap[x]), nil } // UnmarshalText implements the text unmarshaller method. func (x *Policy) UnmarshalText(text []byte) error { name := string(text) tmp, err := parsePolicy(name) if err != nil { return err } *x = tmp return nil } func parsePolicy(name string) (Policy, error) { if x, ok := policyMapRev[strings.ToUpper(name)]; ok { return x, nil } return Policy(0), fmt.Errorf("%s is %w", name, errInvalidPolicy) } var errInvalidPolicy = errors.New("invalid policy") ================================================ FILE: modules/caddyhttp/push/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package push

import (
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("push", parseCaddyfile)
}

// parseCaddyfile sets up the push handler. Syntax:
//
//	push [<matcher>] [<resource>] {
//		[GET|HEAD] <resource>
//		headers {
//			[+]<field> [<value|regexp> [<replacement>]]
//			-<field>
//		}
//	}
//
// A single resource can be specified inline without opening a
// block for the most common/simple case. Or, a block can be
// opened and multiple resources can be specified, one per
// line, optionally preceded by the method. The headers
// subdirective can be used to customize the headers that
// are set on each (synthetic) push request, using the same
// syntax as the 'header' directive for request headers.
// Placeholders are accepted in resource and header field
// name and value and replacement tokens.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	hndlr := new(Handler)

	// a single resource may be given inline on the directive line
	if h.NextArg() {
		hndlr.Resources = append(hndlr.Resources, Resource{Target: h.Val()})
	}

	// any further configuration lives in an optional block
	for h.NextBlock(0) {
		switch val := h.Val(); val {
		case "headers":
			if h.NextArg() {
				return nil, h.ArgErr()
			}
			for nesting := h.Nesting(); h.NextBlock(nesting); {
				// the current token is itself the first argument
				// nolint:prealloc
				args := append([]string{h.Val()}, h.RemainingArgs()...)

				if hndlr.Headers == nil {
					hndlr.Headers = new(HeaderConfig)
				}

				var err error
				switch len(args) {
				case 1:
					err = headers.CaddyfileHeaderOp(&hndlr.Headers.HeaderOps, args[0], "", nil)
				case 2:
					err = headers.CaddyfileHeaderOp(&hndlr.Headers.HeaderOps, args[0], args[1], nil)
				case 3:
					err = headers.CaddyfileHeaderOp(&hndlr.Headers.HeaderOps, args[0], args[1], &args[2])
				default:
					return nil, h.ArgErr()
				}
				if err != nil {
					return nil, h.Err(err.Error())
				}
			}

		case "GET", "HEAD":
			// an explicit method, followed by the target
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			hndlr.Resources = append(hndlr.Resources, Resource{
				Method: val,
				Target: h.Val(),
			})

		default:
			// a bare token is a target using the default method
			hndlr.Resources = append(hndlr.Resources, Resource{Target: val})
		}
	}
	return hndlr, nil
}

================================================
FILE: modules/caddyhttp/push/handler.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push

import (
	"fmt"
	"net/http"
	"strings"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
)

func init() {
	caddy.RegisterModule(Handler{})
}

// Handler is a middleware for HTTP/2 server push.
Note that
// HTTP/2 server push has been deprecated by some clients and
// its use is discouraged unless you can accurately predict
// which resources actually need to be pushed to the client;
// it can be difficult to know what the client already has
// cached. Pushing unnecessary resources results in worse
// performance. Consider using HTTP 103 Early Hints instead.
//
// This handler supports pushing from Link headers; in other
// words, if the eventual response has Link headers, this
// handler will push the resources indicated by those headers,
// even without specifying any resources in its config.
type Handler struct {
	// The resources to push.
	Resources []Resource `json:"resources,omitempty"`

	// Headers to modify for the push requests.
	Headers *HeaderConfig `json:"headers,omitempty"`

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.push",
		New: func() caddy.Module { return new(Handler) },
	}
}

// Provision sets up h.
func (h *Handler) Provision(ctx caddy.Context) error {
	h.logger = ctx.Logger()
	if h.Headers == nil {
		return nil
	}
	if err := h.Headers.Provision(ctx); err != nil {
		return fmt.Errorf("provisioning header operations: %v", err)
	}
	return nil
}

// ServeHTTP pushes the configured resources (and, via linkPusher, any
// Link-header resources) before letting the rest of the chain write
// the response.
func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	pusher, ok := w.(http.Pusher)
	if !ok {
		// this connection does not support push; nothing for us to do
		return next.ServeHTTP(w, r)
	}

	// short-circuit recursive pushes
	if _, isPush := r.Header[pushHeader]; isPush {
		return next.ServeHTTP(w, r)
	}

	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	server := r.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)
	shouldLogCredentials := server.Logs != nil && server.Logs.ShouldLogCredentials

	// create header for push requests
	hdr := h.initializePushHeaders(r, repl)

	// push first!
	for _, res := range h.Resources {
		if c := h.logger.Check(zapcore.DebugLevel, "pushing resource"); c != nil {
			c.Write(
				zap.String("uri", r.RequestURI),
				zap.String("push_method", res.Method),
				zap.String("push_target", res.Target),
				zap.Object("push_headers", caddyhttp.LoggableHTTPHeader{
					Header:               hdr,
					ShouldLogCredentials: shouldLogCredentials,
				}),
			)
		}
		if err := pusher.Push(repl.ReplaceAll(res.Target, "."), &http.PushOptions{
			Method: res.Method,
			Header: hdr,
		}); err != nil {
			// usually this means either that push is not
			// supported or concurrent streams are full
			break
		}
	}

	// wrap the response writer so that we can initiate push of any resources
	// described in Link header fields before the response is written
	lp := linkPusher{
		ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w},
		handler:               h,
		pusher:                pusher,
		header:                hdr,
		request:               r,
	}

	// serve only after pushing!
	return next.ServeHTTP(lp, r)
}

// initializePushHeaders builds the header set attached to each synthetic
// push request: a recursion guard, a conservative copy of safe request
// headers, and any user-configured header operations.
func (h Handler) initializePushHeaders(r *http.Request, repl *caddy.Replacer) http.Header {
	pushReqHdr := make(http.Header)

	// prevent recursive pushes
	pushReqHdr.Set(pushHeader, "1")

	// set initial header fields; since exactly how headers should
	// be implemented for server push is not well-understood, we
	// are being conservative for now like httpd is:
	// https://httpd.apache.org/docs/2.4/en/howto/http2.html#push
	// we only copy some well-known, safe headers that are likely
	// crucial when requesting certain kinds of content
	for _, fieldName := range safeHeaders {
		if vals, ok := r.Header[fieldName]; ok {
			pushReqHdr[fieldName] = vals
		}
	}

	// user can customize the push request headers
	if h.Headers != nil {
		h.Headers.ApplyTo(pushReqHdr, repl)
	}

	return pushReqHdr
}

// servePreloadLinks parses Link headers from upstream and pushes
// resources described by them. If a resource has the "nopush"
// attribute or describes an external entity (meaning, the resource
// URI includes a scheme), it will not be pushed.
func (h Handler) servePreloadLinks(pusher http.Pusher, hdr http.Header, resources []string) { for _, resource := range resources { for _, resource := range parseLinkHeader(resource) { if _, ok := resource.params["nopush"]; ok { continue } if isRemoteResource(resource.uri) { continue } err := pusher.Push(resource.uri, &http.PushOptions{ Header: hdr, }) if err != nil { return } } } } // Resource represents a request for a resource to push. type Resource struct { // Method is the request method, which must be GET or HEAD. // Default is GET. Method string `json:"method,omitempty"` // Target is the path to the resource being pushed. Target string `json:"target,omitempty"` } // HeaderConfig configures headers for synthetic push requests. type HeaderConfig struct { headers.HeaderOps } // linkPusher is a http.ResponseWriter that intercepts // the WriteHeader() call to ensure that any resources // described by Link response headers get pushed before // the response is allowed to be written. type linkPusher struct { *caddyhttp.ResponseWriterWrapper handler Handler pusher http.Pusher header http.Header request *http.Request } func (lp linkPusher) WriteHeader(statusCode int) { if links, ok := lp.ResponseWriter.Header()["Link"]; ok { // only initiate these pushes if it hasn't been done yet if val := caddyhttp.GetVar(lp.request.Context(), pushedLink); val == nil { if c := lp.handler.logger.Check(zapcore.DebugLevel, "pushing Link resources"); c != nil { c.Write(zap.Strings("linked", links)) } caddyhttp.SetVar(lp.request.Context(), pushedLink, true) lp.handler.servePreloadLinks(lp.pusher, lp.header, links) } } lp.ResponseWriter.WriteHeader(statusCode) } // isRemoteResource returns true if resource starts with // a scheme or is a protocol-relative URI. 
func isRemoteResource(resource string) bool {
	return strings.HasPrefix(resource, "//") ||
		strings.HasPrefix(resource, "http://") ||
		strings.HasPrefix(resource, "https://")
}

// safeHeaders is a list of header fields that are
// safe to copy to push requests implicitly. It is
// assumed that requests for certain kinds of content
// would fail without these fields present.
var safeHeaders = []string{
	"Accept-Encoding",
	"Accept-Language",
	"Accept",
	"Cache-Control",
	"User-Agent",
}

// pushHeader is a header field that gets added to push requests
// in order to avoid recursive/infinite pushes.
const pushHeader = "Caddy-Push"

// pushedLink is the key for the variable on the request
// context that we use to remember whether we have already
// pushed resources from Link headers yet; otherwise, if
// multiple push handlers are invoked, it would repeat the
// pushing of Link headers.
const pushedLink = "http.handlers.push.pushed_link"

// Interface guards
var (
	_ caddy.Provisioner           = (*Handler)(nil)
	_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
	_ http.ResponseWriter         = (*linkPusher)(nil)
	_ http.Pusher                 = (*linkPusher)(nil)
)

================================================
FILE: modules/caddyhttp/push/link.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push

import (
	"strings"
)

// linkResource contains the results of a parsed Link header.
type linkResource struct {
	uri    string                 // the URI between the angle brackets
	params map[string]string      // link parameters; valueless params map to their own name
}

// parseLinkHeader is responsible for parsing Link header
// and returning list of found resources.
//
// Accepted formats are:
//
//	Link: <resource>; as=script
//	Link: <resource>; as=script,<resource2>; as=style
//	Link: <resource>;<param>
//
// where <resource> begins with a forward slash (/).
func parseLinkHeader(header string) []linkResource {
	resources := []linkResource{}
	if header == "" {
		return resources
	}

	// each comma-separated element is one linked resource
	for link := range strings.SplitSeq(header, comma) {
		l := linkResource{params: make(map[string]string)}

		// the URI must be enclosed in angle brackets;
		// anything else is malformed and skipped
		li, ri := strings.Index(link, "<"), strings.Index(link, ">")
		if li == -1 || ri == -1 {
			continue
		}

		l.uri = strings.TrimSpace(link[li+1 : ri])

		// everything after ">" is semicolon-separated parameters;
		// "key=value" stores the value, a bare "key" stores the key itself
		for param := range strings.SplitSeq(strings.TrimSpace(link[ri+1:]), semicolon) {
			before, after, isCut := strings.Cut(strings.TrimSpace(param), equal)
			key := strings.TrimSpace(before)
			if key == "" {
				continue
			}
			if isCut {
				l.params[key] = strings.TrimSpace(after)
			} else {
				l.params[key] = key
			}
		}

		resources = append(resources, l)
	}

	return resources
}

const (
	comma     = ","
	semicolon = ";"
	equal     = "="
)

================================================
FILE: modules/caddyhttp/push/link_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push

import (
	"reflect"
	"testing"
)

// TestParseLinkHeader exercises parseLinkHeader with well-formed,
// parameterized, multi-resource, and malformed Link header values.
// NOTE(review): the `<.../>`-delimited URI spans were stripped from the
// string literals by the text extraction (leaving e.g. "; as=script"),
// which made every non-empty expectation unsatisfiable; the literals
// are restored here consistent with the expected "/resource" and
// "/resource2" URIs and parseLinkHeader's "<"/">" scanning.
func TestParseLinkHeader(t *testing.T) {
	testCases := []struct {
		header            string
		expectedResources []linkResource
	}{
		{
			header:            "</resource>; as=script",
			expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"as": "script"}}},
		},
		{
			header:            "</resource>",
			expectedResources: []linkResource{{uri: "/resource", params: map[string]string{}}},
		},
		{
			header:            "</resource>; nopush",
			expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"nopush": "nopush"}}},
		},
		{
			header:            "</resource>;nopush;rel=next",
			expectedResources: []linkResource{{uri: "/resource", params: map[string]string{"nopush": "nopush", "rel": "next"}}},
		},
		{
			header: "</resource>;nopush;rel=next,</resource2>;nopush",
			expectedResources: []linkResource{
				{uri: "/resource", params: map[string]string{"nopush": "nopush", "rel": "next"}},
				{uri: "/resource2", params: map[string]string{"nopush": "nopush"}},
			},
		},
		{
			header: "</resource>,</resource2>",
			expectedResources: []linkResource{
				{uri: "/resource", params: map[string]string{}},
				{uri: "/resource2", params: map[string]string{}},
			},
		},
		{
			header:            "malformed",
			expectedResources: []linkResource{},
		},
		{
			header:            "</resource> ; ",
			expectedResources: []linkResource{{uri: "/resource", params: map[string]string{}}},
		},
	}

	for i, test := range testCases {
		actualResources := parseLinkHeader(test.header)
		if !reflect.DeepEqual(actualResources, test.expectedResources) {
			t.Errorf("Test %d (header: %s) - expected resources %v, got %v",
				i, test.header, test.expectedResources, actualResources)
		}
	}
}

================================================
FILE: modules/caddyhttp/replacer.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "bytes" "context" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" "crypto/sha256" "crypto/tls" "crypto/x509" "encoding/asn1" "encoding/base64" "encoding/pem" "fmt" "io" "net" "net/http" "net/netip" "net/textproto" "net/url" "path" "strconv" "strings" "time" "github.com/google/uuid" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddytls" ) // NewTestReplacer creates a replacer for an http.Request // for use in tests that are not in this package func NewTestReplacer(req *http.Request) *caddy.Replacer { repl := caddy.NewReplacer() ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) *req = *req.WithContext(ctx) addHTTPVarsToReplacer(repl, req, nil) return repl } func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.ResponseWriter) { SetVar(req.Context(), "start_time", time.Now()) SetVar(req.Context(), "uuid", new(requestID)) httpVars := func(key string) (any, bool) { if req != nil { // query string parameters if strings.HasPrefix(key, reqURIQueryReplPrefix) { vals := req.URL.Query()[key[len(reqURIQueryReplPrefix):]] // always return true, since the query param might // be present only in some requests return strings.Join(vals, ","), true } // request header fields if strings.HasPrefix(key, reqHeaderReplPrefix) { field := key[len(reqHeaderReplPrefix):] vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)] // always return true, since the header field might // be present only in some requests return strings.Join(vals, ","), true } // cookies if 
strings.HasPrefix(key, reqCookieReplPrefix) { name := key[len(reqCookieReplPrefix):] for _, cookie := range req.Cookies() { if strings.EqualFold(name, cookie.Name) { // always return true, since the cookie might // be present only in some requests return cookie.Value, true } } } // http.request.tls.* if strings.HasPrefix(key, reqTLSReplPrefix) { return getReqTLSReplacement(req, key) } switch key { case "http.request.method": return req.Method, true case "http.request.scheme": if req.TLS != nil { return "https", true } return "http", true case "http.request.proto": return req.Proto, true case "http.request.host": host, _, err := net.SplitHostPort(req.Host) if err != nil { return req.Host, true // OK; there probably was no port } return host, true case "http.request.port": _, port, _ := net.SplitHostPort(req.Host) if portNum, err := strconv.Atoi(port); err == nil { return portNum, true } return port, true case "http.request.hostport": return req.Host, true case "http.request.local": localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr) return localAddr.String(), true case "http.request.local.host": localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr) host, _, err := net.SplitHostPort(localAddr.String()) if err != nil { // localAddr is host:port for tcp and udp sockets and /unix/socket.path // for unix sockets. net.SplitHostPort only operates on tcp and udp sockets, // not unix sockets and will fail with the latter. // We assume when net.SplitHostPort fails, localAddr is a unix socket and thus // already "split" and save to return. 
return localAddr, true } return host, true case "http.request.local.port": localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr) _, port, _ := net.SplitHostPort(localAddr.String()) if portNum, err := strconv.Atoi(port); err == nil { return portNum, true } return port, true case "http.request.remote": if req.TLS != nil && !req.TLS.HandshakeComplete { // without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed return nil, true } return req.RemoteAddr, true case "http.request.remote.host": if req.TLS != nil && !req.TLS.HandshakeComplete { // without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed return nil, true } host, _, err := net.SplitHostPort(req.RemoteAddr) if err != nil { // req.RemoteAddr is host:port for tcp and udp sockets and /unix/socket.path // for unix sockets. net.SplitHostPort only operates on tcp and udp sockets, // not unix sockets and will fail with the latter. // We assume when net.SplitHostPort fails, req.RemoteAddr is a unix socket // and thus already "split" and save to return. 
return req.RemoteAddr, true } return host, true case "http.request.remote.port": _, port, _ := net.SplitHostPort(req.RemoteAddr) if portNum, err := strconv.Atoi(port); err == nil { return portNum, true } return port, true // current URI, including any internal rewrites case "http.request.uri": return req.URL.RequestURI(), true case "http.request.uri_escaped": return url.QueryEscape(req.URL.RequestURI()), true case "http.request.uri.path": return req.URL.Path, true case "http.request.uri.path_escaped": return url.QueryEscape(req.URL.Path), true case "http.request.uri.path.file": _, file := path.Split(req.URL.Path) return file, true case "http.request.uri.path.dir": dir, _ := path.Split(req.URL.Path) return dir, true case "http.request.uri.path.file.base": return strings.TrimSuffix(path.Base(req.URL.Path), path.Ext(req.URL.Path)), true case "http.request.uri.path.file.ext": return path.Ext(req.URL.Path), true case "http.request.uri.query": return req.URL.RawQuery, true case "http.request.uri.query_escaped": return url.QueryEscape(req.URL.RawQuery), true case "http.request.uri.prefixed_query": if req.URL.RawQuery == "" { return "", true } return "?" 
+ req.URL.RawQuery, true case "http.request.duration": start := GetVar(req.Context(), "start_time").(time.Time) return time.Since(start), true case "http.request.duration_ms": start := GetVar(req.Context(), "start_time").(time.Time) return time.Since(start).Seconds() * 1e3, true // multiply seconds to preserve decimal (see #4666) case "http.request.uuid": // fetch the UUID for this request id := GetVar(req.Context(), "uuid").(*requestID) // set it to this request's access log extra := req.Context().Value(ExtraLogFieldsCtxKey).(*ExtraLogFields) extra.Set(zap.String("uuid", id.String())) return id.String(), true case "http.request.body": if req.Body == nil { return "", true } // normally net/http will close the body for us, but since we // are replacing it with a fake one, we have to ensure we close // the real body ourselves when we're done defer req.Body.Close() // read the request body into a buffer (can't pool because we // don't know its lifetime and would have to make a copy anyway) buf := new(bytes.Buffer) _, _ = io.Copy(buf, req.Body) // can't handle error, so just ignore it req.Body = io.NopCloser(buf) // replace real body with buffered data return buf.String(), true case "http.request.body_base64": if req.Body == nil { return "", true } // normally net/http will close the body for us, but since we // are replacing it with a fake one, we have to ensure we close // the real body ourselves when we're done defer req.Body.Close() // read the request body into a buffer (can't pool because we // don't know its lifetime and would have to make a copy anyway) buf := new(bytes.Buffer) _, _ = io.Copy(buf, req.Body) // can't handle error, so just ignore it req.Body = io.NopCloser(buf) // replace real body with buffered data return base64.StdEncoding.EncodeToString(buf.Bytes()), true // original request, before any internal changes case "http.request.orig_method": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) return or.Method, true case 
"http.request.orig_uri": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) return or.RequestURI, true case "http.request.orig_uri.path": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) return or.URL.Path, true case "http.request.orig_uri.path.file": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) _, file := path.Split(or.URL.Path) return file, true case "http.request.orig_uri.path.dir": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) dir, _ := path.Split(or.URL.Path) return dir, true case "http.request.orig_uri.query": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) return or.URL.RawQuery, true case "http.request.orig_uri.prefixed_query": or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) if or.URL.RawQuery == "" { return "", true } return "?" + or.URL.RawQuery, true } // remote IP range/prefix (e.g. keep top 24 bits of 1.2.3.4 => "1.2.3.0/24") // syntax: "/V4,V6" where V4 = IPv4 bits, and V6 = IPv6 bits; if no comma, then same bit length used for both // (EXPERIMENTAL) if strings.HasPrefix(key, "http.request.remote.host/") { host, _, err := net.SplitHostPort(req.RemoteAddr) if err != nil { host = req.RemoteAddr // assume no port, I guess? 
} addr, err := netip.ParseAddr(host) if err != nil { return host, true // not an IP address } // extract the bits from the end of the placeholder (start after "/") then split on "," bitsBoth := key[strings.Index(key, "/")+1:] ipv4BitsStr, ipv6BitsStr, cutOK := strings.Cut(bitsBoth, ",") bitsStr := ipv4BitsStr if addr.Is6() && cutOK { bitsStr = ipv6BitsStr } // convert to integer then compute prefix bits, err := strconv.Atoi(bitsStr) if err != nil { return "", true } prefix, err := addr.Prefix(bits) if err != nil { return "", true } return prefix.String(), true } // hostname labels (case insensitive, so normalize to lowercase) if strings.HasPrefix(key, reqHostLabelsReplPrefix) { idxStr := key[len(reqHostLabelsReplPrefix):] idx, err := strconv.Atoi(idxStr) if err != nil || idx < 0 { return "", false } reqHost, _, err := net.SplitHostPort(req.Host) if err != nil { reqHost = req.Host // OK; assume there was no port } hostLabels := strings.Split(reqHost, ".") if idx >= len(hostLabels) { return "", true } return strings.ToLower(hostLabels[len(hostLabels)-idx-1]), true } // path parts if strings.HasPrefix(key, reqURIPathReplPrefix) { idxStr := key[len(reqURIPathReplPrefix):] idx, err := strconv.Atoi(idxStr) if err != nil { return "", false } pathParts := strings.Split(req.URL.Path, "/") if len(pathParts) > 0 && pathParts[0] == "" { pathParts = pathParts[1:] } if idx < 0 { return "", false } if idx >= len(pathParts) { return "", true } return pathParts[idx], true } // orig uri path parts if strings.HasPrefix(key, reqOrigURIPathReplPrefix) { idxStr := key[len(reqOrigURIPathReplPrefix):] idx, err := strconv.Atoi(idxStr) if err != nil { return "", false } or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) pathParts := strings.Split(or.URL.Path, "/") if len(pathParts) > 0 && pathParts[0] == "" { pathParts = pathParts[1:] } if idx < 0 { return "", false } if idx >= len(pathParts) { return "", true } return pathParts[idx], true } // middleware variables if 
strings.HasPrefix(key, varsReplPrefix) { varName := key[len(varsReplPrefix):] raw := GetVar(req.Context(), varName) // variables can be dynamic, so always return true // even when it may not be set; treat as empty then return raw, true } } if w != nil { // response header fields if strings.HasPrefix(key, respHeaderReplPrefix) { field := key[len(respHeaderReplPrefix):] vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)] // always return true, since the header field might // be present only in some responses return strings.Join(vals, ","), true } } switch key { case "http.shutting_down": server := req.Context().Value(ServerCtxKey).(*Server) server.shutdownAtMu.RLock() defer server.shutdownAtMu.RUnlock() return !server.shutdownAt.IsZero(), true case "http.time_until_shutdown": server := req.Context().Value(ServerCtxKey).(*Server) server.shutdownAtMu.RLock() defer server.shutdownAtMu.RUnlock() if server.shutdownAt.IsZero() { return nil, true } return time.Until(server.shutdownAt), true } return nil, false } repl.Map(httpVars) } func getReqTLSReplacement(req *http.Request, key string) (any, bool) { if req == nil || req.TLS == nil { return nil, false } if len(key) < len(reqTLSReplPrefix) { return nil, false } field := strings.ToLower(key[len(reqTLSReplPrefix):]) if strings.HasPrefix(field, "client.") { cert := getTLSPeerCert(req.TLS) if cert == nil { // Instead of returning (nil, false) here, we set it to a dummy // value to fix #7530. This way, even if there is no client cert, // evaluating placeholders with ReplaceKnown() will still remove // the placeholder, which would be expected. It is not expected // for the placeholder to sometimes get removed based on whether // the client presented a cert. We also do not return true here // because we probably should remain accurate about whether a // placeholder is, in fact, known or not. // (This allocation may be slightly inefficient.) 
cert = new(x509.Certificate) } // subject alternate names (SANs) if strings.HasPrefix(field, "client.san.") { field = field[len("client.san."):] var fieldName string var fieldValue any switch { case strings.HasPrefix(field, "dns_names"): fieldName = "dns_names" fieldValue = cert.DNSNames case strings.HasPrefix(field, "emails"): fieldName = "emails" fieldValue = cert.EmailAddresses case strings.HasPrefix(field, "ips"): fieldName = "ips" fieldValue = cert.IPAddresses case strings.HasPrefix(field, "uris"): fieldName = "uris" fieldValue = cert.URIs default: return nil, false } field = field[len(fieldName):] // if no index was specified, return the whole list if field == "" { return fieldValue, true } if len(field) < 2 || field[0] != '.' { return nil, false } field = field[1:] // trim '.' between field name and index // get the numeric index idx, err := strconv.Atoi(field) if err != nil || idx < 0 { return nil, false } // access the indexed element and return it switch v := fieldValue.(type) { case []string: if idx >= len(v) { return nil, true } return v[idx], true case []net.IP: if idx >= len(v) { return nil, true } return v[idx], true case []*url.URL: if idx >= len(v) { return nil, true } return v[idx], true } } switch field { case "client.fingerprint": return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)), true case "client.public_key", "client.public_key_sha256": if cert.PublicKey == nil { return nil, true } pubKeyBytes, err := marshalPublicKey(cert.PublicKey) if err != nil { return nil, true } if strings.HasSuffix(field, "_sha256") { return fmt.Sprintf("%x", sha256.Sum256(pubKeyBytes)), true } return fmt.Sprintf("%x", pubKeyBytes), true case "client.issuer": return cert.Issuer, true case "client.serial": return cert.SerialNumber, true case "client.subject": return cert.Subject, true case "client.certificate_pem": block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} return pem.EncodeToMemory(&block), true case "client.certificate_der_base64": return 
base64.StdEncoding.EncodeToString(cert.Raw), true default: return nil, false } } switch field { case "version": return caddytls.ProtocolName(req.TLS.Version), true case "cipher_suite": return tls.CipherSuiteName(req.TLS.CipherSuite), true case "resumed": return req.TLS.DidResume, true case "proto": return req.TLS.NegotiatedProtocol, true case "proto_mutual": // req.TLS.NegotiatedProtocolIsMutual is deprecated - it's always true. return true, true case "server_name": return req.TLS.ServerName, true case "ech": return req.TLS.ECHAccepted, true } return nil, false } // marshalPublicKey returns the byte encoding of pubKey. func marshalPublicKey(pubKey any) ([]byte, error) { switch key := pubKey.(type) { case *rsa.PublicKey: return asn1.Marshal(key) case *ecdsa.PublicKey: e, err := key.ECDH() if err != nil { return nil, err } return e.Bytes(), nil case ed25519.PublicKey: return key, nil } return nil, fmt.Errorf("unrecognized public key type: %T", pubKey) } // getTLSPeerCert retrieves the first peer certificate from a TLS session. // Returns nil if no peer cert is in use. func getTLSPeerCert(cs *tls.ConnectionState) *x509.Certificate { if len(cs.PeerCertificates) == 0 { return nil } return cs.PeerCertificates[0] } type requestID struct { value string } // Lazy generates UUID string or return cached value if present func (rid *requestID) String() string { if rid.value == "" { if id, err := uuid.NewRandom(); err == nil { rid.value = id.String() } } return rid.value } const ( reqCookieReplPrefix = "http.request.cookie." reqHeaderReplPrefix = "http.request.header." reqHostLabelsReplPrefix = "http.request.host.labels." reqTLSReplPrefix = "http.request.tls." reqURIPathReplPrefix = "http.request.uri.path." reqURIQueryReplPrefix = "http.request.uri.query." respHeaderReplPrefix = "http.response.header." varsReplPrefix = "http.vars." reqOrigURIPathReplPrefix = "http.request.orig_uri.path." 
) ================================================ FILE: modules/caddyhttp/replacer_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "context" "crypto/tls" "crypto/x509" "encoding/pem" "net" "net/http" "net/http/httptest" "testing" "github.com/caddyserver/caddy/v2" ) func TestHTTPVarReplacement(t *testing.T) { req, _ := http.NewRequest(http.MethodGet, "/foo/bar.tar.gz?a=1&b=2", nil) repl := caddy.NewReplacer() localAddr, _ := net.ResolveTCPAddr("tcp", "192.168.159.1:80") ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) ctx = context.WithValue(ctx, http.LocalAddrContextKey, localAddr) req = req.WithContext(ctx) req.Host = "example.com:80" req.RemoteAddr = "192.168.159.32:1234" clientCert := []byte(`-----BEGIN CERTIFICATE----- MIIB9jCCAV+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1DYWRk eSBUZXN0IENBMB4XDTE4MDcyNDIxMzUwNVoXDTI4MDcyMTIxMzUwNVowHTEbMBkG A1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB iQKBgQDFDEpzF0ew68teT3xDzcUxVFaTII+jXH1ftHXxxP4BEYBU4q90qzeKFneF z83I0nC0WAQ45ZwHfhLMYHFzHPdxr6+jkvKPASf0J2v2HDJuTM1bHBbik5Ls5eq+ fVZDP8o/VHKSBKxNs8Goc2NTsr5b07QTIpkRStQK+RJALk4x9QIDAQABo0swSTAJ BgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A AAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEANSjz2Sk+ eqp31wM9il1n+guTNyxJd+FzVAH+hCZE5K+tCgVDdVFUlDEHHbS/wqb2PSIoouLV 
3Q9fgDkiUod+uIK0IynzIKvw+Cjg+3nx6NQ0IM0zo8c7v398RzB4apbXKZyeeqUH 9fNwfEi+OoXR6s+upSKobCmLGLGi9Na5s5g= -----END CERTIFICATE-----`) block, _ := pem.Decode(clientCert) if block == nil { t.Fatalf("failed to decode PEM certificate") } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("failed to decode PEM certificate: %v", err) } req.TLS = &tls.ConnectionState{ Version: tls.VersionTLS13, HandshakeComplete: true, ServerName: "example.com", CipherSuite: tls.TLS_AES_256_GCM_SHA384, PeerCertificates: []*x509.Certificate{cert}, NegotiatedProtocol: "h2", NegotiatedProtocolIsMutual: true, } res := httptest.NewRecorder() addHTTPVarsToReplacer(repl, req, res) for i, tc := range []struct { get string expect string }{ { get: "http.request.scheme", expect: "https", }, { get: "http.request.method", expect: http.MethodGet, }, { get: "http.request.host", expect: "example.com", }, { get: "http.request.port", expect: "80", }, { get: "http.request.hostport", expect: "example.com:80", }, { get: "http.request.local.host", expect: "192.168.159.1", }, { get: "http.request.local.port", expect: "80", }, { get: "http.request.local", expect: "192.168.159.1:80", }, { get: "http.request.remote.host", expect: "192.168.159.32", }, { get: "http.request.remote.host/24", expect: "192.168.159.0/24", }, { get: "http.request.remote.host/24,32", expect: "192.168.159.0/24", }, { get: "http.request.remote.host/999", expect: "", }, { get: "http.request.remote.port", expect: "1234", }, { get: "http.request.host.labels.0", expect: "com", }, { get: "http.request.host.labels.1", expect: "example", }, { get: "http.request.host.labels.2", expect: "", }, { get: "http.request.uri", expect: "/foo/bar.tar.gz?a=1&b=2", }, { get: "http.request.uri_escaped", expect: "%2Ffoo%2Fbar.tar.gz%3Fa%3D1%26b%3D2", }, { get: "http.request.uri.path", expect: "/foo/bar.tar.gz", }, { get: "http.request.uri.path_escaped", expect: "%2Ffoo%2Fbar.tar.gz", }, { get: "http.request.uri.path.file", expect: "bar.tar.gz", 
}, { get: "http.request.uri.path.file.base", expect: "bar.tar", }, { // not ideal, but also most correct, given that files can have dots (example: index..html) TODO: maybe this isn't right.. get: "http.request.uri.path.file.ext", expect: ".gz", }, { get: "http.request.uri.query", expect: "a=1&b=2", }, { get: "http.request.uri.query_escaped", expect: "a%3D1%26b%3D2", }, { get: "http.request.uri.query.a", expect: "1", }, { get: "http.request.uri.query.b", expect: "2", }, { get: "http.request.uri.prefixed_query", expect: "?a=1&b=2", }, { get: "http.request.tls.cipher_suite", expect: "TLS_AES_256_GCM_SHA384", }, { get: "http.request.tls.proto", expect: "h2", }, { get: "http.request.tls.proto_mutual", expect: "true", }, { get: "http.request.tls.resumed", expect: "false", }, { get: "http.request.tls.server_name", expect: "example.com", }, { get: "http.request.tls.version", expect: "tls1.3", }, { get: "http.request.tls.client.fingerprint", expect: "9f57b7b497cceacc5459b76ac1c3afedbc12b300e728071f55f84168ff0f7702", }, { get: "http.request.tls.client.issuer", expect: "CN=Caddy Test CA", }, { get: "http.request.tls.client.serial", expect: "2", }, { get: "http.request.tls.client.subject", expect: "CN=client.localdomain", }, { get: "http.request.tls.client.san.dns_names", expect: "[localhost]", }, { get: "http.request.tls.client.san.dns_names.0", expect: "localhost", }, { get: "http.request.tls.client.san.dns_names.1", expect: "", }, { get: "http.request.tls.client.san.ips", expect: "[127.0.0.1]", }, { get: "http.request.tls.client.san.ips.0", expect: "127.0.0.1", }, { get: "http.request.tls.client.certificate_pem", expect: string(clientCert) + "\n", // returned value comes with a newline appended to it }, } { actual, got := repl.GetString(tc.get) if !got { t.Errorf("Test %d: Expected to recognize the placeholder name, but didn't", i) } if actual != tc.expect { t.Errorf("Test %d: Expected %s to be '%s' but got '%s'", i, tc.get, tc.expect, actual) } } } 
================================================
FILE: modules/caddyhttp/requestbody/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package requestbody

import (
	"time"

	"github.com/dustin/go-humanize"

	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("request_body", parseCaddyfile)
}

// parseCaddyfile unmarshals the request_body directive:
//
//	request_body {
//	    max_size      <size>
//	    read_timeout  <duration>
//	    write_timeout <duration>
//	    set           <body>
//	}
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	rb := new(RequestBody)

	// configuration should be in a block
	for h.NextBlock(0) {
		switch h.Val() {
		case "max_size":
			var sizeStr string
			if !h.AllArgs(&sizeStr) {
				return nil, h.ArgErr()
			}
			// human-friendly sizes like "10MB" are accepted
			size, err := humanize.ParseBytes(sizeStr)
			if err != nil {
				return nil, h.Errf("parsing max_size: %v", err)
			}
			rb.MaxSize = int64(size)

		case "read_timeout":
			var timeoutStr string
			if !h.AllArgs(&timeoutStr) {
				return nil, h.ArgErr()
			}
			timeout, err := time.ParseDuration(timeoutStr)
			if err != nil {
				return nil, h.Errf("parsing read_timeout: %v", err)
			}
			rb.ReadTimeout = timeout

		case "write_timeout":
			var timeoutStr string
			if !h.AllArgs(&timeoutStr) {
				return nil, h.ArgErr()
			}
			timeout, err := time.ParseDuration(timeoutStr)
			if err != nil {
				return nil, h.Errf("parsing write_timeout: %v", err)
			}
			rb.WriteTimeout = timeout

		case "set":
			var setStr string
			if !h.AllArgs(&setStr) {
				return nil, h.ArgErr()
			}
			rb.Set = setStr

		default:
			return nil, h.Errf("unrecognized request_body subdirective '%s'", h.Val())
		}
	}

	return rb, nil
}

================================================
FILE: modules/caddyhttp/requestbody/requestbody.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package requestbody

import (
	"errors"
	"io"
	"net/http"
	"strings"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(RequestBody{})
}

// RequestBody is a middleware for manipulating the request body.
type RequestBody struct {
	// The maximum number of bytes to allow reading from the body by a later handler.
	// If more bytes are read, an error with HTTP status 413 is returned.
	MaxSize int64 `json:"max_size,omitempty"`

	// Deadline for reading the request body.
	// EXPERIMENTAL. Subject to change/removal.
	ReadTimeout time.Duration `json:"read_timeout,omitempty"`

	// Deadline for writing the response.
	// EXPERIMENTAL. Subject to change/removal.
	WriteTimeout time.Duration `json:"write_timeout,omitempty"`

	// If set, replaces the request body on the fly
	// (placeholders are expanded).
	// EXPERIMENTAL. Subject to change/removal.
	Set string `json:"set,omitempty"`

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (RequestBody) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.request_body", New: func() caddy.Module { return new(RequestBody) }, } } func (rb *RequestBody) Provision(ctx caddy.Context) error { rb.logger = ctx.Logger() return nil } func (rb RequestBody) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { if rb.Set != "" { if r.Body != nil { err := r.Body.Close() if err != nil { return err } } repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) replacedBody := repl.ReplaceAll(rb.Set, "") r.Body = io.NopCloser(strings.NewReader(replacedBody)) r.ContentLength = int64(len(replacedBody)) } if r.Body == nil { return next.ServeHTTP(w, r) } if rb.MaxSize > 0 { r.Body = errorWrapper{http.MaxBytesReader(w, r.Body, rb.MaxSize)} } if rb.ReadTimeout > 0 || rb.WriteTimeout > 0 { //nolint:bodyclose rc := http.NewResponseController(w) if rb.ReadTimeout > 0 { if err := rc.SetReadDeadline(time.Now().Add(rb.ReadTimeout)); err != nil { if c := rb.logger.Check(zapcore.ErrorLevel, "could not set read deadline"); c != nil { c.Write(zap.Error(err)) } } } if rb.WriteTimeout > 0 { if err := rc.SetWriteDeadline(time.Now().Add(rb.WriteTimeout)); err != nil { if c := rb.logger.Check(zapcore.ErrorLevel, "could not set write deadline"); c != nil { c.Write(zap.Error(err)) } } } } return next.ServeHTTP(w, r) } // errorWrapper wraps errors that are returned from Read() // so that they can be associated with a proper status code. 
type errorWrapper struct { io.ReadCloser } func (ew errorWrapper) Read(p []byte) (n int, err error) { n, err = ew.ReadCloser.Read(p) var mbe *http.MaxBytesError if errors.As(err, &mbe) { err = caddyhttp.Error(http.StatusRequestEntityTooLarge, err) } return n, err } // Interface guard var _ caddyhttp.MiddlewareHandler = (*RequestBody)(nil) ================================================ FILE: modules/caddyhttp/responsematchers.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "net/http" "strconv" "strings" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) // ResponseMatcher is a type which can determine if an // HTTP response matches some criteria. type ResponseMatcher struct { // If set, one of these status codes would be required. // A one-digit status can be used to represent all codes // in that class (e.g. 3 for all 3xx codes). StatusCode []int `json:"status_code,omitempty"` // If set, each header specified must be one of the // specified values, with the same logic used by the // [request header matcher](/docs/json/apps/http/servers/routes/match/header/). Headers http.Header `json:"headers,omitempty"` } // Match returns true if the given statusCode and hdr match rm. 
// ParseNamedResponseMatcher parses the tokens of a named response matcher
// from d and stores it in matchers under its @name key. Supported block
// syntax:
//
//	@name {
//		header <field> [<value>]
//		status <code...>
//	}
//
// Or, single line syntax:
//
//	@name [header <field> [<value>]] | [status <code...>]
//
// A status of the form "4xx" (exactly three chars ending in "xx") is
// stored as the single digit 4, meaning the whole 4xx class.
// Returns an error if the name was already defined or a value is invalid.
func ParseNamedResponseMatcher(d *caddyfile.Dispenser, matchers map[string]ResponseMatcher) error {
	d.Next() // consume matcher name
	definitionName := d.Val()

	// duplicate definitions are almost certainly a config mistake
	if _, ok := matchers[definitionName]; ok {
		return d.Errf("matcher is defined more than once: %s", definitionName)
	}

	matcher := ResponseMatcher{}
	// accept both same-line args and a nested block
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		switch d.Val() {
		case "header":
			if matcher.Headers == nil {
				matcher.Headers = http.Header{}
			}

			// reuse the header request matcher's unmarshaler
			headerMatcher := MatchHeader(matcher.Headers)
			err := headerMatcher.UnmarshalCaddyfile(d.NewFromNextSegment())
			if err != nil {
				return err
			}

			matcher.Headers = http.Header(headerMatcher)
		case "status":
			if matcher.StatusCode == nil {
				matcher.StatusCode = []int{}
			}

			args := d.RemainingArgs()
			if len(args) == 0 {
				return d.ArgErr()
			}

			for _, arg := range args {
				// "3xx" -> "3": one digit matches the whole class
				if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
					arg = arg[:1]
				}
				statusNum, err := strconv.Atoi(arg)
				if err != nil {
					return d.Errf("bad status value '%s': %v", arg, err)
				}
				matcher.StatusCode = append(matcher.StatusCode, statusNum)
			}
		default:
			return d.Errf("unrecognized response matcher %s", d.Val())
		}
	}

	matchers[definitionName] = matcher
	return nil
}
Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "net/http" "testing" ) func TestResponseMatcher(t *testing.T) { for i, tc := range []struct { require ResponseMatcher status int hdr http.Header // make sure these are canonical cased (std lib will do that in a real request) expect bool }{ { require: ResponseMatcher{}, status: 200, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{200}, }, status: 200, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{2}, }, status: 200, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{201}, }, status: 200, expect: false, }, { require: ResponseMatcher{ StatusCode: []int{2}, }, status: 301, expect: false, }, { require: ResponseMatcher{ StatusCode: []int{3}, }, status: 301, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{3}, }, status: 399, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{3}, }, status: 400, expect: false, }, { require: ResponseMatcher{ StatusCode: []int{3, 4}, }, status: 400, expect: true, }, { require: ResponseMatcher{ StatusCode: []int{3, 401}, }, status: 401, expect: true, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": []string{"bar"}, }, }, hdr: http.Header{"Foo": []string{"bar"}}, expect: true, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo2": []string{"bar"}, }, }, hdr: http.Header{"Foo": []string{"bar"}}, expect: false, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": 
[]string{"bar", "baz"}, }, }, hdr: http.Header{"Foo": []string{"baz"}}, expect: true, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": []string{"bar"}, "Foo2": []string{"baz"}, }, }, hdr: http.Header{"Foo": []string{"baz"}}, expect: false, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": []string{"bar"}, "Foo2": []string{"baz"}, }, }, hdr: http.Header{"Foo": []string{"bar"}, "Foo2": []string{"baz"}}, expect: true, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": []string{"foo*"}, }, }, hdr: http.Header{"Foo": []string{"foobar"}}, expect: true, }, { require: ResponseMatcher{ Headers: http.Header{ "Foo": []string{"foo*"}, }, }, hdr: http.Header{"Foo": []string{"foobar"}}, expect: true, }, } { actual := tc.require.Match(tc.status, tc.hdr) if actual != tc.expect { t.Errorf("Test %d %v: Expected %t, got %t for HTTP %d %v", i, tc.require, tc.expect, actual, tc.status, tc.hdr) continue } } } ================================================ FILE: modules/caddyhttp/responsewriter.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddyhttp import ( "bufio" "bytes" "fmt" "io" "net" "net/http" ) // ResponseWriterWrapper wraps an underlying ResponseWriter and // promotes its Pusher method as well. 
To use this type, embed // a pointer to it within your own struct type that implements // the http.ResponseWriter interface, then call methods on the // embedded value. type ResponseWriterWrapper struct { http.ResponseWriter } // Push implements http.Pusher. It simply calls the underlying // ResponseWriter's Push method if there is one, or returns // ErrNotImplemented otherwise. func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error { if pusher, ok := rww.ResponseWriter.(http.Pusher); ok { return pusher.Push(target, opts) } return ErrNotImplemented } // ReadFrom implements io.ReaderFrom. It retries to use io.ReaderFrom if available, // then fallback to io.Copy. // see: https://github.com/caddyserver/caddy/issues/6546 func (rww *ResponseWriterWrapper) ReadFrom(r io.Reader) (n int64, err error) { if rf, ok := rww.ResponseWriter.(io.ReaderFrom); ok { return rf.ReadFrom(r) } return io.Copy(rww.ResponseWriter, r) } // Unwrap returns the underlying ResponseWriter, necessary for // http.ResponseController to work correctly. func (rww *ResponseWriterWrapper) Unwrap() http.ResponseWriter { return rww.ResponseWriter } // ErrNotImplemented is returned when an underlying // ResponseWriter does not implement the required method. var ErrNotImplemented = fmt.Errorf("method not implemented") type responseRecorder struct { *ResponseWriterWrapper statusCode int buf *bytes.Buffer shouldBuffer ShouldBufferFunc size int wroteHeader bool stream bool readSize *int } // NewResponseRecorder returns a new ResponseRecorder that can be // used instead of a standard http.ResponseWriter. The recorder is // useful for middlewares which need to buffer a response and // potentially process its entire body before actually writing the // response to the underlying writer. Of course, buffering the entire // body has a memory overhead, but sometimes there is no way to avoid // buffering the whole response, hence the existence of this type. 
// Still, if at all practical, handlers should strive to stream // responses by wrapping Write and WriteHeader methods instead of // buffering whole response bodies. // // Buffering is actually optional. The shouldBuffer function will // be called just before the headers are written. If it returns // true, the headers and body will be buffered by this recorder // and not written to the underlying writer; if false, the headers // will be written immediately and the body will be streamed out // directly to the underlying writer. If shouldBuffer is nil, // the response will never be buffered and will always be streamed // directly to the writer. // // You can know if shouldBuffer returned true by calling Buffered(). // // The provided buffer buf should be obtained from a pool for best // performance (see the sync.Pool type). // // Proper usage of a recorder looks like this: // // rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer) // err := next.ServeHTTP(rec, req) // if err != nil { // return err // } // if !rec.Buffered() { // return nil // } // // process the buffered response here // // The header map is not buffered; i.e. the ResponseRecorder's Header() // method returns the same header map of the underlying ResponseWriter. // This is a crucial design decision to allow HTTP trailers to be // flushed properly (https://github.com/caddyserver/caddy/issues/3236). // // Once you are ready to write the response, there are two ways you can // do it. The easier way is to have the recorder do it: // // rec.WriteResponse() // // This writes the recorded response headers as well as the buffered body. // Or, you may wish to do it yourself, especially if you manipulated the // buffered body. 
First you will need to write the headers with the // recorded status code, then write the body (this example writes the // recorder's body buffer, but you might have your own body to write // instead): // // w.WriteHeader(rec.Status()) // io.Copy(w, rec.Buffer()) // // As a special case, 1xx responses are not buffered nor recorded // because they are not the final response; they are passed through // directly to the underlying ResponseWriter. func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder { return &responseRecorder{ ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w}, buf: buf, shouldBuffer: shouldBuffer, } } // WriteHeader writes the headers with statusCode to the wrapped // ResponseWriter unless the response is to be buffered instead. // 1xx responses are never buffered. func (rr *responseRecorder) WriteHeader(statusCode int) { if rr.wroteHeader { return } // save statusCode always, in case HTTP middleware upgrades websocket // connections by manually setting headers and writing status 101 rr.statusCode = statusCode // decide whether we should buffer the response if rr.shouldBuffer == nil { rr.stream = true } else { rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header()) } // 1xx responses aren't final; just informational if statusCode < 100 || statusCode > 199 { rr.wroteHeader = true } // if informational or not buffered, immediately write header if rr.stream || (100 <= statusCode && statusCode <= 199) { rr.ResponseWriterWrapper.WriteHeader(statusCode) } } func (rr *responseRecorder) Write(data []byte) (int, error) { rr.WriteHeader(http.StatusOK) var n int var err error if rr.stream { n, err = rr.ResponseWriterWrapper.Write(data) } else { n, err = rr.buf.Write(data) } rr.size += n return n, err } func (rr *responseRecorder) ReadFrom(r io.Reader) (int64, error) { rr.WriteHeader(http.StatusOK) var n int64 var err error if rr.stream { n, err = 
// WriteResponse flushes the recorded response to the underlying
// ResponseWriter: it writes the recorded status code, then copies
// the buffered body. If the response was streamed, there is nothing
// left to do. NOTE: the statusCode==0 check must run before the
// rr.stream check, because WriteHeader is what decides rr.stream.
func (rr *responseRecorder) WriteResponse() error {
	if rr.statusCode == 0 {
		// could happen if no handlers actually wrote anything,
		// and this prevents a panic; status must be > 0
		rr.WriteHeader(http.StatusOK)
	}
	if rr.stream {
		// body already went straight to the underlying writer
		return nil
	}
	rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
	_, err := io.Copy(rr.ResponseWriterWrapper, rr.buf)
	return err
}
// Hijack takes over the underlying connection, wrapping it in a
// hijackedConn so bytes read/written after the hijack are still
// counted by the recorder. Any data already buffered in the
// bufio.Reader is carefully preserved across the Reset.
func (rr *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	//nolint:bodyclose
	conn, brw, err := http.NewResponseController(rr.ResponseWriterWrapper).Hijack()
	if err != nil {
		return nil, nil, err
	}
	// Per http documentation, returned bufio.Writer is empty, but bufio.Read maybe not
	conn = &hijackedConn{conn, rr}
	brw.Writer.Reset(conn)

	buffered := brw.Reader.Buffered()
	if buffered != 0 {
		// account for the bytes the server already read off the wire
		conn.(*hijackedConn).updateReadSize(buffered)
		// copy the pending bytes out, then rebuild the reader so the
		// pending bytes are consumed before the (wrapped) conn
		data, _ := brw.Peek(buffered)
		brw.Reader.Reset(io.MultiReader(bytes.NewReader(data), conn))
		// peek to make buffered data appear, as Reset will make it 0
		_, _ = brw.Peek(buffered)
	} else {
		brw.Reader.Reset(conn)
	}
	return conn, brw, nil
}
// baseRespWriter is a barebones http.ResponseWriter mock that
// records everything written to it in the byte slice itself.
type baseRespWriter []byte

// Write records p and reports it fully written.
func (w *baseRespWriter) Write(p []byte) (int, error) {
	*w = append(*w, p...)
	return len(p), nil
}

// Header returns nil; the mock does not track headers.
func (w *baseRespWriter) Header() http.Header { return nil }

// WriteHeader is a no-op; the mock does not track status codes.
func (w *baseRespWriter) WriteHeader(statusCode int) {}

// Written returns everything recorded so far as a string.
func (w *baseRespWriter) Written() string { return string(*w) }

// CalledReadFrom is always false; the base mock has no ReadFrom.
func (w *baseRespWriter) CalledReadFrom() bool { return false }
src := struct{ io.Reader }{strings.NewReader(srcData)} if _, err := io.Copy(wrapped, src); err != nil { t.Errorf("%s: Copy() err = %v", name, err) } if got := tt.responseWriter.Written(); got != srcData { t.Errorf("%s: data = %q, want %q", name, got, srcData) } if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom { if tt.wantReadFrom { t.Errorf("%s: ReadFrom() should have been called", name) } else { t.Errorf("%s: ReadFrom() should not have been called", name) } } }) } } func TestResponseWriterWrapperUnwrap(t *testing.T) { w := &ResponseWriterWrapper{&baseRespWriter{}} if _, ok := w.Unwrap().(*baseRespWriter); !ok { t.Errorf("Unwrap() doesn't return the underlying ResponseWriter") } } func TestResponseRecorderReadFrom(t *testing.T) { tests := map[string]struct { responseWriter responseWriterSpy shouldBuffer bool wantReadFrom bool }{ "buffered plain": { responseWriter: &baseRespWriter{}, shouldBuffer: true, wantReadFrom: false, }, "streamed plain": { responseWriter: &baseRespWriter{}, shouldBuffer: false, wantReadFrom: false, }, "buffered ReadFrom": { responseWriter: &readFromRespWriter{}, shouldBuffer: true, wantReadFrom: false, }, "streamed ReadFrom": { responseWriter: &readFromRespWriter{}, shouldBuffer: false, wantReadFrom: true, }, } for name, tt := range tests { t.Run(name, func(t *testing.T) { var buf bytes.Buffer rr := NewResponseRecorder(tt.responseWriter, &buf, func(status int, header http.Header) bool { return tt.shouldBuffer }) const srcData = "boo!" // hides everything but Read, since strings.Reader implements WriteTo it would // take precedence over our ReadFrom. 
src := struct{ io.Reader }{strings.NewReader(srcData)} if _, err := io.Copy(rr, src); err != nil { t.Errorf("Copy() err = %v", err) } wantStreamed := srcData wantBuffered := "" if tt.shouldBuffer { wantStreamed = "" wantBuffered = srcData } if got := tt.responseWriter.Written(); got != wantStreamed { t.Errorf("streamed data = %q, want %q", got, wantStreamed) } if got := buf.String(); got != wantBuffered { t.Errorf("buffered data = %q, want %q", got, wantBuffered) } if tt.responseWriter.CalledReadFrom() != tt.wantReadFrom { if tt.wantReadFrom { t.Errorf("ReadFrom() should have been called") } else { t.Errorf("ReadFrom() should not have been called") } } }) } } ================================================ FILE: modules/caddyhttp/reverseproxy/addresses.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package reverseproxy import ( "fmt" "net" "net/url" "strings" "github.com/caddyserver/caddy/v2" ) type parsedAddr struct { network, scheme, host, port string valid bool } func (p parsedAddr) dialAddr() string { if !p.valid { return "" } // for simplest possible config, we only need to include // the network portion if the user specified one if p.network != "" { return caddy.JoinNetworkAddress(p.network, p.host, p.port) } // if the host is a placeholder, then we don't want to join with an empty port, // because that would just append an extra ':' at the end of the address. if p.port == "" && strings.Contains(p.host, "{") { return p.host } return net.JoinHostPort(p.host, p.port) } func (p parsedAddr) rangedPort() bool { return strings.Contains(p.port, "-") } func (p parsedAddr) replaceablePort() bool { return strings.Contains(p.port, "{") && strings.Contains(p.port, "}") } func (p parsedAddr) isUnix() bool { return caddy.IsUnixNetwork(p.network) } // parseUpstreamDialAddress parses configuration inputs for // the dial address, including support for a scheme in front // as a shortcut for the port number, and a network type, // for example 'unix' to dial a unix socket. 
func parseUpstreamDialAddress(upstreamAddr string) (parsedAddr, error) {
	var network, scheme, host, port string

	// two input families: a URL-style address with a scheme, or a
	// plain (optionally network-prefixed) host[:port]
	if strings.Contains(upstreamAddr, "://") {
		// we get a parsing error if a placeholder is specified
		// so we return a more user-friendly error message instead
		// to explain what to do instead
		if strings.Contains(upstreamAddr, "{") {
			return parsedAddr{}, fmt.Errorf("due to parsing difficulties, placeholders are not allowed when an upstream address contains a scheme")
		}

		toURL, err := url.Parse(upstreamAddr)
		if err != nil {
			// if the error seems to be due to a port range,
			// try to replace the port range with a dummy
			// single port so that url.Parse() will succeed
			if strings.Contains(err.Error(), "invalid port") && strings.Contains(err.Error(), "-") {
				index := strings.LastIndex(upstreamAddr, ":")
				if index == -1 {
					return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
				}
				// everything after the last ':' is the candidate range;
				// exactly one '-' is required (e.g. "8001-8009")
				portRange := upstreamAddr[index+1:]
				if strings.Count(portRange, "-") != 1 {
					return parsedAddr{}, fmt.Errorf("parsing upstream URL: parse \"%v\": port range invalid: %v", upstreamAddr, portRange)
				}
				// re-parse with a dummy port "0" so the rest of the URL
				// is validated; keep the original range as the port
				toURL, err = url.Parse(strings.ReplaceAll(upstreamAddr, portRange, "0"))
				if err != nil {
					return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
				}
				port = portRange
			} else {
				return parsedAddr{}, fmt.Errorf("parsing upstream URL: %v", err)
			}
		}
		if port == "" {
			port = toURL.Port()
		}

		// there is currently no way to perform a URL rewrite between choosing
		// a backend and proxying to it, so we cannot allow extra components
		// in backend URLs
		if toURL.Path != "" || toURL.RawQuery != "" || toURL.Fragment != "" {
			return parsedAddr{}, fmt.Errorf("for now, URLs for proxy upstreams only support scheme, host, and port components")
		}

		// ensure the port and scheme aren't in conflict
		if toURL.Scheme == "http" && port == "443" {
			return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (http://) and port (:443, the HTTPS port)")
		}
		if toURL.Scheme == "https" && port == "80" {
			return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (https://) and port (:80, the HTTP port)")
		}
		if toURL.Scheme == "h2c" && port == "443" {
			return parsedAddr{}, fmt.Errorf("upstream address has conflicting scheme (h2c://) and port (:443, the HTTPS port)")
		}

		// if port is missing, attempt to infer from scheme
		if port == "" {
			switch toURL.Scheme {
			case "", "http", "h2c":
				port = "80"
			case "https":
				port = "443"
			}
		}

		scheme, host = toURL.Scheme, toURL.Hostname()
	} else {
		var err error
		network, host, port, err = caddy.SplitNetworkAddress(upstreamAddr)
		if err != nil {
			// not a network address; treat the whole input as the host
			host = upstreamAddr
		}
		// we can assume a port if only a hostname is specified, but use of a
		// placeholder without a port likely means a port will be filled in
		if port == "" && !strings.Contains(host, "{") && !caddy.IsUnixNetwork(network) && !caddy.IsFdNetwork(network) {
			port = "80"
		}
	}

	// special case network to support both unix and h2c at the same time
	if network == "unix+h2c" {
		network = "unix"
		scheme = "h2c"
	}
	return parsedAddr{network, scheme, host, port, true}, nil
}
package reverseproxy import "testing" func TestParseUpstreamDialAddress(t *testing.T) { for i, tc := range []struct { input string expectHostPort string expectScheme string expectErr bool }{ { input: "foo", expectHostPort: "foo:80", }, { input: "foo:1234", expectHostPort: "foo:1234", }, { input: "127.0.0.1", expectHostPort: "127.0.0.1:80", }, { input: "127.0.0.1:1234", expectHostPort: "127.0.0.1:1234", }, { input: "[::1]", expectHostPort: "[::1]:80", }, { input: "[::1]:1234", expectHostPort: "[::1]:1234", }, { input: "{foo}", expectHostPort: "{foo}", }, { input: "{foo}:80", expectHostPort: "{foo}:80", }, { input: "{foo}:{bar}", expectHostPort: "{foo}:{bar}", }, { input: "http://foo", expectHostPort: "foo:80", expectScheme: "http", }, { input: "http://foo:1234", expectHostPort: "foo:1234", expectScheme: "http", }, { input: "http://127.0.0.1", expectHostPort: "127.0.0.1:80", expectScheme: "http", }, { input: "http://127.0.0.1:1234", expectHostPort: "127.0.0.1:1234", expectScheme: "http", }, { input: "http://[::1]", expectHostPort: "[::1]:80", expectScheme: "http", }, { input: "http://[::1]:80", expectHostPort: "[::1]:80", expectScheme: "http", }, { input: "https://foo", expectHostPort: "foo:443", expectScheme: "https", }, { input: "https://foo:1234", expectHostPort: "foo:1234", expectScheme: "https", }, { input: "https://127.0.0.1", expectHostPort: "127.0.0.1:443", expectScheme: "https", }, { input: "https://127.0.0.1:1234", expectHostPort: "127.0.0.1:1234", expectScheme: "https", }, { input: "https://[::1]", expectHostPort: "[::1]:443", expectScheme: "https", }, { input: "https://[::1]:1234", expectHostPort: "[::1]:1234", expectScheme: "https", }, { input: "h2c://foo", expectHostPort: "foo:80", expectScheme: "h2c", }, { input: "h2c://foo:1234", expectHostPort: "foo:1234", expectScheme: "h2c", }, { input: "h2c://127.0.0.1", expectHostPort: "127.0.0.1:80", expectScheme: "h2c", }, { input: "h2c://127.0.0.1:1234", expectHostPort: "127.0.0.1:1234", expectScheme: "h2c", 
}, { input: "h2c://[::1]", expectHostPort: "[::1]:80", expectScheme: "h2c", }, { input: "h2c://[::1]:1234", expectHostPort: "[::1]:1234", expectScheme: "h2c", }, { input: "localhost:1001-1009", expectHostPort: "localhost:1001-1009", }, { input: "{host}:1001-1009", expectHostPort: "{host}:1001-1009", }, { input: "http://localhost:1001-1009", expectHostPort: "localhost:1001-1009", expectScheme: "http", }, { input: "https://localhost:1001-1009", expectHostPort: "localhost:1001-1009", expectScheme: "https", }, { input: "unix//var/php.sock", expectHostPort: "unix//var/php.sock", }, { input: "unix+h2c//var/grpc.sock", expectHostPort: "unix//var/grpc.sock", expectScheme: "h2c", }, { input: "unix/{foo}", expectHostPort: "unix/{foo}", }, { input: "unix+h2c/{foo}", expectHostPort: "unix/{foo}", expectScheme: "h2c", }, { input: "unix//foo/{foo}/bar", expectHostPort: "unix//foo/{foo}/bar", }, { input: "unix+h2c//foo/{foo}/bar", expectHostPort: "unix//foo/{foo}/bar", expectScheme: "h2c", }, { input: "http://{foo}", expectErr: true, }, { input: "http:// :80", expectErr: true, }, { input: "http://localhost/path", expectErr: true, }, { input: "http://localhost?key=value", expectErr: true, }, { input: "http://localhost#fragment", expectErr: true, }, { input: "http://localhost:8001-8002-8003", expectErr: true, }, { input: "http://localhost:8001-8002/foo:bar", expectErr: true, }, { input: "http://localhost:8001-8002/foo:1", expectErr: true, }, { input: "http://localhost:8001-8002/foo:1-2", expectErr: true, }, { input: "http://localhost:8001-8002#foo:1", expectErr: true, }, { input: "http://foo:443", expectErr: true, }, { input: "https://foo:80", expectErr: true, }, { input: "h2c://foo:443", expectErr: true, }, { input: `unix/c:\absolute\path`, expectHostPort: `unix/c:\absolute\path`, }, { input: `unix+h2c/c:\absolute\path`, expectHostPort: `unix/c:\absolute\path`, expectScheme: "h2c", }, { input: "unix/c:/absolute/path", expectHostPort: "unix/c:/absolute/path", }, { input: 
"unix+h2c/c:/absolute/path", expectHostPort: "unix/c:/absolute/path", expectScheme: "h2c", }, } { actualAddr, err := parseUpstreamDialAddress(tc.input) if tc.expectErr && err == nil { t.Errorf("Test %d: Expected error but got %v", i, err) } if !tc.expectErr && err != nil { t.Errorf("Test %d: Expected no error but got %v", i, err) } if actualAddr.dialAddr() != tc.expectHostPort { t.Errorf("Test %d: input %s: Expected host and port '%s' but got '%s'", i, tc.input, tc.expectHostPort, actualAddr.dialAddr()) } if actualAddr.scheme != tc.expectScheme { t.Errorf("Test %d: Expected scheme '%s' but got '%s'", i, tc.expectScheme, actualAddr.scheme) } } } ================================================ FILE: modules/caddyhttp/reverseproxy/admin.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "encoding/json" "fmt" "net/http" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(adminUpstreams{}) } // adminUpstreams is a module that provides the // /reverse_proxy/upstreams endpoint for the Caddy admin // API. This allows for checking the health of configured // reverse proxy upstreams in the pool. 
type adminUpstreams struct{}

// upstreamStatus holds the status of a particular upstream
type upstreamStatus struct {
	Address     string `json:"address"`
	NumRequests int    `json:"num_requests"`
	Fails       int    `json:"fails"`
}

// CaddyModule returns the Caddy module information.
func (adminUpstreams) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "admin.api.reverse_proxy",
		New: func() caddy.Module { return new(adminUpstreams) },
	}
}

// Routes returns a route for the /reverse_proxy/upstreams endpoint.
func (al adminUpstreams) Routes() []caddy.AdminRoute {
	return []caddy.AdminRoute{
		{
			Pattern: "/reverse_proxy/upstreams",
			Handler: caddy.AdminHandlerFunc(al.handleUpstreams),
		},
	}
}

// handleUpstreams reports the status of the reverse proxy
// upstream pool.
//
// It serves GET only, responding with a JSON array of upstreamStatus
// built from three sources, in order: the static upstream pool (the
// package-level `hosts` UsagePool), addresses known only via in-flight
// request counts, and the dynamic upstream map (`dynamicHosts`).
// Any other HTTP method yields a 405 caddy.APIError.
func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed"),
		}
	}

	// Prep for a JSON response
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)

	// Collect the results to respond with.
	// knownHosts records which addresses came from the static pool so
	// the in-flight merge below doesn't duplicate them.
	results := []upstreamStatus{}
	knownHosts := make(map[string]struct{})

	// Iterate over the static upstream pool (needs to be fast)
	var rangeErr error
	hosts.Range(func(key, val any) bool {
		address, ok := key.(string)
		if !ok {
			rangeErr = caddy.APIError{
				HTTPStatus: http.StatusInternalServerError,
				Err:        fmt.Errorf("could not type assert upstream address"),
			}
			return false
		}

		upstream, ok := val.(*Host)
		if !ok {
			rangeErr = caddy.APIError{
				HTTPStatus: http.StatusInternalServerError,
				Err:        fmt.Errorf("could not type assert upstream struct"),
			}
			return false
		}

		knownHosts[address] = struct{}{}
		results = append(results, upstreamStatus{
			Address:     address,
			NumRequests: upstream.NumRequests(),
			Fails:       upstream.Fails(),
		})
		return true
	})

	// Merge in addresses that only have in-flight request counts and
	// are not already represented by a static pool entry.
	// NOTE(review): this merge runs even when rangeErr is set below;
	// the error still wins, so the extra work is wasted but harmless —
	// consider checking rangeErr first.
	currentInFlight := getInFlightRequests()
	for address, count := range currentInFlight {
		if _, exists := knownHosts[address]; !exists && count > 0 {
			results = append(results, upstreamStatus{
				Address:     address,
				NumRequests: int(count),
				Fails:       0,
			})
		}
	}

	if rangeErr != nil {
		return rangeErr
	}

	// Also include dynamic upstreams
	dynamicHostsMu.RLock()
	for address, entry := range dynamicHosts {
		results = append(results, upstreamStatus{
			Address:     address,
			NumRequests: entry.host.NumRequests(),
			Fails:       entry.host.Fails(),
		})
	}
	dynamicHostsMu.RUnlock()

	err := enc.Encode(results)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusInternalServerError,
			Err:        err,
		}
	}

	return nil
}

================================================
FILE: modules/caddyhttp/reverseproxy/admin_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package reverseproxy

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"
)

// adminHandlerFixture sets up the global host state for an admin endpoint test
// and returns a cleanup function that must be deferred by the caller.
//
// staticAddrs are inserted into the UsagePool (as a static upstream would be).
// dynamicAddrs are inserted into the dynamicHosts map (as a dynamic upstream would be).
func adminHandlerFixture(t *testing.T, staticAddrs, dynamicAddrs []string) func() { t.Helper() for _, addr := range staticAddrs { u := &Upstream{Dial: addr} u.fillHost() } dynamicHostsMu.Lock() for _, addr := range dynamicAddrs { dynamicHosts[addr] = dynamicHostEntry{host: new(Host), lastSeen: time.Now()} } dynamicHostsMu.Unlock() return func() { // Remove static entries from the UsagePool. for _, addr := range staticAddrs { _, _ = hosts.Delete(addr) } // Remove dynamic entries. dynamicHostsMu.Lock() for _, addr := range dynamicAddrs { delete(dynamicHosts, addr) } dynamicHostsMu.Unlock() } } // callAdminUpstreams fires a GET against handleUpstreams and returns the // decoded response body. func callAdminUpstreams(t *testing.T) []upstreamStatus { t.Helper() req := httptest.NewRequest(http.MethodGet, "/reverse_proxy/upstreams", nil) w := httptest.NewRecorder() handler := adminUpstreams{} if err := handler.handleUpstreams(w, req); err != nil { t.Fatalf("handleUpstreams returned unexpected error: %v", err) } if w.Code != http.StatusOK { t.Fatalf("expected 200, got %d", w.Code) } if ct := w.Header().Get("Content-Type"); ct != "application/json" { t.Fatalf("expected Content-Type application/json, got %q", ct) } var results []upstreamStatus if err := json.NewDecoder(w.Body).Decode(&results); err != nil { t.Fatalf("failed to decode response: %v", err) } return results } // resultsByAddress indexes a slice of upstreamStatus by address for easier // lookup in assertions. func resultsByAddress(statuses []upstreamStatus) map[string]upstreamStatus { m := make(map[string]upstreamStatus, len(statuses)) for _, s := range statuses { m[s.Address] = s } return m } // TestAdminUpstreamsMethodNotAllowed verifies that non-GET methods are rejected. 
func TestAdminUpstreamsMethodNotAllowed(t *testing.T) { for _, method := range []string{http.MethodPost, http.MethodPut, http.MethodDelete} { req := httptest.NewRequest(method, "/reverse_proxy/upstreams", nil) w := httptest.NewRecorder() err := (adminUpstreams{}).handleUpstreams(w, req) if err == nil { t.Errorf("method %s: expected an error, got nil", method) continue } apiErr, ok := err.(interface{ HTTPStatus() int }) if !ok { // caddy.APIError stores the code in HTTPStatus field, access via the // exported interface it satisfies indirectly; just check non-nil. continue } if code := apiErr.HTTPStatus(); code != http.StatusMethodNotAllowed { t.Errorf("method %s: expected 405, got %d", method, code) } } } // TestAdminUpstreamsEmpty verifies that an empty response is valid JSON when // no upstreams are registered. func TestAdminUpstreamsEmpty(t *testing.T) { resetDynamicHosts() results := callAdminUpstreams(t) if results == nil { t.Error("expected non-nil (empty) slice, got nil") } if len(results) != 0 { t.Errorf("expected 0 results with empty pools, got %d", len(results)) } } // TestAdminUpstreamsStaticOnly verifies that static upstreams (from the // UsagePool) appear in the response with correct addresses. func TestAdminUpstreamsStaticOnly(t *testing.T) { resetDynamicHosts() cleanup := adminHandlerFixture(t, []string{"10.0.0.1:80", "10.0.0.2:80"}, nil, ) defer cleanup() results := callAdminUpstreams(t) byAddr := resultsByAddress(results) for _, addr := range []string{"10.0.0.1:80", "10.0.0.2:80"} { if _, ok := byAddr[addr]; !ok { t.Errorf("expected static upstream %q in response", addr) } } if len(results) != 2 { t.Errorf("expected exactly 2 results, got %d", len(results)) } } // TestAdminUpstreamsDynamicOnly verifies that dynamic upstreams (from // dynamicHosts) appear in the response with correct addresses. 
func TestAdminUpstreamsDynamicOnly(t *testing.T) { resetDynamicHosts() cleanup := adminHandlerFixture(t, nil, []string{"10.0.1.1:80", "10.0.1.2:80"}, ) defer cleanup() results := callAdminUpstreams(t) byAddr := resultsByAddress(results) for _, addr := range []string{"10.0.1.1:80", "10.0.1.2:80"} { if _, ok := byAddr[addr]; !ok { t.Errorf("expected dynamic upstream %q in response", addr) } } if len(results) != 2 { t.Errorf("expected exactly 2 results, got %d", len(results)) } } // TestAdminUpstreamsBothPools verifies that static and dynamic upstreams are // both present in the same response and that there is no overlap or omission. func TestAdminUpstreamsBothPools(t *testing.T) { resetDynamicHosts() cleanup := adminHandlerFixture(t, []string{"10.0.2.1:80"}, []string{"10.0.2.2:80"}, ) defer cleanup() results := callAdminUpstreams(t) if len(results) != 2 { t.Fatalf("expected 2 results (1 static + 1 dynamic), got %d", len(results)) } byAddr := resultsByAddress(results) if _, ok := byAddr["10.0.2.1:80"]; !ok { t.Error("static upstream missing from response") } if _, ok := byAddr["10.0.2.2:80"]; !ok { t.Error("dynamic upstream missing from response") } } // TestAdminUpstreamsNoOverlapBetweenPools verifies that an address registered // only as a static upstream does not also appear as a dynamic entry, and // vice-versa. func TestAdminUpstreamsNoOverlapBetweenPools(t *testing.T) { resetDynamicHosts() cleanup := adminHandlerFixture(t, []string{"10.0.3.1:80"}, []string{"10.0.3.2:80"}, ) defer cleanup() results := callAdminUpstreams(t) seen := make(map[string]int) for _, r := range results { seen[r.Address]++ } for addr, count := range seen { if count > 1 { t.Errorf("address %q appeared %d times; expected exactly once", addr, count) } } } // TestAdminUpstreamsReportsFailCounts verifies that fail counts accumulated on // a dynamic upstream's Host are reflected in the response. 
func TestAdminUpstreamsReportsFailCounts(t *testing.T) {
	resetDynamicHosts()
	const addr = "10.0.4.1:80"

	// Seed a dynamic host that has already recorded 3 failures.
	h := new(Host)
	_ = h.countFail(3)
	dynamicHostsMu.Lock()
	dynamicHosts[addr] = dynamicHostEntry{host: h, lastSeen: time.Now()}
	dynamicHostsMu.Unlock()
	defer func() {
		dynamicHostsMu.Lock()
		delete(dynamicHosts, addr)
		dynamicHostsMu.Unlock()
	}()

	results := callAdminUpstreams(t)
	byAddr := resultsByAddress(results)
	status, ok := byAddr[addr]
	if !ok {
		t.Fatalf("expected %q in response", addr)
	}
	if status.Fails != 3 {
		t.Errorf("expected Fails=3, got %d", status.Fails)
	}
}

// TestAdminUpstreamsReportsNumRequests verifies that the active request count
// for a static upstream is reflected in the response.
func TestAdminUpstreamsReportsNumRequests(t *testing.T) {
	resetDynamicHosts()
	const addr = "10.0.4.2:80"

	// Register a static upstream, then simulate two in-flight requests.
	u := &Upstream{Dial: addr}
	u.fillHost()
	defer func() { _, _ = hosts.Delete(addr) }()
	_ = u.Host.countRequest(2)
	// Undo the simulated requests so global state is clean for other tests.
	defer func() { _ = u.Host.countRequest(-2) }()

	results := callAdminUpstreams(t)
	byAddr := resultsByAddress(results)
	status, ok := byAddr[addr]
	if !ok {
		t.Fatalf("expected %q in response", addr)
	}
	if status.NumRequests != 2 {
		t.Errorf("expected NumRequests=2, got %d", status.NumRequests)
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/ascii.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Most of the code in this file was initially borrowed from the Go // standard library and modified; It had this copyright notice: // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Original source, copied because the package was marked internal: // https://github.com/golang/go/blob/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a/src/net/http/internal/ascii/print.go package reverseproxy // asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t // are equal, ASCII-case-insensitively. func asciiEqualFold(s, t string) bool { if len(s) != len(t) { return false } for i := 0; i < len(s); i++ { if asciiLower(s[i]) != asciiLower(t[i]) { return false } } return true } // asciiLower returns the ASCII lowercase version of b. func asciiLower(b byte) byte { if 'A' <= b && b <= 'Z' { return b + ('a' - 'A') } return b } // asciiIsPrint returns whether s is ASCII and printable according to // https://tools.ietf.org/html/rfc20#section-4.2. func asciiIsPrint(s string) bool { for i := 0; i < len(s); i++ { if s[i] < ' ' || s[i] > '~' { return false } } return true } ================================================ FILE: modules/caddyhttp/reverseproxy/ascii_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Most of the code in this file was initially borrowed from the Go
// standard library and modified; It had this copyright notice:
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Original source, copied because the package was marked internal:
// https://github.com/golang/go/blob/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a/src/net/http/internal/ascii/print_test.go

package reverseproxy

import "testing"

// TestEqualFold exercises asciiEqualFold with equal, unequal, and
// non-ASCII (Kelvin sign) inputs.
func TestEqualFold(t *testing.T) {
	tests := []struct {
		name string
		a, b string
		want bool
	}{
		{
			name: "empty",
			want: true,
		},
		{
			name: "simple match",
			a:    "CHUNKED",
			b:    "chunked",
			want: true,
		},
		{
			name: "same string",
			a:    "chunked",
			b:    "chunked",
			want: true,
		},
		{
			// The Unicode Kelvin sign case-folds to ASCII 'k' under full
			// Unicode folding, but the ASCII-only fold must reject it.
			name: "Unicode Kelvin symbol",
			a:    "chunKed", // This "K" is 'KELVIN SIGN' (\u212A)
			b:    "chunked",
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := asciiEqualFold(tt.a, tt.b); got != tt.want {
				t.Errorf("AsciiEqualFold(%q,%q): got %v want %v", tt.a, tt.b, got, tt.want)
			}
		})
	}
}

// TestIsPrint exercises asciiIsPrint across the printable ASCII
// boundaries (space through tilde) and non-ASCII input.
func TestIsPrint(t *testing.T) {
	tests := []struct {
		name string
		in   string
		want bool
	}{
		{
			name: "empty",
			want: true,
		},
		{
			name: "ASCII low",
			in:   "This is a space: ' '",
			want: true,
		},
		{
			name: "ASCII high",
			in:   "This is a tilde: '~'",
			want: true,
		},
		{
			name: "ASCII low non-print",
			in:   "This is a unit separator: \x1F",
			want: false,
		},
		{
			name: "Ascii high non-print",
			in:   "This is a Delete: \x7F",
			want: false,
		},
		{
			name: "Unicode letter",
			in:   "Today it's 280K outside: it's freezing!", // This "K" is 'KELVIN SIGN' (\u212A)
			want: false,
		},
		{
			name: "Unicode emoji",
			in:   "Gophers like 🧀",
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := asciiIsPrint(tt.in); got != tt.want {
				t.Errorf("IsASCIIPrint(%q): got %v want %v", tt.in, got, tt.want)
			}
		})
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/buffering_test.go
================================================

package reverseproxy

import (
	"io"
	"testing"
)

// zeroReader is an endless reader that fills any buffer with zero bytes.
type zeroReader struct{}

func (zeroReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

// TestBuffering verifies Handler.bufferedBody's limit semantics: a zero
// limit returns the body untouched, a negative limit buffers the whole
// body, and a positive limit buffers at most that many bytes (leaving
// the remainder readable from the original body when it is larger).
func TestBuffering(t *testing.T) {
	var (
		h  Handler
		zr zeroReader
	)
	type args struct {
		body  io.ReadCloser
		limit int64
	}
	tests := []struct {
		name string
		args args
		// resultCheck inspects the (possibly wrapped) body, the number of
		// bytes reported as buffered, and the original args.
		resultCheck func(io.ReadCloser, int64, args) bool
	}{
		{
			name: "0 limit, body is returned as is",
			args: args{
				body:  io.NopCloser(&zr),
				limit: 0,
			},
			resultCheck: func(res io.ReadCloser, read int64, args args) bool {
				// Same ReadCloser back, nothing read.
				return res == args.body && read == args.limit && read == 0
			},
		},
		{
			name: "negative limit, body is read completely",
			args: args{
				body:  io.NopCloser(io.LimitReader(&zr, 100)),
				limit: -1,
			},
			resultCheck: func(res io.ReadCloser, read int64, args args) bool {
				// body == nil signals the source was fully consumed.
				brc, ok := res.(bodyReadCloser)
				return ok && brc.body == nil && brc.buf.Len() == 100 && read == 100
			},
		},
		{
			name: "positive limit, body is read partially",
			args: args{
				body:  io.NopCloser(io.LimitReader(&zr, 100)),
				limit: 50,
			},
			resultCheck: func(res io.ReadCloser, read int64, args args) bool {
				// body != nil: the remaining 50 bytes are still unread.
				brc, ok := res.(bodyReadCloser)
				return ok && brc.body != nil && brc.buf.Len() == 50 && read == 50
			},
		},
		{
			name: "positive limit, body is read completely",
			args: args{
				body:  io.NopCloser(io.LimitReader(&zr, 100)),
				limit: 101,
			},
			resultCheck: func(res io.ReadCloser, read int64, args args) bool {
				brc, ok := res.(bodyReadCloser)
				return ok && brc.body == nil && brc.buf.Len() == 100 && read == 100
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, read := h.bufferedBody(tt.args.body, tt.args.limit)
			if !tt.resultCheck(res, read, tt.args) {
				t.Error("Handler.bufferedBody() test failed")
				return
			}
		})
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the
Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package reverseproxy

import (
	"fmt"
	"net"
	"net/http"
	"reflect"
	"strconv"
	"strings"

	"github.com/dustin/go-humanize"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/internal"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
	"github.com/caddyserver/caddy/v2/modules/internal/network"
)

// init registers the Caddyfile directives this package provides.
func init() {
	httpcaddyfile.RegisterHandlerDirective("reverse_proxy", parseCaddyfile)
	httpcaddyfile.RegisterHandlerDirective("copy_response", parseCopyResponseCaddyfile)
	httpcaddyfile.RegisterHandlerDirective("copy_response_headers", parseCopyResponseHeadersCaddyfile)
}

// parseCaddyfile builds a reverse_proxy Handler from Caddyfile tokens
// by unmarshaling the directive's tokens and then finalizing the
// deferred "handle_response" parsing.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	rp := new(Handler)
	err := rp.UnmarshalCaddyfile(h.Dispenser)
	if err != nil {
		return nil, err
	}
	err = rp.FinalizeUnmarshalCaddyfile(h)
	if err != nil {
		return nil, err
	}
	return rp, nil
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
// reverse_proxy [] [] {
// # backends
// to
// dynamic [...]
// // # load balancing // lb_policy [] // lb_retries // lb_try_duration // lb_try_interval // lb_retry_match // // # active health checking // health_uri // health_port // health_interval // health_passes // health_fails // health_timeout // health_status // health_body // health_method // health_request_body // health_follow_redirects // health_headers { // [] // } // // # passive health checking // fail_duration // max_fails // unhealthy_status // unhealthy_latency // unhealthy_request_count // // # streaming // flush_interval // request_buffers // response_buffers // stream_timeout // stream_close_delay // verbose_logs // // # request manipulation // trusted_proxies [private_ranges] // header_up [+|-] [ []] // header_down [+|-] [ []] // method // rewrite // // # round trip // transport { // ... // } // // # optionally intercept responses from upstream // @name { // status // header [] // } // replace_status [] // handle_response [] { // // // # special directives only available in handle_response // copy_response [] [] { // status // } // copy_response_headers [] { // include // exclude // } // } // } // // Proxy upstream addresses should be network dial addresses such // as `host:port`, or a URL such as `scheme://host:port`. Scheme // and port may be inferred from other parts of the address/URL; if // either are missing, defaults to HTTP. // // The FinalizeUnmarshalCaddyfile method should be called after this // to finalize parsing of "handle_response" blocks, if possible. 
func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { // currently, all backends must use the same scheme/protocol (the // underlying JSON does not yet support per-backend transports) var commonScheme string // we'll wait until the very end of parsing before // validating and encoding the transport var transport http.RoundTripper var transportModuleName string // collect the response matchers defined as subdirectives // prefixed with "@" for use with "handle_response" blocks h.responseMatchers = make(map[string]caddyhttp.ResponseMatcher) // appendUpstream creates an upstream for address and adds // it to the list. appendUpstream := func(address string) error { pa, err := parseUpstreamDialAddress(address) if err != nil { return d.WrapErr(err) } // the underlying JSON does not yet support different // transports (protocols or schemes) to each backend, // so we remember the last one we see and compare them switch pa.scheme { case "wss": return d.Errf("the scheme wss:// is only supported in browsers; use https:// instead") case "ws": return d.Errf("the scheme ws:// is only supported in browsers; use http:// instead") case "https", "http", "h2c", "": // Do nothing or handle the valid schemes default: return d.Errf("unsupported URL scheme %s://", pa.scheme) } if commonScheme != "" && pa.scheme != commonScheme { return d.Errf("for now, all proxy upstreams must use the same scheme (transport protocol); expecting '%s://' but got '%s://'", commonScheme, pa.scheme) } commonScheme = pa.scheme // if the port of upstream address contains a placeholder, only wrap it with the `Upstream` struct, // delaying actual resolution of the address until request time. 
if pa.replaceablePort() { h.Upstreams = append(h.Upstreams, &Upstream{Dial: pa.dialAddr()}) return nil } parsedAddr, err := caddy.ParseNetworkAddress(pa.dialAddr()) if err != nil { return d.WrapErr(err) } if pa.isUnix() || !pa.rangedPort() { // unix networks don't have ports h.Upstreams = append(h.Upstreams, &Upstream{ Dial: pa.dialAddr(), }) } else { // expand a port range into multiple upstreams for i := parsedAddr.StartPort; i <= parsedAddr.EndPort; i++ { h.Upstreams = append(h.Upstreams, &Upstream{ Dial: caddy.JoinNetworkAddress("", parsedAddr.Host, fmt.Sprint(i)), }) } } return nil } d.Next() // consume the directive name for _, up := range d.RemainingArgs() { err := appendUpstream(up) if err != nil { return fmt.Errorf("parsing upstream '%s': %w", up, err) } } for d.NextBlock(0) { // if the subdirective has an "@" prefix then we // parse it as a response matcher for use with "handle_response" if strings.HasPrefix(d.Val(), matcherPrefix) { err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), h.responseMatchers) if err != nil { return err } continue } switch d.Val() { case "to": args := d.RemainingArgs() if len(args) == 0 { return d.ArgErr() } for _, up := range args { err := appendUpstream(up) if err != nil { return fmt.Errorf("parsing upstream '%s': %w", up, err) } } case "dynamic": if !d.NextArg() { return d.ArgErr() } if h.DynamicUpstreams != nil { return d.Err("dynamic upstreams already specified") } dynModule := d.Val() modID := "http.reverse_proxy.upstreams." 
+ dynModule unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } source, ok := unm.(UpstreamSource) if !ok { return d.Errf("module %s (%T) is not an UpstreamSource", modID, unm) } h.DynamicUpstreamsRaw = caddyconfig.JSONModuleObject(source, "source", dynModule, nil) case "lb_policy": if !d.NextArg() { return d.ArgErr() } if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil { return d.Err("load balancing selection policy already specified") } name := d.Val() modID := "http.reverse_proxy.selection_policies." + name unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } sel, ok := unm.(Selector) if !ok { return d.Errf("module %s (%T) is not a reverseproxy.Selector", modID, unm) } if h.LoadBalancing == nil { h.LoadBalancing = new(LoadBalancing) } h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil) case "lb_retries": if !d.NextArg() { return d.ArgErr() } tries, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("bad lb_retries number '%s': %v", d.Val(), err) } if h.LoadBalancing == nil { h.LoadBalancing = new(LoadBalancing) } h.LoadBalancing.Retries = tries case "lb_try_duration": if !d.NextArg() { return d.ArgErr() } if h.LoadBalancing == nil { h.LoadBalancing = new(LoadBalancing) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value %s: %v", d.Val(), err) } h.LoadBalancing.TryDuration = caddy.Duration(dur) case "lb_try_interval": if !d.NextArg() { return d.ArgErr() } if h.LoadBalancing == nil { h.LoadBalancing = new(LoadBalancing) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad interval value '%s': %v", d.Val(), err) } h.LoadBalancing.TryInterval = caddy.Duration(dur) case "lb_retry_match": matcherSet, err := caddyhttp.ParseCaddyfileNestedMatcherSet(d) if err != nil { return d.Errf("failed to parse lb_retry_match: %v", err) } if h.LoadBalancing == nil { h.LoadBalancing = new(LoadBalancing) 
} h.LoadBalancing.RetryMatchRaw = append(h.LoadBalancing.RetryMatchRaw, matcherSet) case "health_uri": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.URI = d.Val() case "health_path": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.Path = d.Val() caddy.Log().Named("config.adapter.caddyfile").Warn("the 'health_path' subdirective is deprecated, please use 'health_uri' instead!") case "health_upstream": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } _, port, err := net.SplitHostPort(d.Val()) if err != nil { return d.Errf("health_upstream is malformed '%s': %v", d.Val(), err) } _, err = strconv.Atoi(port) if err != nil { return d.Errf("bad port number '%s': %v", d.Val(), err) } h.HealthChecks.Active.Upstream = d.Val() case "health_port": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } if h.HealthChecks.Active.Upstream != "" { return d.Errf("the 'health_port' subdirective is ignored if 'health_upstream' is used!") } portNum, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("bad port number '%s': %v", d.Val(), err) } h.HealthChecks.Active.Port = portNum case "health_headers": healthHeaders := make(http.Header) for nesting := d.Nesting(); d.NextBlock(nesting); { key := d.Val() values := d.RemainingArgs() if len(values) == 0 { values = append(values, "") } healthHeaders[key] = append(healthHeaders[key], values...) 
} if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.Headers = healthHeaders case "health_method": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.Method = d.Val() case "health_request_body": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.Body = d.Val() case "health_interval": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad interval value %s: %v", d.Val(), err) } h.HealthChecks.Active.Interval = caddy.Duration(dur) case "health_timeout": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value %s: %v", d.Val(), err) } h.HealthChecks.Active.Timeout = caddy.Duration(dur) case "health_status": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } val := d.Val() if len(val) == 3 && strings.HasSuffix(val, "xx") { val = val[:1] } statusNum, err := strconv.Atoi(val) if err != nil { return d.Errf("bad status value '%s': %v", d.Val(), err) } h.HealthChecks.Active.ExpectStatus = statusNum case "health_body": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = 
new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.ExpectBody = d.Val() case "health_follow_redirects": if d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } h.HealthChecks.Active.FollowRedirects = true case "health_passes": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } passes, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid passes count '%s': %v", d.Val(), err) } h.HealthChecks.Active.Passes = passes case "health_fails": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Active == nil { h.HealthChecks.Active = new(ActiveHealthChecks) } fails, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid fails count '%s': %v", d.Val(), err) } h.HealthChecks.Active.Fails = fails case "max_fails": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Passive == nil { h.HealthChecks.Passive = new(PassiveHealthChecks) } maxFails, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid maximum fail count '%s': %v", d.Val(), err) } h.HealthChecks.Passive.MaxFails = maxFails case "fail_duration": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Passive == nil { h.HealthChecks.Passive = new(PassiveHealthChecks) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.HealthChecks.Passive.FailDuration = caddy.Duration(dur) case "unhealthy_request_count": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { 
h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Passive == nil { h.HealthChecks.Passive = new(PassiveHealthChecks) } maxConns, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid maximum connection count '%s': %v", d.Val(), err) } h.HealthChecks.Passive.UnhealthyRequestCount = maxConns case "unhealthy_status": args := d.RemainingArgs() if len(args) == 0 { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Passive == nil { h.HealthChecks.Passive = new(PassiveHealthChecks) } for _, arg := range args { if len(arg) == 3 && strings.HasSuffix(arg, "xx") { arg = arg[:1] } statusNum, err := strconv.Atoi(arg) if err != nil { return d.Errf("bad status value '%s': %v", d.Val(), err) } h.HealthChecks.Passive.UnhealthyStatus = append(h.HealthChecks.Passive.UnhealthyStatus, statusNum) } case "unhealthy_latency": if !d.NextArg() { return d.ArgErr() } if h.HealthChecks == nil { h.HealthChecks = new(HealthChecks) } if h.HealthChecks.Passive == nil { h.HealthChecks.Passive = new(PassiveHealthChecks) } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.HealthChecks.Passive.UnhealthyLatency = caddy.Duration(dur) case "flush_interval": if !d.NextArg() { return d.ArgErr() } if fi, err := strconv.Atoi(d.Val()); err == nil { h.FlushInterval = caddy.Duration(fi) } else { dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.FlushInterval = caddy.Duration(dur) } case "request_buffers", "response_buffers": subdir := d.Val() if !d.NextArg() { return d.ArgErr() } val := d.Val() var size int64 if val == "unlimited" { size = -1 } else { usize, err := humanize.ParseBytes(val) if err != nil { return d.Errf("invalid byte size '%s': %v", val, err) } size = int64(usize) } if d.NextArg() { return d.ArgErr() } switch subdir { case "request_buffers": h.RequestBuffers = size case "response_buffers": 
h.ResponseBuffers = size } case "stream_timeout": if !d.NextArg() { return d.ArgErr() } if fi, err := strconv.Atoi(d.Val()); err == nil { h.StreamTimeout = caddy.Duration(fi) } else { dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.StreamTimeout = caddy.Duration(dur) } case "stream_close_delay": if !d.NextArg() { return d.ArgErr() } if fi, err := strconv.Atoi(d.Val()); err == nil { h.StreamCloseDelay = caddy.Duration(fi) } else { dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.StreamCloseDelay = caddy.Duration(dur) } case "trusted_proxies": for d.NextArg() { if d.Val() == "private_ranges" { h.TrustedProxies = append(h.TrustedProxies, internal.PrivateRangesCIDR()...) continue } h.TrustedProxies = append(h.TrustedProxies, d.Val()) } case "header_up": var err error if h.Headers == nil { h.Headers = new(headers.Handler) } if h.Headers.Request == nil { h.Headers.Request = new(headers.HeaderOps) } args := d.RemainingArgs() switch len(args) { case 1: err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], "", nil) case 2: // some lint checks, I guess if strings.EqualFold(args[0], "host") && (args[1] == "{hostport}" || args[1] == "{http.request.hostport}") { caddy.Log().Named("caddyfile").Warn("Unnecessary header_up Host: the reverse proxy's default behavior is to pass headers to the upstream") } if strings.EqualFold(args[0], "x-forwarded-for") && (args[1] == "{remote}" || args[1] == "{http.request.remote}" || args[1] == "{remote_host}" || args[1] == "{http.request.remote.host}") { caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-For: the reverse proxy's default behavior is to pass headers to the upstream") } if strings.EqualFold(args[0], "x-forwarded-proto") && (args[1] == "{scheme}" || args[1] == "{http.request.scheme}") { caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-Proto: the 
reverse proxy's default behavior is to pass headers to the upstream") } if strings.EqualFold(args[0], "x-forwarded-host") && (args[1] == "{host}" || args[1] == "{http.request.host}" || args[1] == "{hostport}" || args[1] == "{http.request.hostport}") { caddy.Log().Named("caddyfile").Warn("Unnecessary header_up X-Forwarded-Host: the reverse proxy's default behavior is to pass headers to the upstream") } err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], nil) case 3: err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], &args[2]) default: return d.ArgErr() } if err != nil { return d.Err(err.Error()) } case "header_down": var err error if h.Headers == nil { h.Headers = new(headers.Handler) } if h.Headers.Response == nil { h.Headers.Response = &headers.RespHeaderOps{ HeaderOps: new(headers.HeaderOps), } } args := d.RemainingArgs() switch len(args) { case 1: err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], "", nil) case 2: err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], nil) case 3: err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], &args[2]) default: return d.ArgErr() } if err != nil { return d.Err(err.Error()) } case "method": if !d.NextArg() { return d.ArgErr() } if h.Rewrite == nil { h.Rewrite = &rewrite.Rewrite{} } h.Rewrite.Method = d.Val() if d.NextArg() { return d.ArgErr() } case "rewrite": if !d.NextArg() { return d.ArgErr() } if h.Rewrite == nil { h.Rewrite = &rewrite.Rewrite{} } h.Rewrite.URI = d.Val() if d.NextArg() { return d.ArgErr() } case "transport": if !d.NextArg() { return d.ArgErr() } if h.TransportRaw != nil { return d.Err("transport already specified") } transportModuleName = d.Val() modID := "http.reverse_proxy.transport." 
+ transportModuleName unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } rt, ok := unm.(http.RoundTripper) if !ok { return d.Errf("module %s (%T) is not a RoundTripper", modID, unm) } transport = rt case "handle_response": // delegate the parsing of handle_response to the caller, // since we need the httpcaddyfile.Helper to parse subroutes. // See h.FinalizeUnmarshalCaddyfile h.handleResponseSegments = append(h.handleResponseSegments, d.NewFromNextSegment()) case "replace_status": args := d.RemainingArgs() if len(args) != 1 && len(args) != 2 { return d.Errf("must have one or two arguments: an optional response matcher, and a status code") } responseHandler := caddyhttp.ResponseHandler{} if len(args) == 2 { if !strings.HasPrefix(args[0], matcherPrefix) { return d.Errf("must use a named response matcher, starting with '@'") } foundMatcher, ok := h.responseMatchers[args[0]] if !ok { return d.Errf("no named response matcher defined with name '%s'", args[0][1:]) } responseHandler.Match = &foundMatcher responseHandler.StatusCode = caddyhttp.WeakString(args[1]) } else if len(args) == 1 { responseHandler.StatusCode = caddyhttp.WeakString(args[0]) } // make sure there's no block, cause it doesn't make sense if nesting := d.Nesting(); d.NextBlock(nesting) { return d.Errf("cannot define routes for 'replace_status', use 'handle_response' instead.") } h.HandleResponse = append( h.HandleResponse, responseHandler, ) case "verbose_logs": if h.VerboseLogs { return d.Err("verbose_logs already specified") } h.VerboseLogs = true default: return d.Errf("unrecognized subdirective %s", d.Val()) } } // if the scheme inferred from the backends' addresses is // HTTPS, we will need a non-nil transport to enable TLS, // or if H2C, to set the transport versions. 
	if (commonScheme == "https" || commonScheme == "h2c") && transport == nil {
		transport = new(HTTPTransport)
		transportModuleName = "http"
	}

	// verify transport configuration, and finally encode it
	if transport != nil {
		if te, ok := transport.(TLSTransport); ok {
			// scheme says TLS but transport has no TLS config yet: enable it
			if commonScheme == "https" && !te.TLSEnabled() {
				err := te.EnableTLS(new(TLSConfig))
				if err != nil {
					return err
				}
			}
			// scheme says plaintext but transport is configured for TLS: contradiction
			if commonScheme == "http" && te.TLSEnabled() {
				return d.Errf("upstream address scheme is HTTP but transport is configured for HTTP+TLS (HTTPS)")
			}
			// h2c requires the transport to advertise the right protocol versions
			if h2ct, ok := transport.(H2CTransport); ok && commonScheme == "h2c" {
				err := h2ct.EnableH2C()
				if err != nil {
					return err
				}
			}
		} else if commonScheme == "https" {
			return d.Errf("upstreams are configured for HTTPS but transport module does not support TLS: %T", transport)
		}

		// no need to encode empty default transport
		if !reflect.DeepEqual(transport, new(HTTPTransport)) {
			h.TransportRaw = caddyconfig.JSONModuleObject(transport, "protocol", transportModuleName, nil)
		}
	}

	return nil
}

// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which
// requires having an httpcaddyfile.Helper to function, to parse subroutes.
func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error {
	for _, d := range h.handleResponseSegments {
		// consume the "handle_response" token
		d.Next()
		args := d.RemainingArgs()

		// TODO: Remove this check at some point in the future
		if len(args) == 2 {
			return d.Errf("configuring 'handle_response' for status code replacement is no longer supported. Use 'replace_status' instead.")
		}

		if len(args) > 1 {
			return d.Errf("too many arguments for 'handle_response': %s", args)
		}

		var matcher *caddyhttp.ResponseMatcher
		if len(args) == 1 {
			// the first arg should always be a matcher.
			if !strings.HasPrefix(args[0], matcherPrefix) {
				return d.Errf("must use a named response matcher, starting with '@'")
			}

			foundMatcher, ok := h.responseMatchers[args[0]]
			if !ok {
				return d.Errf("no named response matcher defined with name '%s'", args[0][1:])
			}
			matcher = &foundMatcher
		}

		// parse the block as routes
		handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment()))
		if err != nil {
			return err
		}
		subroute, ok := handler.(*caddyhttp.Subroute)
		if !ok {
			return helper.Errf("segment was not parsed as a subroute")
		}
		h.HandleResponse = append(
			h.HandleResponse,
			caddyhttp.ResponseHandler{
				Match:  matcher,
				Routes: subroute.Routes,
			},
		)
	}

	// move the handle_response entries without a matcher to the end.
	// we can't use sort.SliceStable because it will reorder the rest of the
	// entries which may be undesirable because we don't have a good
	// heuristic to use for sorting.
	withoutMatchers := []caddyhttp.ResponseHandler{}
	withMatchers := []caddyhttp.ResponseHandler{}
	for _, hr := range h.HandleResponse {
		if hr.Match == nil {
			withoutMatchers = append(withoutMatchers, hr)
		} else {
			withMatchers = append(withMatchers, hr)
		}
	}
	h.HandleResponse = append(withMatchers, withoutMatchers...)

	// clean up the bits we only needed for adapting
	h.handleResponseSegments = nil
	h.responseMatchers = nil

	return nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
//	transport http {
//	    read_buffer             <size>
//	    write_buffer            <size>
//	    max_response_header     <size>
//	    network_proxy           <module> {
//	        ...
//	    }
//	    dial_timeout            <duration>
//	    dial_fallback_delay     <duration>
//	    response_header_timeout <duration>
//	    expect_continue_timeout <duration>
//	    resolvers               <resolvers...>
//	    tls
//	    tls_client_auth <automate_name> | <cert_file> <key_file>
//	    tls_insecure_skip_verify
//	    tls_timeout             <duration>
//	    tls_trusted_ca_certs    <cert_files...>
//	    tls_trust_pool          <module> {
//	        ...
// } // tls_server_name // tls_renegotiation // tls_except_ports // keepalive [off|] // keepalive_interval // keepalive_idle_conns // keepalive_idle_conns_per_host // versions // compression off // max_conns_per_host // max_idle_conns_per_host // } func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume transport name for d.NextBlock(0) { switch d.Val() { case "read_buffer": if !d.NextArg() { return d.ArgErr() } size, err := humanize.ParseBytes(d.Val()) if err != nil { return d.Errf("invalid read buffer size '%s': %v", d.Val(), err) } h.ReadBufferSize = int(size) case "write_buffer": if !d.NextArg() { return d.ArgErr() } size, err := humanize.ParseBytes(d.Val()) if err != nil { return d.Errf("invalid write buffer size '%s': %v", d.Val(), err) } h.WriteBufferSize = int(size) case "read_timeout": if !d.NextArg() { return d.ArgErr() } timeout, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("invalid read timeout duration '%s': %v", d.Val(), err) } h.ReadTimeout = caddy.Duration(timeout) case "write_timeout": if !d.NextArg() { return d.ArgErr() } timeout, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("invalid write timeout duration '%s': %v", d.Val(), err) } h.WriteTimeout = caddy.Duration(timeout) case "max_response_header": if !d.NextArg() { return d.ArgErr() } size, err := humanize.ParseBytes(d.Val()) if err != nil { return d.Errf("invalid max response header size '%s': %v", d.Val(), err) } h.MaxResponseHeaderSize = int64(size) case "proxy_protocol": if !d.NextArg() { return d.ArgErr() } switch proxyProtocol := d.Val(); proxyProtocol { case "v1", "v2": h.ProxyProtocol = proxyProtocol default: return d.Errf("invalid proxy protocol version '%s'", proxyProtocol) } case "forward_proxy_url": caddy.Log().Warn("The 'forward_proxy_url' field is deprecated. 
Use 'network_proxy ' instead.") if !d.NextArg() { return d.ArgErr() } u := network.ProxyFromURL{URL: d.Val()} h.NetworkProxyRaw = caddyconfig.JSONModuleObject(u, "from", "url", nil) case "network_proxy": if !d.NextArg() { return d.ArgErr() } modStem := d.Val() modID := "caddy.network_proxy." + modStem unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } h.NetworkProxyRaw = caddyconfig.JSONModuleObject(unm, "from", modStem, nil) case "dial_timeout": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value '%s': %v", d.Val(), err) } h.DialTimeout = caddy.Duration(dur) case "dial_fallback_delay": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad fallback delay value '%s': %v", d.Val(), err) } h.FallbackDelay = caddy.Duration(dur) case "response_header_timeout": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value '%s': %v", d.Val(), err) } h.ResponseHeaderTimeout = caddy.Duration(dur) case "expect_continue_timeout": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value '%s': %v", d.Val(), err) } h.ExpectContinueTimeout = caddy.Duration(dur) case "resolvers": if h.Resolver == nil { h.Resolver = new(UpstreamResolver) } h.Resolver.Addresses = d.RemainingArgs() if len(h.Resolver.Addresses) == 0 { return d.Errf("must specify at least one resolver address") } case "tls": if h.TLS == nil { h.TLS = new(TLSConfig) } case "tls_client_auth": if h.TLS == nil { h.TLS = new(TLSConfig) } args := d.RemainingArgs() switch len(args) { case 1: h.TLS.ClientCertificateAutomate = args[0] case 2: h.TLS.ClientCertificateFile = args[0] h.TLS.ClientCertificateKeyFile = args[1] default: return d.ArgErr() } case "tls_insecure_skip_verify": if d.NextArg() { return d.ArgErr() } if h.TLS == nil { 
h.TLS = new(TLSConfig) } h.TLS.InsecureSkipVerify = true case "tls_curves": args := d.RemainingArgs() if len(args) == 0 { return d.ArgErr() } if h.TLS == nil { h.TLS = new(TLSConfig) } h.TLS.Curves = args case "tls_timeout": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value '%s': %v", d.Val(), err) } if h.TLS == nil { h.TLS = new(TLSConfig) } h.TLS.HandshakeTimeout = caddy.Duration(dur) case "tls_trusted_ca_certs": caddy.Log().Warn("The 'tls_trusted_ca_certs' field is deprecated. Use the 'tls_trust_pool' field instead.") args := d.RemainingArgs() if len(args) == 0 { return d.ArgErr() } if h.TLS == nil { h.TLS = new(TLSConfig) } if len(h.TLS.CARaw) != 0 { return d.Err("cannot specify both 'tls_trust_pool' and 'tls_trusted_ca_certs") } h.TLS.RootCAPEMFiles = args case "tls_server_name": if !d.NextArg() { return d.ArgErr() } if h.TLS == nil { h.TLS = new(TLSConfig) } h.TLS.ServerName = d.Val() case "tls_renegotiation": if h.TLS == nil { h.TLS = new(TLSConfig) } if !d.NextArg() { return d.ArgErr() } switch renegotiation := d.Val(); renegotiation { case "never", "once", "freely": h.TLS.Renegotiation = renegotiation default: return d.ArgErr() } case "tls_except_ports": if h.TLS == nil { h.TLS = new(TLSConfig) } h.TLS.ExceptPorts = d.RemainingArgs() if len(h.TLS.ExceptPorts) == 0 { return d.ArgErr() } case "keepalive": if !d.NextArg() { return d.ArgErr() } if h.KeepAlive == nil { h.KeepAlive = new(KeepAlive) } if d.Val() == "off" { var disable bool h.KeepAlive.Enabled = &disable break } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad duration value '%s': %v", d.Val(), err) } h.KeepAlive.IdleConnTimeout = caddy.Duration(dur) case "keepalive_interval": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad interval value '%s': %v", d.Val(), err) } if h.KeepAlive == nil { h.KeepAlive = new(KeepAlive) } 
h.KeepAlive.ProbeInterval = caddy.Duration(dur) case "keepalive_idle_conns": if !d.NextArg() { return d.ArgErr() } num, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("bad integer value '%s': %v", d.Val(), err) } if h.KeepAlive == nil { h.KeepAlive = new(KeepAlive) } h.KeepAlive.MaxIdleConns = num case "keepalive_idle_conns_per_host": if !d.NextArg() { return d.ArgErr() } num, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("bad integer value '%s': %v", d.Val(), err) } if h.KeepAlive == nil { h.KeepAlive = new(KeepAlive) } h.KeepAlive.MaxIdleConnsPerHost = num case "versions": h.Versions = d.RemainingArgs() if len(h.Versions) == 0 { return d.ArgErr() } case "compression": if d.NextArg() { if d.Val() == "off" { var disable bool h.Compression = &disable } } case "max_conns_per_host": if !d.NextArg() { return d.ArgErr() } num, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("bad integer value '%s': %v", d.Val(), err) } h.MaxConnsPerHost = num case "tls_trust_pool": if !d.NextArg() { return d.ArgErr() } modStem := d.Val() modID := "tls.ca_pool.source." 
+ modStem unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } ca, ok := unm.(caddytls.CA) if !ok { return d.Errf("module %s is not a caddytls.CA", modID) } if h.TLS == nil { h.TLS = new(TLSConfig) } if len(h.TLS.RootCAPEMFiles) != 0 { return d.Err("cannot specify both 'tls_trust_pool' and 'tls_trusted_ca_certs'") } if h.TLS.CARaw != nil { return d.Err("cannot specify \"tls_trust_pool\" twice in caddyfile") } h.TLS.CARaw = caddyconfig.JSONModuleObject(ca, "provider", modStem, nil) case "local_address": if !d.NextArg() { return d.ArgErr() } h.LocalAddress = d.Val() default: return d.Errf("unrecognized subdirective %s", d.Val()) } } return nil } func parseCopyResponseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { crh := new(CopyResponseHandler) err := crh.UnmarshalCaddyfile(h.Dispenser) if err != nil { return nil, err } return crh, nil } // UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax: // // copy_response [] [] { // status // } func (h *CopyResponseHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume directive name args := d.RemainingArgs() if len(args) == 1 { if num, err := strconv.Atoi(args[0]); err == nil && num > 0 { h.StatusCode = caddyhttp.WeakString(args[0]) return nil } } for d.NextBlock(0) { switch d.Val() { case "status": if !d.NextArg() { return d.ArgErr() } h.StatusCode = caddyhttp.WeakString(d.Val()) default: return d.Errf("unrecognized subdirective '%s'", d.Val()) } } return nil } func parseCopyResponseHeadersCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { crh := new(CopyResponseHeadersHandler) err := crh.UnmarshalCaddyfile(h.Dispenser) if err != nil { return nil, err } return crh, nil } // UnmarshalCaddyfile sets up the handler from Caddyfile tokens. 
// Syntax:
//
//	copy_response_headers [<matcher>] {
//	    include <fields...>
//	    exclude <fields...>
//	}
func (h *CopyResponseHeadersHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name
	args := d.RemainingArgs()
	if len(args) > 0 {
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "include":
			h.Include = append(h.Include, d.RemainingArgs()...)
		case "exclude":
			h.Exclude = append(h.Exclude, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized subdirective '%s'", d.Val())
		}
	}
	return nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
//	dynamic srv [<name>] {
//	    service             <service>
//	    proto               <proto>
//	    name                <name>
//	    refresh             <interval>
//	    resolvers           <resolvers...>
//	    dial_timeout        <timeout>
//	    dial_fallback_delay <timeout>
//	    grace_period        <duration>
//	}
func (u *SRVUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume upstream source name
	args := d.RemainingArgs()
	if len(args) > 1 {
		return d.ArgErr()
	}
	if len(args) > 0 {
		u.Name = args[0]
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "service":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if u.Service != "" {
				return d.Errf("srv service has already been specified")
			}
			u.Service = d.Val()
		case "proto":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if u.Proto != "" {
				return d.Errf("srv proto has already been specified")
			}
			u.Proto = d.Val()
		case "name":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if u.Name != "" {
				return d.Errf("srv name has already been specified")
			}
			u.Name = d.Val()
		case "refresh":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("parsing refresh interval duration: %v", err)
			}
			u.Refresh = caddy.Duration(dur)
		case "resolvers":
			if u.Resolver == nil {
				u.Resolver = new(UpstreamResolver)
			}
			u.Resolver.Addresses = d.RemainingArgs()
			if len(u.Resolver.Addresses) == 0 {
				return d.Errf("must specify at least one resolver address")
			}
		case "dial_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad timeout value '%s': %v", d.Val(), err)
			}
			u.DialTimeout = caddy.Duration(dur)
		case "dial_fallback_delay":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad delay value '%s': %v", d.Val(), err)
			}
			u.FallbackDelay = caddy.Duration(dur)
		case "grace_period":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad grace period value '%s': %v", d.Val(), err)
			}
			u.GracePeriod = caddy.Duration(dur)
		default:
			return d.Errf("unrecognized srv option '%s'", d.Val())
		}
	}
	return nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
//	dynamic a [<name> <port>] {
//	    port                <port>
//	    refresh             <interval>
//	    resolvers           <resolvers...>
//	    dial_timeout        <timeout>
//	    dial_fallback_delay <timeout>
//	    versions            ipv4|ipv6
//	}
func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume upstream source name
	args := d.RemainingArgs()
	if len(args) > 2 {
		return d.ArgErr()
	}
	if len(args) > 0 {
		u.Name = args[0]
		if len(args) == 2 {
			u.Port = args[1]
		}
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "name":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if u.Name != "" {
				return d.Errf("a name has already been specified")
			}
			u.Name = d.Val()
		case "port":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if u.Port != "" {
				return d.Errf("a port has already been specified")
			}
			u.Port = d.Val()
		case "refresh":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("parsing refresh interval duration: %v", err)
			}
			u.Refresh = caddy.Duration(dur)
		case "resolvers":
			if u.Resolver == nil {
				u.Resolver = new(UpstreamResolver)
			}
			u.Resolver.Addresses = d.RemainingArgs()
			if len(u.Resolver.Addresses) == 0 {
				return d.Errf("must specify at least one resolver address")
			}
		case "dial_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad timeout value '%s': %v", d.Val(), err)
			}
			u.DialTimeout = caddy.Duration(dur)
		case "dial_fallback_delay":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad delay value '%s': %v", d.Val(), err)
			}
			u.FallbackDelay = caddy.Duration(dur)
		case "versions":
			args := d.RemainingArgs()
			if len(args) == 0 {
				return d.Errf("must specify at least one version")
			}
			if u.Versions == nil {
				u.Versions = &IPVersions{}
			}
			// a single shared true value is sufficient; only the
			// pointer's non-nil-ness and value matter
			trueBool := true
			for _, arg := range args {
				switch arg {
				case "ipv4":
					u.Versions.IPv4 = &trueBool
				case "ipv6":
					u.Versions.IPv6 = &trueBool
				default:
					return d.Errf("unsupported version: '%s'", arg)
				}
			}
		default:
			return d.Errf("unrecognized a option '%s'", d.Val())
		}
	}
	return nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
//	dynamic multi {
//	    <source> [...]
//	}
func (u *MultiUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume upstream source name
	if d.NextArg() {
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		dynModule := d.Val()
		modID := "http.reverse_proxy.upstreams." + dynModule
		unm, err := caddyfile.UnmarshalModule(d, modID)
		if err != nil {
			return err
		}
		source, ok := unm.(UpstreamSource)
		if !ok {
			return d.Errf("module %s (%T) is not an UpstreamSource", modID, unm)
		}
		u.SourcesRaw = append(u.SourcesRaw, caddyconfig.JSONModuleObject(source, "source", dynModule, nil))
	}
	return nil
}

// matcherPrefix marks named response matchers in the Caddyfile (e.g. "@name").
const matcherPrefix = "@"

// Interface guards
var (
	_ caddyfile.Unmarshaler = (*Handler)(nil)
	_ caddyfile.Unmarshaler = (*HTTPTransport)(nil)
	_ caddyfile.Unmarshaler = (*SRVUpstreams)(nil)
	_ caddyfile.Unmarshaler = (*AUpstreams)(nil)
	_ caddyfile.Unmarshaler = (*MultiUpstreams)(nil)
)



================================================
FILE: modules/caddyhttp/reverseproxy/command.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "encoding/json" "fmt" "net/http" "strconv" "strings" "github.com/spf13/cobra" "go.uber.org/zap" caddycmd "github.com/caddyserver/caddy/v2/cmd" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { caddycmd.RegisterCommand(caddycmd.Command{ Name: "reverse-proxy", Usage: `[--from ] [--to ] [--change-host-header] [--insecure] [--internal-certs] [--disable-redirects] [--header-up "Field: value"] [--header-down "Field: value"] [--access-log] [--debug]`, Short: "A quick and production-ready reverse proxy", Long: ` A simple but production-ready reverse proxy. Useful for quick deployments, demos, and development. Simply shuttles HTTP(S) traffic from the --from address to the --to address. Multiple --to addresses may be specified by repeating the flag. Unless otherwise specified in the addresses, the --from address will be assumed to be HTTPS if a hostname is given, and the --to address will be assumed to be HTTP. If the --from address has a host or IP, Caddy will attempt to serve the proxy over HTTPS with a certificate (unless overridden by the HTTP scheme or port). If serving HTTPS: --disable-redirects can be used to avoid binding to the HTTP port. --internal-certs can be used to force issuance certs using the internal CA instead of attempting to issue a public certificate. 
For proxying: --header-up can be used to set a request header to send to the upstream. --header-down can be used to set a response header to send back to the client. --change-host-header sets the Host header on the request to the address of the upstream, instead of defaulting to the incoming Host header. This is a shortcut for --header-up "Host: {http.reverse_proxy.upstream.hostport}". --insecure disables TLS verification with the upstream. WARNING: THIS DISABLES SECURITY BY NOT VERIFYING THE UPSTREAM'S CERTIFICATE. `, CobraFunc: func(cmd *cobra.Command) { cmd.Flags().StringP("from", "f", "localhost", "Address on which to receive traffic") cmd.Flags().StringSliceP("to", "t", []string{}, "Upstream address(es) to which traffic should be sent") cmd.Flags().BoolP("change-host-header", "c", false, "Set upstream Host header to address of upstream") cmd.Flags().BoolP("insecure", "", false, "Disable TLS verification (WARNING: DISABLES SECURITY BY NOT VERIFYING TLS CERTIFICATES!)") cmd.Flags().BoolP("disable-redirects", "r", false, "Disable HTTP->HTTPS redirects") cmd.Flags().BoolP("internal-certs", "i", false, "Use internal CA for issuing certs") cmd.Flags().StringArrayP("header-up", "H", []string{}, "Set a request header to send to the upstream (format: \"Field: value\")") cmd.Flags().StringArrayP("header-down", "d", []string{}, "Set a response header to send back to the client (format: \"Field: value\")") cmd.Flags().BoolP("access-log", "", false, "Enable the access log") cmd.Flags().BoolP("debug", "v", false, "Enable verbose debug logs") cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdReverseProxy) }, }) } func cmdReverseProxy(fs caddycmd.Flags) (int, error) { caddy.TrapSignals() from := fs.String("from") changeHost := fs.Bool("change-host-header") insecure := fs.Bool("insecure") disableRedir := fs.Bool("disable-redirects") internalCerts := fs.Bool("internal-certs") accessLog := fs.Bool("access-log") debug := fs.Bool("debug") httpPort := 
strconv.Itoa(caddyhttp.DefaultHTTPPort) httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort) to, err := fs.GetStringSlice("to") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid to flag: %v", err) } if len(to) == 0 { return caddy.ExitCodeFailedStartup, fmt.Errorf("--to is required") } // set up the downstream address; assume missing information from given parts fromAddr, err := httpcaddyfile.ParseAddress(from) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid downstream address %s: %v", from, err) } if fromAddr.Path != "" { return caddy.ExitCodeFailedStartup, fmt.Errorf("paths are not allowed: %s", from) } if fromAddr.Scheme == "" { if fromAddr.Port == httpPort || fromAddr.Host == "" { fromAddr.Scheme = "http" } else { fromAddr.Scheme = "https" } } if fromAddr.Port == "" { switch fromAddr.Scheme { case "http": fromAddr.Port = httpPort case "https": fromAddr.Port = httpsPort } } // set up the upstream address; assume missing information from given parts // mixing schemes isn't supported, so use first defined (if available) toAddresses := make([]string, len(to)) var toScheme string for i, toLoc := range to { addr, err := parseUpstreamDialAddress(toLoc) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid upstream address %s: %v", toLoc, err) } if addr.scheme != "" && toScheme == "" { toScheme = addr.scheme } toAddresses[i] = addr.dialAddr() } // proceed to build the handler and server ht := HTTPTransport{} if toScheme == "https" { ht.TLS = new(TLSConfig) if insecure { ht.TLS.InsecureSkipVerify = true } } upstreamPool := UpstreamPool{} for _, toAddr := range toAddresses { parsedAddr, err := caddy.ParseNetworkAddress(toAddr) if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid upstream address %s: %v", toAddr, err) } if parsedAddr.StartPort == 0 && parsedAddr.EndPort == 0 { // unix networks don't have ports upstreamPool = append(upstreamPool, &Upstream{ Dial: toAddr, }) } else { // 
expand a port range into multiple upstreams for i := parsedAddr.StartPort; i <= parsedAddr.EndPort; i++ { upstreamPool = append(upstreamPool, &Upstream{ Dial: caddy.JoinNetworkAddress("", parsedAddr.Host, fmt.Sprint(i)), }) } } } handler := Handler{ TransportRaw: caddyconfig.JSONModuleObject(ht, "protocol", "http", nil), Upstreams: upstreamPool, } // set up header_up headerUp, err := fs.GetStringArray("header-up") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid header flag: %v", err) } if len(headerUp) > 0 { reqHdr := make(http.Header) for i, h := range headerUp { key, val, found := strings.Cut(h, ":") key, val = strings.TrimSpace(key), strings.TrimSpace(val) if !found || key == "" || val == "" { return caddy.ExitCodeFailedStartup, fmt.Errorf("header-up %d: invalid format \"%s\" (expecting \"Field: value\")", i, h) } reqHdr.Set(key, val) } handler.Headers = &headers.Handler{ Request: &headers.HeaderOps{ Set: reqHdr, }, } } // set up header_down headerDown, err := fs.GetStringArray("header-down") if err != nil { return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid header flag: %v", err) } if len(headerDown) > 0 { respHdr := make(http.Header) for i, h := range headerDown { key, val, found := strings.Cut(h, ":") key, val = strings.TrimSpace(key), strings.TrimSpace(val) if !found || key == "" || val == "" { return caddy.ExitCodeFailedStartup, fmt.Errorf("header-down %d: invalid format \"%s\" (expecting \"Field: value\")", i, h) } respHdr.Set(key, val) } if handler.Headers == nil { handler.Headers = &headers.Handler{} } handler.Headers.Response = &headers.RespHeaderOps{ HeaderOps: &headers.HeaderOps{ Set: respHdr, }, } } if changeHost { if handler.Headers == nil { handler.Headers = new(headers.Handler) } if handler.Headers.Request == nil { handler.Headers.Request = new(headers.HeaderOps) } if handler.Headers.Request.Set == nil { handler.Headers.Request.Set = http.Header{} } handler.Headers.Request.Set.Set("Host", 
"{http.reverse_proxy.upstream.hostport}") } route := caddyhttp.Route{ HandlersRaw: []json.RawMessage{ caddyconfig.JSONModuleObject(handler, "handler", "reverse_proxy", nil), }, } if fromAddr.Host != "" { route.MatcherSetsRaw = []caddy.ModuleMap{ { "host": caddyconfig.JSON(caddyhttp.MatchHost{fromAddr.Host}, nil), }, } } server := &caddyhttp.Server{ Routes: caddyhttp.RouteList{route}, Listen: []string{":" + fromAddr.Port}, } if accessLog { server.Logs = &caddyhttp.ServerLogConfig{} } if fromAddr.Scheme == "http" { server.AutoHTTPS = &caddyhttp.AutoHTTPSConfig{Disabled: true} } else if disableRedir { server.AutoHTTPS = &caddyhttp.AutoHTTPSConfig{DisableRedir: true} } httpApp := caddyhttp.App{ Servers: map[string]*caddyhttp.Server{"proxy": server}, } appsRaw := caddy.ModuleMap{ "http": caddyconfig.JSON(httpApp, nil), } if internalCerts && fromAddr.Host != "" { tlsApp := caddytls.TLS{ Automation: &caddytls.AutomationConfig{ Policies: []*caddytls.AutomationPolicy{{ SubjectsRaw: []string{fromAddr.Host}, IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)}, }}, }, } appsRaw["tls"] = caddyconfig.JSON(tlsApp, nil) } var false bool cfg := &caddy.Config{ Admin: &caddy.AdminConfig{ Disabled: true, Config: &caddy.ConfigSettings{ Persist: &false, }, }, AppsRaw: appsRaw, } if debug { cfg.Logging = &caddy.Logging{ Logs: map[string]*caddy.CustomLog{ "default": {BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()}}, }, } } err = caddy.Run(cfg) if err != nil { return caddy.ExitCodeFailedStartup, err } caddy.Log().Info("caddy proxying", zap.String("from", fromAddr.String()), zap.Strings("to", toAddresses)) if len(toAddresses) > 1 { caddy.Log().Info("using default load balancing policy", zap.String("policy", "random")) } select {} } ================================================ FILE: modules/caddyhttp/reverseproxy/copyresponse.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the 
Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "fmt" "net/http" "strconv" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { caddy.RegisterModule(CopyResponseHandler{}) caddy.RegisterModule(CopyResponseHeadersHandler{}) } // CopyResponseHandler is a special HTTP handler which may // only be used within reverse_proxy's handle_response routes, // to copy the proxy response. EXPERIMENTAL, subject to change. type CopyResponseHandler struct { // To write the upstream response's body but with a different // status code, set this field to the desired status code. StatusCode caddyhttp.WeakString `json:"status_code,omitempty"` ctx caddy.Context } // CaddyModule returns the Caddy module information. func (CopyResponseHandler) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.copy_response", New: func() caddy.Module { return new(CopyResponseHandler) }, } } // Provision ensures that h is set up properly before use. func (h *CopyResponseHandler) Provision(ctx caddy.Context) error { h.ctx = ctx return nil } // ServeHTTP implements the Handler interface. 
// ServeHTTP copies the buffered upstream response to the client. It may only
// run inside a reverse_proxy handle_response route: it requires the
// *handleResponseContext placed on the request context by the proxy handler.
// The next handler in the chain is intentionally ignored (the response is
// finalized here).
func (h CopyResponseHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, _ caddyhttp.Handler) error {
	repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	hrc, ok := req.Context().Value(proxyHandleResponseContextCtxKey).(*handleResponseContext)

	// don't allow this to be used outside of handle_response routes
	if !ok {
		return caddyhttp.Error(http.StatusInternalServerError,
			fmt.Errorf("cannot use 'copy_response' outside of reverse_proxy's handle_response routes"))
	}

	// allow a custom status code to be written; otherwise the
	// status code from the upstream response is written
	if codeStr := h.StatusCode.String(); codeStr != "" {
		// placeholders are resolved before parsing the integer
		intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
		if err != nil {
			return caddyhttp.Error(http.StatusInternalServerError, err)
		}
		hrc.response.StatusCode = intVal
	}

	// make sure the reverse_proxy handler doesn't try to call
	// finalizeResponse again after we've already done it here.
	hrc.isFinalized = true

	// write the response
	return hrc.handler.finalizeResponse(rw, req, hrc.response, repl, hrc.start, hrc.logger)
}

// CopyResponseHeadersHandler is a special HTTP handler which may
// only be used within reverse_proxy's handle_response routes,
// to copy headers from the proxy response. EXPERIMENTAL;
// subject to change.
type CopyResponseHeadersHandler struct {
	// A list of header fields to copy from the response.
	// Cannot be defined at the same time as Exclude.
	Include []string `json:"include,omitempty"`

	// A list of header fields to skip copying from the response.
	// Cannot be defined at the same time as Include.
	Exclude []string `json:"exclude,omitempty"`

	// includeMap/excludeMap are canonical-key lookup sets built
	// from Include/Exclude during Provision for O(1) matching.
	includeMap map[string]struct{}
	excludeMap map[string]struct{}
	// ctx is saved at Provision time; not otherwise read in this file.
	ctx caddy.Context
}

// CaddyModule returns the Caddy module information.
// CaddyModule returns the Caddy module information.
func (CopyResponseHeadersHandler) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.copy_response_headers",
		New: func() caddy.Module { return new(CopyResponseHeadersHandler) },
	}
}

// Validate ensures the h's configuration is valid.
// Include and Exclude are mutually exclusive.
func (h *CopyResponseHeadersHandler) Validate() error {
	if len(h.Exclude) > 0 && len(h.Include) > 0 {
		return fmt.Errorf("cannot define both 'exclude' and 'include' lists at the same time")
	}

	return nil
}

// Provision ensures that h is set up properly before use.
// The include/exclude slices are converted to maps keyed by
// canonical header names so lookups in ServeHTTP are O(1).
func (h *CopyResponseHeadersHandler) Provision(ctx caddy.Context) error {
	h.ctx = ctx

	// Optimize the include list by converting it to a map
	if len(h.Include) > 0 {
		h.includeMap = map[string]struct{}{}
	}
	for _, field := range h.Include {
		h.includeMap[http.CanonicalHeaderKey(field)] = struct{}{}
	}

	// Optimize the exclude list by converting it to a map
	if len(h.Exclude) > 0 {
		h.excludeMap = map[string]struct{}{}
	}
	for _, field := range h.Exclude {
		h.excludeMap[http.CanonicalHeaderKey(field)] = struct{}{}
	}

	return nil
}

// ServeHTTP implements the Handler interface. It copies (a filtered set of)
// the upstream response's headers to the client, then continues the chain.
// Like copy_response, it requires the handle_response context from the proxy.
func (h CopyResponseHeadersHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next caddyhttp.Handler) error {
	hrc, ok := req.Context().Value(proxyHandleResponseContextCtxKey).(*handleResponseContext)

	// don't allow this to be used outside of handle_response routes
	if !ok {
		return caddyhttp.Error(http.StatusInternalServerError,
			fmt.Errorf("cannot use 'copy_response_headers' outside of reverse_proxy's handle_response routes"))
	}

	for field, values := range hrc.response.Header {
		// Check the include list first, skip
		// the header if it's _not_ in this list.
		if len(h.includeMap) > 0 {
			if _, ok := h.includeMap[field]; !ok {
				continue
			}
		}

		// Then, check the exclude list, skip
		// the header if it _is_ in this list.
		if len(h.excludeMap) > 0 {
			if _, ok := h.excludeMap[field]; ok {
				continue
			}
		}

		// Copy all the values for the header.
		for _, value := range values {
			rw.Header().Add(field, value)
		}
	}

	return next.ServeHTTP(rw, req)
}

// Interface guards
var (
	_ caddyhttp.MiddlewareHandler = (*CopyResponseHandler)(nil)
	_ caddyfile.Unmarshaler       = (*CopyResponseHandler)(nil)
	_ caddy.Provisioner           = (*CopyResponseHandler)(nil)

	_ caddyhttp.MiddlewareHandler = (*CopyResponseHeadersHandler)(nil)
	_ caddyfile.Unmarshaler       = (*CopyResponseHeadersHandler)(nil)
	_ caddy.Provisioner           = (*CopyResponseHeadersHandler)(nil)
	_ caddy.Validator             = (*CopyResponseHeadersHandler)(nil)
)


================================================
FILE: modules/caddyhttp/reverseproxy/dynamic_upstreams_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package reverseproxy

import (
	"sync"
	"testing"
	"time"

	"github.com/caddyserver/caddy/v2"
)

// resetDynamicHosts clears global dynamic host state between tests.
// Tests in this file mutate package-level state (dynamicHosts and its
// cleaner Once), so they must not run in parallel with each other.
func resetDynamicHosts() {
	dynamicHostsMu.Lock()
	dynamicHosts = make(map[string]dynamicHostEntry)
	dynamicHostsMu.Unlock()
	// Reset the Once so cleanup goroutine tests can re-trigger if needed.
	dynamicHostsCleanerOnce = sync.Once{}
}

// TestFillDynamicHostCreatesEntry verifies that calling fillDynamicHost on a
// new address inserts an entry into dynamicHosts and assigns a non-nil Host.
func TestFillDynamicHostCreatesEntry(t *testing.T) { resetDynamicHosts() u := &Upstream{Dial: "192.0.2.1:80"} u.fillDynamicHost() if u.Host == nil { t.Fatal("expected Host to be set after fillDynamicHost") } dynamicHostsMu.RLock() entry, ok := dynamicHosts["192.0.2.1:80"] dynamicHostsMu.RUnlock() if !ok { t.Fatal("expected entry in dynamicHosts map") } if entry.host != u.Host { t.Error("dynamicHosts entry host should be the same pointer assigned to Upstream.Host") } if entry.lastSeen.IsZero() { t.Error("expected lastSeen to be set") } } // TestFillDynamicHostReusesSameHost verifies that two calls for the same address // return the exact same *Host pointer so that state (e.g. fail counts) is shared. func TestFillDynamicHostReusesSameHost(t *testing.T) { resetDynamicHosts() u1 := &Upstream{Dial: "192.0.2.2:80"} u1.fillDynamicHost() u2 := &Upstream{Dial: "192.0.2.2:80"} u2.fillDynamicHost() if u1.Host != u2.Host { t.Error("expected both upstreams to share the same *Host pointer") } } // TestFillDynamicHostUpdatesLastSeen verifies that a second call for the same // address advances the lastSeen timestamp. func TestFillDynamicHostUpdatesLastSeen(t *testing.T) { resetDynamicHosts() u := &Upstream{Dial: "192.0.2.3:80"} u.fillDynamicHost() dynamicHostsMu.RLock() first := dynamicHosts["192.0.2.3:80"].lastSeen dynamicHostsMu.RUnlock() // Ensure measurable time passes. time.Sleep(2 * time.Millisecond) u2 := &Upstream{Dial: "192.0.2.3:80"} u2.fillDynamicHost() dynamicHostsMu.RLock() second := dynamicHosts["192.0.2.3:80"].lastSeen dynamicHostsMu.RUnlock() if !second.After(first) { t.Error("expected lastSeen to be updated on second fillDynamicHost call") } } // TestFillDynamicHostIndependentAddresses verifies that different addresses get // independent Host entries. 
func TestFillDynamicHostIndependentAddresses(t *testing.T) {
	resetDynamicHosts()

	u1 := &Upstream{Dial: "192.0.2.4:80"}
	u1.fillDynamicHost()

	u2 := &Upstream{Dial: "192.0.2.5:80"}
	u2.fillDynamicHost()

	if u1.Host == u2.Host {
		t.Error("different addresses should have different *Host entries")
	}
}

// TestFillDynamicHostPreservesFailCount verifies that fail counts on a dynamic
// host survive across multiple fillDynamicHost calls (simulating sequential
// requests), which is the core behaviour fixed by this change.
func TestFillDynamicHostPreservesFailCount(t *testing.T) {
	resetDynamicHosts()

	// First "request": provision and record a failure.
	u1 := &Upstream{Dial: "192.0.2.6:80"}
	u1.fillDynamicHost()
	_ = u1.Host.countFail(1)

	if u1.Host.Fails() != 1 {
		t.Fatalf("expected 1 fail, got %d", u1.Host.Fails())
	}

	// Second "request": provision the same address again (new *Upstream, same address).
	u2 := &Upstream{Dial: "192.0.2.6:80"}
	u2.fillDynamicHost()

	if u2.Host.Fails() != 1 {
		t.Errorf("expected fail count to persist across fillDynamicHost calls, got %d", u2.Host.Fails())
	}
}

// TestProvisionUpstreamDynamic verifies that provisionUpstream with dynamic=true
// uses fillDynamicHost (not the UsagePool) and sets healthCheckPolicy /
// MaxRequests correctly from handler config.
func TestProvisionUpstreamDynamic(t *testing.T) {
	resetDynamicHosts()

	passive := &PassiveHealthChecks{
		FailDuration:          caddy.Duration(10 * time.Second),
		MaxFails:              3,
		UnhealthyRequestCount: 5,
	}
	h := Handler{
		HealthChecks: &HealthChecks{
			Passive: passive,
		},
	}

	u := &Upstream{Dial: "192.0.2.7:80"}
	h.provisionUpstream(u, true)

	if u.Host == nil {
		t.Fatal("Host should be set after provisionUpstream")
	}
	if u.healthCheckPolicy != passive {
		t.Error("healthCheckPolicy should point to the handler's PassiveHealthChecks")
	}
	// MaxRequests is expected to be copied from UnhealthyRequestCount.
	if u.MaxRequests != 5 {
		t.Errorf("expected MaxRequests=5 from UnhealthyRequestCount, got %d", u.MaxRequests)
	}

	// Must be in dynamicHosts, not in the static UsagePool.
	dynamicHostsMu.RLock()
	_, inDynamic := dynamicHosts["192.0.2.7:80"]
	dynamicHostsMu.RUnlock()
	if !inDynamic {
		t.Error("dynamic upstream should be stored in dynamicHosts")
	}

	_, inPool := hosts.References("192.0.2.7:80")
	if inPool {
		t.Error("dynamic upstream should NOT be stored in the static UsagePool")
	}
}

// TestProvisionUpstreamStatic verifies that provisionUpstream with dynamic=false
// uses the UsagePool and does NOT insert into dynamicHosts.
func TestProvisionUpstreamStatic(t *testing.T) {
	resetDynamicHosts()

	h := Handler{}
	u := &Upstream{Dial: "192.0.2.8:80"}
	h.provisionUpstream(u, false)

	if u.Host == nil {
		t.Fatal("Host should be set after provisionUpstream")
	}

	refs, inPool := hosts.References("192.0.2.8:80")
	if !inPool {
		t.Error("static upstream should be in the UsagePool")
	}
	if refs != 1 {
		t.Errorf("expected ref count 1, got %d", refs)
	}

	dynamicHostsMu.RLock()
	_, inDynamic := dynamicHosts["192.0.2.8:80"]
	dynamicHostsMu.RUnlock()
	if inDynamic {
		t.Error("static upstream should NOT be in dynamicHosts")
	}

	// Clean up the pool entry we just added.
	_, _ = hosts.Delete("192.0.2.8:80")
}

// TestDynamicHostHealthyConsultsFails verifies the end-to-end passive health
// check path: after enough failures are recorded against a dynamic upstream's
// shared *Host, Healthy() returns false for a newly provisioned *Upstream with
// the same address.
func TestDynamicHostHealthyConsultsFails(t *testing.T) {
	resetDynamicHosts()

	passive := &PassiveHealthChecks{
		FailDuration: caddy.Duration(time.Minute),
		MaxFails:     2,
	}
	h := Handler{
		HealthChecks: &HealthChecks{Passive: passive},
	}

	// First request: provision and record two failures.
	u1 := &Upstream{Dial: "192.0.2.9:80"}
	h.provisionUpstream(u1, true)
	_ = u1.Host.countFail(1)
	_ = u1.Host.countFail(1)

	// Second request: fresh *Upstream, same address.
	u2 := &Upstream{Dial: "192.0.2.9:80"}
	h.provisionUpstream(u2, true)

	if u2.Healthy() {
		t.Error("upstream should be unhealthy after MaxFails failures have been recorded against its shared Host")
	}
}

// TestDynamicHostCleanupEvictsStaleEntries verifies that the cleanup sweep
// removes entries whose lastSeen is older than dynamicHostIdleExpiry.
func TestDynamicHostCleanupEvictsStaleEntries(t *testing.T) {
	resetDynamicHosts()

	const addr = "192.0.2.10:80"

	// Insert an entry directly with a lastSeen far in the past.
	dynamicHostsMu.Lock()
	dynamicHosts[addr] = dynamicHostEntry{
		host:     new(Host),
		lastSeen: time.Now().Add(-2 * dynamicHostIdleExpiry),
	}
	dynamicHostsMu.Unlock()

	// Run the cleanup logic inline (same logic as the goroutine).
	dynamicHostsMu.Lock()
	for a, entry := range dynamicHosts {
		if time.Since(entry.lastSeen) > dynamicHostIdleExpiry {
			delete(dynamicHosts, a)
		}
	}
	dynamicHostsMu.Unlock()

	dynamicHostsMu.RLock()
	_, stillPresent := dynamicHosts[addr]
	dynamicHostsMu.RUnlock()
	if stillPresent {
		t.Error("stale dynamic host entry should have been evicted by cleanup sweep")
	}
}

// TestDynamicHostCleanupRetainsFreshEntries verifies that the cleanup sweep
// keeps entries whose lastSeen is within dynamicHostIdleExpiry.
func TestDynamicHostCleanupRetainsFreshEntries(t *testing.T) {
	resetDynamicHosts()

	const addr = "192.0.2.11:80"

	dynamicHostsMu.Lock()
	dynamicHosts[addr] = dynamicHostEntry{
		host:     new(Host),
		lastSeen: time.Now(),
	}
	dynamicHostsMu.Unlock()

	// Run the cleanup logic inline.
	dynamicHostsMu.Lock()
	for a, entry := range dynamicHosts {
		if time.Since(entry.lastSeen) > dynamicHostIdleExpiry {
			delete(dynamicHosts, a)
		}
	}
	dynamicHostsMu.Unlock()

	dynamicHostsMu.RLock()
	_, stillPresent := dynamicHosts[addr]
	dynamicHostsMu.RUnlock()
	if !stillPresent {
		t.Error("fresh dynamic host entry should be retained by cleanup sweep")
	}
}

// TestDynamicHostConcurrentFillHost verifies that concurrent calls to
// fillDynamicHost for the same address all get the same *Host pointer and
// don't race (run with -race).
func TestDynamicHostConcurrentFillHost(t *testing.T) {
	resetDynamicHosts()

	const addr = "192.0.2.12:80"
	const goroutines = 50

	var wg sync.WaitGroup
	// NOTE(review): this local `hosts` slice shadows the package-level
	// `hosts` UsagePool used elsewhere in this file — consider renaming.
	hosts := make([]*Host, goroutines)

	for i := range goroutines {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			u := &Upstream{Dial: addr}
			u.fillDynamicHost()
			hosts[idx] = u.Host
		}(i)
	}
	wg.Wait()

	first := hosts[0]
	for i, h := range hosts {
		if h != first {
			t.Errorf("goroutine %d got a different *Host pointer; expected all to share the same entry", i)
		}
	}
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fastcgi

import (
	"encoding/json"
	"net/http"
	"slices"
	"strconv"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/fileserver"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
)

func init() {
	httpcaddyfile.RegisterDirective("php_fastcgi", parsePHPFastCGI)
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into t.
// Recognized subdirectives:
//
//	transport fastcgi {
//	    root
//	    split
//	    env
//	    resolve_root_symlink
//	    dial_timeout
//	    read_timeout
//	    write_timeout
//	    capture_stderr
//	}
func (t *Transport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume transport name
	for d.NextBlock(0) {
		switch d.Val() {
		case "root":
			// document root on the FastCGI server's filesystem
			if !d.NextArg() {
				return d.ArgErr()
			}
			t.Root = d.Val()

		case "split":
			// substrings used to split PATH_INFO from the URI path
			t.SplitPath = d.RemainingArgs()
			if len(t.SplitPath) == 0 {
				return d.ArgErr()
			}

		case "env":
			// one name/value pair per occurrence
			args := d.RemainingArgs()
			if len(args) != 2 {
				return d.ArgErr()
			}
			if t.EnvVars == nil {
				t.EnvVars = make(map[string]string)
			}
			t.EnvVars[args[0]] = args[1]

		case "resolve_root_symlink":
			// boolean flag; takes no argument
			if d.NextArg() {
				return d.ArgErr()
			}
			t.ResolveRootSymlink = true

		case "dial_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad timeout value %s: %v", d.Val(), err)
			}
			t.DialTimeout = caddy.Duration(dur)

		case "read_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad timeout value %s: %v", d.Val(), err)
			}
			t.ReadTimeout = caddy.Duration(dur)

		case "write_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("bad timeout value %s: %v", d.Val(), err)
			}
			t.WriteTimeout = caddy.Duration(dur)

		case "capture_stderr":
			// boolean flag; takes no argument
			if d.NextArg() {
				return d.ArgErr()
			}
			t.CaptureStderr = true

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}
	}
	return nil
}

// parsePHPFastCGI parses the php_fastcgi directive, which has the same syntax
// as the reverse_proxy directive (in fact, the reverse_proxy's directive
// Unmarshaler is invoked by this function) but the resulting proxy is specially
// configured for most™️ PHP apps over FastCGI. A line such as this:
//
//	php_fastcgi localhost:7777
//
// is equivalent to a route consisting of:
//
//	# Add trailing slash for directory requests
//	# This redirection is automatically disabled if "{http.request.uri.path}/index.php"
//	# doesn't appear in the try_files list
//	@canonicalPath {
//	    file {path}/index.php
//	    not path */
//	}
//	redir @canonicalPath {path}/ 308
//
//	# If the requested file does not exist, try index files and assume index.php always exists
//	@indexFiles file {
//	    try_files {path} {path}/index.php index.php
//	    try_policy first_exist_fallback
//	    split_path .php
//	}
//	rewrite @indexFiles {http.matchers.file.relative}
//
//	# Proxy PHP files to the FastCGI responder
//	@phpFiles path *.php
//	reverse_proxy @phpFiles localhost:7777 {
//	    transport fastcgi {
//	        split .php
//	    }
//	}
//
// Thus, this directive produces multiple handlers, each with a different
// matcher because multiple consecutive handlers are necessary to support
// the common PHP use case. If this "common" config is not compatible
// with a user's PHP requirements, they can use a manual approach based
// on the example above to configure it precisely as they need.
//
// If a matcher is specified by the user, for example:
//
//	php_fastcgi /subpath localhost:7777
//
// then the resulting handlers are wrapped in a subroute that uses the
// user's matcher as a prerequisite to enter the subroute. In other
// words, the directive's matcher is necessary, but not sufficient.
func parsePHPFastCGI(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	if !h.Next() {
		return nil, h.ArgErr()
	}

	// set up the transport for FastCGI, and specifically PHP
	fcgiTransport := Transport{}

	// set up the set of file extensions allowed to execute PHP code
	extensions := []string{".php"}

	// set the default index file for the try_files rewrites
	indexFile := "index.php"

	// set up for explicitly overriding try_files
	var tryFiles []string

	// if the user specified a matcher token, use that
	// matcher in a route that wraps both of our routes;
	// either way, strip the matcher token and pass
	// the remaining tokens to the unmarshaler so that
	// we can gain the rest of the reverse_proxy syntax
	userMatcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}

	// make a new dispenser from the remaining tokens so that we
	// can reset the dispenser back to this point for the
	// reverse_proxy unmarshaler to read from it as well
	dispenser := h.NewFromNextSegment()

	// read the subdirectives that we allow as overrides to
	// the php_fastcgi shortcut
	// NOTE: we delete the tokens as we go so that the reverse_proxy
	// unmarshal doesn't see these subdirectives which it cannot handle
	for dispenser.Next() {
		for dispenser.NextBlock(0) {
			// ignore any sub-subdirectives that might
			// have the same name somewhere within
			// the reverse_proxy passthrough tokens
			if dispenser.Nesting() != 1 {
				continue
			}

			// parse the php_fastcgi subdirectives
			switch dispenser.Val() {
			case "root":
				if !dispenser.NextArg() {
					return nil, dispenser.ArgErr()
				}
				fcgiTransport.Root = dispenser.Val()
				// DeleteN removes the consumed tokens so reverse_proxy won't see them
				dispenser.DeleteN(2)

			case "split":
				extensions = dispenser.RemainingArgs()
				dispenser.DeleteN(len(extensions) + 1)
				if len(extensions) == 0 {
					return nil, dispenser.ArgErr()
				}

			case "env":
				args := dispenser.RemainingArgs()
				dispenser.DeleteN(len(args) + 1)
				if len(args) != 2 {
					return nil, dispenser.ArgErr()
				}
				if fcgiTransport.EnvVars == nil {
					fcgiTransport.EnvVars = make(map[string]string)
				}
				fcgiTransport.EnvVars[args[0]] = args[1]

			case "index":
				args := dispenser.RemainingArgs()
				dispenser.DeleteN(len(args) + 1)
				if len(args) != 1 {
					return nil, dispenser.ArgErr()
				}
				indexFile = args[0]

			case "try_files":
				args := dispenser.RemainingArgs()
				dispenser.DeleteN(len(args) + 1)
				if len(args) < 1 {
					return nil, dispenser.ArgErr()
				}
				tryFiles = args

			case "resolve_root_symlink":
				args := dispenser.RemainingArgs()
				dispenser.DeleteN(len(args) + 1)
				fcgiTransport.ResolveRootSymlink = true

			case "dial_timeout":
				if !dispenser.NextArg() {
					return nil, dispenser.ArgErr()
				}
				dur, err := caddy.ParseDuration(dispenser.Val())
				if err != nil {
					return nil, dispenser.Errf("bad timeout value %s: %v", dispenser.Val(), err)
				}
				fcgiTransport.DialTimeout = caddy.Duration(dur)
				dispenser.DeleteN(2)

			case "read_timeout":
				if !dispenser.NextArg() {
					return nil, dispenser.ArgErr()
				}
				dur, err := caddy.ParseDuration(dispenser.Val())
				if err != nil {
					return nil, dispenser.Errf("bad timeout value %s: %v", dispenser.Val(), err)
				}
				fcgiTransport.ReadTimeout = caddy.Duration(dur)
				dispenser.DeleteN(2)

			case "write_timeout":
				if !dispenser.NextArg() {
					return nil, dispenser.ArgErr()
				}
				dur, err := caddy.ParseDuration(dispenser.Val())
				if err != nil {
					return nil, dispenser.Errf("bad timeout value %s: %v", dispenser.Val(), err)
				}
				fcgiTransport.WriteTimeout = caddy.Duration(dur)
				dispenser.DeleteN(2)

			case "capture_stderr":
				args := dispenser.RemainingArgs()
				dispenser.DeleteN(len(args) + 1)
				fcgiTransport.CaptureStderr = true
			}
		}
	}

	// reset the dispenser after we're done so that the reverse_proxy
	// unmarshaler can read it from the start
	dispenser.Reset()

	// set up a route list that we'll append to
	routes := caddyhttp.RouteList{}

	// set the list of allowed path segments on which to split
	fcgiTransport.SplitPath = extensions

	// if the index is turned off, we skip the redirect and try_files
	if indexFile != "off" {
		var dirRedir bool
		dirIndex := "{http.request.uri.path}/" + indexFile
		tryPolicy := "first_exist_fallback"

		// if tryFiles wasn't overridden, use a reasonable default
		if len(tryFiles) == 0 {
			tryFiles = []string{"{http.request.uri.path}", dirIndex, indexFile}
			dirRedir = true
		} else {
			if !strings.HasSuffix(tryFiles[len(tryFiles)-1], ".php") {
				// use first_exist strategy if the last file is not a PHP file
				tryPolicy = ""
			}

			dirRedir = slices.Contains(tryFiles, dirIndex)
		}

		if dirRedir {
			// route to redirect to canonical path if index PHP file
			redirMatcherSet := caddy.ModuleMap{
				"file": h.JSON(fileserver.MatchFile{
					TryFiles: []string{dirIndex},
				}),
				"not": h.JSON(caddyhttp.MatchNot{
					MatcherSetsRaw: []caddy.ModuleMap{
						{
							"path": h.JSON(caddyhttp.MatchPath{"*/"}),
						},
					},
				}),
			}
			redirHandler := caddyhttp.StaticResponse{
				StatusCode: caddyhttp.WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
				Headers:    http.Header{"Location": []string{"{http.request.orig_uri.path}/{http.request.orig_uri.prefixed_query}"}},
			}
			redirRoute := caddyhttp.Route{
				MatcherSetsRaw: []caddy.ModuleMap{redirMatcherSet},
				HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(redirHandler, "handler", "static_response", nil)},
			}

			routes = append(routes, redirRoute)
		}

		// route to rewrite to PHP index file
		rewriteMatcherSet := caddy.ModuleMap{
			"file": h.JSON(fileserver.MatchFile{
				TryFiles:  tryFiles,
				TryPolicy: tryPolicy,
				SplitPath: extensions,
			}),
		}
		rewriteHandler := rewrite.Rewrite{
			URI: "{http.matchers.file.relative}",
		}
		rewriteRoute := caddyhttp.Route{
			MatcherSetsRaw: []caddy.ModuleMap{rewriteMatcherSet},
			HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(rewriteHandler, "handler", "rewrite", nil)},
		}

		routes = append(routes, rewriteRoute)
	}

	// route to actually reverse proxy requests to PHP files;
	// match only requests that are for PHP files
	pathList := []string{}
	for _, ext := range extensions {
		pathList = append(pathList, "*"+ext)
	}
	rpMatcherSet := caddy.ModuleMap{
		"path": h.JSON(pathList),
	}

	// create the reverse proxy handler which uses our FastCGI transport
	rpHandler := &reverseproxy.Handler{
		TransportRaw: caddyconfig.JSONModuleObject(fcgiTransport, "protocol", "fastcgi", nil),
	}

	// the rest of the config is specified by the user
	// using the reverse_proxy directive syntax
	dispenser.Next() // consume the directive name
	err = rpHandler.UnmarshalCaddyfile(dispenser)
	if err != nil {
		return nil, err
	}
	err = rpHandler.FinalizeUnmarshalCaddyfile(h)
	if err != nil {
		return nil, err
	}

	// create the final reverse proxy route which is
	// conditional on matching PHP files
	rpRoute := caddyhttp.Route{
		MatcherSetsRaw: []caddy.ModuleMap{rpMatcherSet},
		HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(rpHandler, "handler", "reverse_proxy", nil)},
	}

	subroute := caddyhttp.Subroute{
		Routes: append(routes, rpRoute),
	}

	// the user's matcher is a prerequisite for ours, so
	// wrap ours in a subroute and return that
	if userMatcherSet != nil {
		return []httpcaddyfile.ConfigValue{
			{
				Class: "route",
				Value: caddyhttp.Route{
					MatcherSetsRaw: []caddy.ModuleMap{userMatcherSet},
					HandlersRaw:    []json.RawMessage{caddyconfig.JSONModuleObject(subroute, "handler", "subroute", nil)},
				},
			},
		}, nil
	}

	// otherwise, return the literal subroute instead of
	// individual routes, to ensure they stay together and
	// are treated as a single unit, without necessarily
	// creating an actual subroute in the output
	return []httpcaddyfile.ConfigValue{
		{
			Class: "route",
			Value: subroute,
		},
	}, nil
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/client.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Forked Jan. 2015 from http://bitbucket.org/PinIdea/fcgi_client
// (which is forked from https://code.google.com/p/go-fastcgi-client/).
// This fork contains several fixes and improvements by Matt Holt and
// other contributors to the Caddy project.

// Copyright 2012 Junqing Tan and The Go Authors
// Use of this source code is governed by a BSD-style
// Part of source code is from Go fcgi package

package fastcgi

import (
	"bufio"
	"bytes"
	"io"
	"mime/multipart"
	"net"
	"net/http"
	"net/http/httputil"
	"net/textproto"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// FCGIListenSockFileno describes listen socket file number.
const FCGIListenSockFileno uint8 = 0

// FCGIHeaderLen describes header length.
const FCGIHeaderLen uint8 = 8

// Version1 describes the version.
const Version1 uint8 = 1

// FCGINullRequestID describes the null request ID.
const FCGINullRequestID uint8 = 0

// FCGIKeepConn describes keep connection mode.
const FCGIKeepConn uint8 = 1

// FastCGI record types (protocol-defined values, starting at 1).
const (
	// BeginRequest is the begin request flag.
	BeginRequest uint8 = iota + 1
	// AbortRequest is the abort request flag.
	AbortRequest
	// EndRequest is the end request flag.
	EndRequest
	// Params is the parameters flag.
	Params
	// Stdin is the standard input flag.
	Stdin
	// Stdout is the standard output flag.
	Stdout
	// Stderr is the standard error flag.
	Stderr
	// Data is the data flag.
	Data
	// GetValues is the get values flag.
	GetValues
	// GetValuesResult is the get values result flag.
	GetValuesResult
	// UnknownType is the unknown type flag.
	UnknownType
	// MaxType is the maximum type flag.
	MaxType = UnknownType
)

// FastCGI application roles (protocol-defined values, starting at 1).
const (
	// Responder is the responder flag.
	Responder uint8 = iota + 1
	// Authorizer is the authorizer flag.
	Authorizer
	// Filter is the filter flag.
	Filter
)

// FastCGI end-request protocol status codes.
const (
	// RequestComplete is the completed request flag.
	RequestComplete uint8 = iota
	// CantMultiplexConns is the multiplexed connections flag.
	CantMultiplexConns
	// Overloaded is the overloaded flag.
	Overloaded
	// UnknownRole is the unknown role flag.
	UnknownRole
)

// Management record variable names (FCGI_GET_VALUES).
const (
	// MaxConns is the maximum connections flag.
	MaxConns string = "MAX_CONNS"
	// MaxRequests is the maximum requests flag.
	MaxRequests string = "MAX_REQS"
	// MultiplexConns is the multiplex connections flag.
	MultiplexConns string = "MPXS_CONNS"
)

const (
	maxWrite = 65500 // 65530 may work, but for compatibility
	maxPad   = 255
)

// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte

// client implements a FastCGI client, which is a standard for
// interfacing external applications with Web servers.
type client struct {
	rwc net.Conn
	// keepAlive bool // TODO: implement
	reqID uint16
	// stderr controls whether the responder's stderr stream is logged
	stderr bool
	logger *zap.Logger
}

// Do made the request and returns a io.Reader that translates the data read
// from fcgi responder out of fcgi packet before returning it.
// Do sends a complete FastCGI request (BeginRequest, Params, Stdin records)
// and returns a reader that strips FastCGI framing from the responder's
// output stream.
func (c *client) Do(p map[string]string, req io.Reader) (r io.Reader, err error) {
	// check for CONTENT_LENGTH, since the lack of it or wrong value will cause the backend to hang
	if clStr, ok := p["CONTENT_LENGTH"]; !ok {
		return nil, caddyhttp.Error(http.StatusLengthRequired, nil)
	} else if _, err := strconv.ParseUint(clStr, 10, 64); err != nil {
		// stdlib won't return a negative Content-Length, but we check just in case,
		// the most likely cause is from a missing content length, which is -1
		return nil, caddyhttp.Error(http.StatusLengthRequired, err)
	}

	// reuse a pooled buffer for framing the outgoing records
	writer := &streamWriter{c: c}
	writer.buf = bufPool.Get().(*bytes.Buffer)
	writer.buf.Reset()
	defer bufPool.Put(writer.buf)

	err = writer.writeBeginRequest(uint16(Responder), 0)
	if err != nil {
		return r, err
	}

	writer.recType = Params
	err = writer.writePairs(p)
	if err != nil {
		return r, err
	}

	writer.recType = Stdin
	if req != nil {
		_, err = io.Copy(writer, req)
		if err != nil {
			return nil, err
		}
	}
	err = writer.FlushStream()
	if err != nil {
		return nil, err
	}

	r = &streamReader{c: c}
	return r, err
}

// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer
// that closes the client connection.
type clientCloser struct {
	rwc net.Conn
	r   *streamReader
	io.Reader

	// status is the response status code, used to pick the log level for stderr
	status int
	logger *zap.Logger
}

// Close logs any stderr output the responder produced, then closes the
// underlying connection. Stderr is logged at Warn, or Error for >=400 statuses.
func (f clientCloser) Close() error {
	stderr := f.r.stderr.Bytes()
	if len(stderr) == 0 {
		return f.rwc.Close()
	}

	logLevel := zapcore.WarnLevel
	if f.status >= 400 {
		logLevel = zapcore.ErrorLevel
	}

	if c := f.logger.Check(logLevel, "stderr"); c != nil {
		c.Write(zap.ByteString("body", stderr))
	}

	return f.rwc.Close()
}

// Request returns a HTTP Response with Header and Body
// from fcgi responder
func (c *client) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
	r, err := c.Do(p, req)
	if err != nil {
		return resp, err
	}

	rb := bufio.NewReader(r)
	tp := textproto.NewReader(rb)
	resp = new(http.Response)

	// Parse the response headers.
	mimeHeader, err := tp.ReadMIMEHeader()
	if err != nil && err != io.EOF {
		return resp, err
	}
	resp.Header = http.Header(mimeHeader)

	if resp.Header.Get("Status") != "" {
		// the CGI "Status" header carries the real status, e.g. "404 Not Found"
		statusNumber, statusInfo, statusIsCut := strings.Cut(resp.Header.Get("Status"), " ")
		resp.StatusCode, err = strconv.Atoi(statusNumber)
		if err != nil {
			return resp, err
		}
		if statusIsCut {
			resp.Status = statusInfo
		}
	} else {
		resp.StatusCode = http.StatusOK
	}

	// TODO: fixTransferEncoding ?
	resp.TransferEncoding = resp.Header["Transfer-Encoding"]
	// parse error deliberately ignored: absent/invalid header leaves ContentLength zero
	resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)

	// wrap the response body in our closer
	closer := clientCloser{
		rwc:    c.rwc,
		r:      r.(*streamReader),
		Reader: rb,
		status: resp.StatusCode,
		logger: noopLogger,
	}
	if chunked(resp.TransferEncoding) {
		closer.Reader = httputil.NewChunkedReader(rb)
	}
	if c.stderr {
		closer.logger = c.logger
	}
	resp.Body = closer

	return resp, err
}

// Get issues a GET request to the fcgi responder.
func (c *client) Get(p map[string]string, body io.Reader, l int64) (resp *http.Response, err error) {
	p["REQUEST_METHOD"] = "GET"
	p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10)

	return c.Request(p, body)
}

// Head issues a HEAD request to the fcgi responder.
func (c *client) Head(p map[string]string) (resp *http.Response, err error) {
	p["REQUEST_METHOD"] = "HEAD"
	p["CONTENT_LENGTH"] = "0"

	return c.Request(p, nil)
}

// Options issues an OPTIONS request to the fcgi responder.
func (c *client) Options(p map[string]string) (resp *http.Response, err error) {
	p["REQUEST_METHOD"] = "OPTIONS"
	p["CONTENT_LENGTH"] = "0"

	return c.Request(p, nil)
}

// Post issues a POST request to the fcgi responder. with request body
// in the format that bodyType specified
func (c *client) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) {
	if p == nil {
		p = make(map[string]string)
	}

	p["REQUEST_METHOD"] = strings.ToUpper(method)

	// empty or GET method is coerced to POST since a body is being sent
	if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" {
		p["REQUEST_METHOD"] = "POST"
	}

	p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10)
	if len(bodyType) > 0 {
		p["CONTENT_TYPE"] = bodyType
	} else {
		p["CONTENT_TYPE"] = "application/x-www-form-urlencoded"
	}

	return c.Request(p, body)
}

// PostForm issues a POST to the fcgi responder, with form
// as a string key to a list values (url.Values)
func (c *client) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) {
	body := bytes.NewReader([]byte(data.Encode()))

	return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len()))
}

// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard,
// with form as a string key to a list values (url.Values),
// and/or with file as a string key to a list file path.
func (c *client) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) {
	buf := &bytes.Buffer{}
	writer := multipart.NewWriter(buf)
	bodyType := writer.FormDataContentType()

	for key, val := range data {
		for _, v0 := range val {
			err = writer.WriteField(key, v0)
			if err != nil {
				return resp, err
			}
		}
	}

	for key, val := range file {
		fd, e := os.Open(val)
		if e != nil {
			return nil, e
		}
		// NOTE: defers accumulate until PostFile returns (fine for the
		// small file sets this helper is used with)
		defer fd.Close()

		part, e := writer.CreateFormFile(key, filepath.Base(val))
		if e != nil {
			return nil, e
		}
		_, err = io.Copy(part, fd)
		if err != nil {
			return resp, err
		}
	}

	err = writer.Close()
	if err != nil {
		return resp, err
	}

	return c.Post(p, "POST", bodyType, buf, int64(buf.Len()))
}

// SetReadTimeout sets the read timeout for future calls that read from the
// fcgi responder. A zero value for t means no timeout will be set.
func (c *client) SetReadTimeout(t time.Duration) error { if t != 0 { return c.rwc.SetReadDeadline(time.Now().Add(t)) } return nil } // SetWriteTimeout sets the write timeout for future calls that send data to // the fcgi responder. A zero value for t means no timeout will be set. func (c *client) SetWriteTimeout(t time.Duration) error { if t != 0 { return c.rwc.SetWriteDeadline(time.Now().Add(t)) } return nil } // Checks whether chunked is part of the encodings stack func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } ================================================ FILE: modules/caddyhttp/reverseproxy/fastcgi/client_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // NOTE: These tests were adapted from the original // repository from which this package was forked. // The tests are slow (~10s) and in dire need of rewriting. // As such, the tests have been disabled to speed up // automated builds until they can be properly written. 
package fastcgi

import (
	"bytes"
	"crypto/md5"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"math/rand/v2"
	"net"
	"net/http"
	"net/http/fcgi"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"
)

// test fcgi protocol includes:
// Get, Post, Post in multipart/form-data, and Post with files
// each key should be the md5 of the value or the file uploaded
// specify remote fcgi responder ip:port to test with php
// test failed if the remote fcgi(script) failed md5 verification
// and output "FAILED" in response
const (
	scriptFile = "/tank/www/fcgic_test.php"
	// ipPort = "remote-php-serv:59000"
	ipPort = "127.0.0.1:59000"
)

// globalt holds the current *testing.T so that helper functions
// (which are not passed a T) can report failures.
var globalt *testing.T

// FastCGIServer is the test responder served via net/http/fcgi; it
// verifies that each form key equals the md5 of its value (or file).
type FastCGIServer struct{}

func (s FastCGIServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	if err := req.ParseMultipartForm(100000000); err != nil {
		log.Printf("[ERROR] failed to parse: %v", err)
	}

	stat := "PASSED"
	fmt.Fprintln(resp, "-")
	fileNum := 0
	{
		length := 0
		for k0, v0 := range req.Form {
			h := md5.New()
			_, _ = io.WriteString(h, v0[0])
			_md5 := fmt.Sprintf("%x", h.Sum(nil))

			length += len(k0)
			length += len(v0[0])

			// echo error when key != _md5(val)
			if _md5 != k0 {
				fmt.Fprintln(resp, "server:err ", _md5, k0)
				stat = "FAILED"
			}
		}
		if req.MultipartForm != nil {
			fileNum = len(req.MultipartForm.File)
			for kn, fns := range req.MultipartForm.File {
				// fmt.Fprintln(resp, "server:filekey ", kn )
				length += len(kn)
				for _, f := range fns {
					fd, err := f.Open()
					if err != nil {
						log.Println("server:", err)
						return
					}
					h := md5.New()
					l0, err := io.Copy(h, fd)
					if err != nil {
						log.Println(err)
						return
					}
					length += int(l0)
					defer fd.Close()
					md5 := fmt.Sprintf("%x", h.Sum(nil))
					// fmt.Fprintln(resp, "server:filemd5 ", md5 )

					if kn != md5 {
						fmt.Fprintln(resp, "server:err ", md5, kn)
						stat = "FAILED"
					}
					// fmt.Fprintln(resp, "server:filename ", f.Filename )
				}
			}
		}
		fmt.Fprintln(resp, "server:got data length", length)
	}
	fmt.Fprintln(resp, "-"+stat+"-POST(", len(req.Form), ")-FILE(", fileNum, ")--")
}

// sendFcgi dials the test responder and issues one request.
// reqType 0 exercises Post/PostForm/Get depending on which of
// data/posts is non-empty; any other value exercises PostFile.
// It returns the raw response body and fails the global test if
// the responder reported "FAILED".
func sendFcgi(reqType int, fcgiParams map[string]string, data []byte, posts map[string]string, files map[string]string) (content []byte) {
	conn, err := net.Dial("tcp", ipPort)
	if err != nil {
		log.Println("err:", err)
		return content
	}

	fcgi := client{rwc: conn, reqID: 1}

	length := 0

	var resp *http.Response
	switch reqType {
	case 0:
		if len(data) > 0 {
			length = len(data)
			rd := bytes.NewReader(data)
			resp, err = fcgi.Post(fcgiParams, "", "", rd, int64(rd.Len()))
		} else if len(posts) > 0 {
			values := url.Values{}
			for k, v := range posts {
				values.Set(k, v)
				length += len(k) + 2 + len(v)
			}
			resp, err = fcgi.PostForm(fcgiParams, values)
		} else {
			rd := bytes.NewReader(data)
			resp, err = fcgi.Get(fcgiParams, rd, int64(rd.Len()))
		}
	default:
		values := url.Values{}
		for k, v := range posts {
			values.Set(k, v)
			length += len(k) + 2 + len(v)
		}
		for k, v := range files {
			fi, _ := os.Lstat(v)
			length += len(k) + int(fi.Size())
		}
		resp, err = fcgi.PostFile(fcgiParams, values, files)
	}

	if err != nil {
		log.Println("err:", err)
		return content
	}

	defer resp.Body.Close()
	content, _ = io.ReadAll(resp.Body)

	log.Println("c: send data length ≈", length, string(content))
	conn.Close()
	time.Sleep(250 * time.Millisecond)

	if bytes.Contains(content, []byte("FAILED")) {
		globalt.Error("Server return failed message")
	}

	return content
}

// generateRandFile writes `size` bytes of pseudo-random data to a
// temp file and returns its path p and the md5 hex digest m of the
// written contents (used as the form key by the responder's check).
func generateRandFile(size int) (p string, m string) {
	p = filepath.Join(os.TempDir(), "fcgict"+strconv.Itoa(rand.Int()))

	// open output file
	fo, err := os.Create(p)
	if err != nil {
		panic(err)
	}
	// close fo on exit and check for its returned error
	defer func() {
		if err := fo.Close(); err != nil {
			panic(err)
		}
	}()

	h := md5.New()
	for i := 0; i < size/16; i++ {
		buf := make([]byte, 16)
		binary.PutVarint(buf, rand.Int64())
		if _, err := fo.Write(buf); err != nil {
			log.Printf("[ERROR] failed to write buffer: %v\n", err)
		}
		if _, err := h.Write(buf); err != nil {
			log.Printf("[ERROR] failed to write buffer: %v\n", err)
		}
	}
	m = fmt.Sprintf("%x", h.Sum(nil))
	return p, m
}

func DisabledTest(t *testing.T) {
	// TODO: test chunked reader
	globalt = t

	// server
	go func() {
		listener, err := net.Listen("tcp", ipPort)
		if err != nil {
			log.Println("listener creation failed: ", err)
		}

		srv := new(FastCGIServer)
		if err := fcgi.Serve(listener, srv); err != nil {
			log.Print("[ERROR] failed to start server: ", err)
		}
	}()

	time.Sleep(250 * time.Millisecond)

	// init
	fcgiParams := make(map[string]string)
	fcgiParams["REQUEST_METHOD"] = "GET"
	fcgiParams["SERVER_PROTOCOL"] = "HTTP/1.1"
	// fcgi_params["GATEWAY_INTERFACE"] = "CGI/1.1"
	fcgiParams["SCRIPT_FILENAME"] = scriptFile

	// simple GET
	log.Println("test:", "get")
	sendFcgi(0, fcgiParams, nil, nil, nil)

	// simple post data
	log.Println("test:", "post")
	sendFcgi(0, fcgiParams, []byte("c4ca4238a0b923820dcc509a6f75849b=1&7b8b965ad4bca0e41ab51de7b31363a1=n"), nil, nil)

	log.Println("test:", "post data (more than 60KB)")
	data := ""
	for i := 0x00; i < 0xff; i++ {
		v0 := strings.Repeat(fmt.Sprint(i), 256)
		h := md5.New()
		_, _ = io.WriteString(h, v0)
		k0 := fmt.Sprintf("%x", h.Sum(nil))
		data += k0 + "=" + url.QueryEscape(v0) + "&"
	}
	sendFcgi(0, fcgiParams, []byte(data), nil, nil)

	log.Println("test:", "post form (use url.Values)")
	p0 := make(map[string]string, 1)
	p0["c4ca4238a0b923820dcc509a6f75849b"] = "1"
	p0["7b8b965ad4bca0e41ab51de7b31363a1"] = "n"
	sendFcgi(1, fcgiParams, nil, p0, nil)

	log.Println("test:", "post forms (256 keys, more than 1MB)")
	p1 := make(map[string]string, 1)
	for i := 0x00; i < 0xff; i++ {
		v0 := strings.Repeat(fmt.Sprint(i), 4096)
		h := md5.New()
		_, _ = io.WriteString(h, v0)
		k0 := fmt.Sprintf("%x", h.Sum(nil))
		p1[k0] = v0
	}
	sendFcgi(1, fcgiParams, nil, p1, nil)

	log.Println("test:", "post file (1 file, 500KB)) ")
	f0 := make(map[string]string, 1)
	path0, m0 := generateRandFile(500000)
	f0[m0] = path0
	sendFcgi(1, fcgiParams, nil, p1, f0)

	log.Println("test:", "post multiple files (2 files, 5M each) and forms (256 keys, more than 1MB data")
	path1, m1 := generateRandFile(5000000)
	f0[m1] = path1
	sendFcgi(1, fcgiParams, nil, p1, f0)

	log.Println("test:", "post only files (2 files, 5M each)")
	sendFcgi(1, fcgiParams, nil, nil, f0)

	log.Println("test:", "post only 1 file")
	delete(f0, "m0")
	sendFcgi(1, fcgiParams, nil, nil, f0)

	if err := os.Remove(path0); err != nil {
		log.Println("[ERROR] failed to remove path: ", err)
	}
	if err := os.Remove(path1); err != nil {
		log.Println("[ERROR] failed to remove path: ", err)
	}
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fastcgi

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/http"
	"path/filepath"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/text/language"
	"golang.org/x/text/search"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

var (
	// ErrInvalidSplitPath is returned by Provision when a configured
	// split_path value contains non-ASCII characters.
	ErrInvalidSplitPath = errors.New("split path contains non-ASCII characters")

	// noopLogger is used where a logger is required but stderr
	// capture is disabled.
	noopLogger = zap.NewNop()
)

func init() {
	caddy.RegisterModule(Transport{})
}

// Transport facilitates FastCGI communication.
type Transport struct {
	// Use this directory as the fastcgi root directory. Defaults to the root
	// directory of the parent virtual host.
	Root string `json:"root,omitempty"`

	// The path in the URL will be split into two, with the first piece ending
	// with the value of SplitPath. The first piece will be assumed as the
	// actual resource (CGI script) name, and the second piece will be set to
	// PATH_INFO for the CGI script to use.
	//
	// Split paths can only contain ASCII characters.
	// Comparison is case-insensitive.
	//
	// Future enhancements should be careful to avoid CVE-2019-11043,
	// which can be mitigated with use of a try_files-like behavior
	// that 404s if the fastcgi path info is not found.
	SplitPath []string `json:"split_path,omitempty"`

	// Path declared as root directory will be resolved to its absolute value
	// after the evaluation of any symbolic links.
	// Due to the nature of PHP opcache, root directory path is cached: when
	// using a symlinked directory as root this could generate errors when
	// symlink is changed without php-fpm being restarted; enabling this
	// directive will set $_SERVER['DOCUMENT_ROOT'] to the real directory path.
	ResolveRootSymlink bool `json:"resolve_root_symlink,omitempty"`

	// Extra environment variables.
	EnvVars map[string]string `json:"env,omitempty"`

	// The duration used to set a deadline when connecting to an upstream. Default: `3s`.
	DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`

	// The duration used to set a deadline when reading from the FastCGI server.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// The duration used to set a deadline when sending to the FastCGI server.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// Capture and log any messages sent by the upstream on stderr. Logs at WARN
	// level by default. If the response has a 4xx or 5xx status ERROR level will
	// be used instead.
	CaptureStderr bool `json:"capture_stderr,omitempty"`

	serverSoftware string
	logger         *zap.Logger
}

// CaddyModule returns the Caddy module information.
// CaddyModule returns the Caddy module information.
func (Transport) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.transport.fastcgi",
		New: func() caddy.Module { return new(Transport) },
	}
}

// Provision sets up t.
func (t *Transport) Provision(ctx caddy.Context) error {
	t.logger = ctx.Logger()

	if t.Root == "" {
		t.Root = "{http.vars.root}"
	}

	version, _ := caddy.Version()
	t.serverSoftware = "Caddy/" + version

	// Set a relatively short default dial timeout.
	// This is helpful to make load-balancer retries more speedy.
	if t.DialTimeout == 0 {
		t.DialTimeout = caddy.Duration(3 * time.Second)
	}

	// Validate and normalize split paths: each value must be pure ASCII,
	// and is lowercased once here so that splitPos can later perform a
	// cheap byte-wise case-insensitive comparison.
	for i, split := range t.SplitPath {
		for j := 0; j < len(split); j++ {
			if split[j] >= utf8.RuneSelf {
				return ErrInvalidSplitPath
			}
		}
		// safe: the value is known to be ASCII-only at this point,
		// so ToLower only maps 'A'-'Z' to 'a'-'z'
		t.SplitPath[i] = strings.ToLower(split)
	}

	return nil
}

// DefaultBufferSizes enables request buffering for fastcgi if not configured.
// This is because most fastcgi servers are php-fpm that require the content length to be set to read the body, golang
// std has fastcgi implementation that doesn't need this value to process the body, but we can safely assume that's
// not used.
// http3 requests have a negative content length for GET and HEAD requests, if that header is not sent.
// see: https://github.com/caddyserver/caddy/issues/6678#issuecomment-2472224182
// Though it appears even if CONTENT_LENGTH is invalid, php-fpm can handle just fine if the body is empty (no Stdin records sent).
// php-fpm will hang if there is any data in the body though, https://github.com/caddyserver/caddy/issues/5420#issuecomment-2415943516
// TODO: better default buffering for fastcgi requests without content length, in theory a value of 1 should be enough, make it bigger anyway
func (t Transport) DefaultBufferSizes() (int64, int64) {
	return 4096, 0
}

// RoundTrip implements http.RoundTripper.
func (t Transport) RoundTrip(r *http.Request) (*http.Response, error) {
	server := r.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)

	// Disallow null bytes in the request path, because
	// PHP upstreams may do bad things, like execute a
	// non-PHP file as PHP code. See #4574
	if strings.Contains(r.URL.Path, "\x00") {
		return nil, caddyhttp.Error(http.StatusBadRequest, fmt.Errorf("invalid request path"))
	}

	env, err := t.buildEnv(r)
	if err != nil {
		return nil, fmt.Errorf("building environment: %v", err)
	}

	ctx := r.Context()

	// extract dial information from request (should have been embedded by the reverse proxy)
	network, address := "tcp", r.URL.Host
	if dialInfo, ok := reverseproxy.GetDialInfo(ctx); ok {
		network = dialInfo.Network
		address = dialInfo.Address
	}

	logCreds := server.Logs != nil && server.Logs.ShouldLogCredentials
	loggableReq := caddyhttp.LoggableHTTPRequest{
		Request:              r,
		ShouldLogCredentials: logCreds,
	}
	loggableEnv := loggableEnv{vars: env, logCredentials: logCreds}

	logger := t.logger.With(
		zap.Object("request", loggableReq),
		zap.Object("env", loggableEnv),
	)
	// guard the debug log with Check so the expensive field encoding
	// only happens when debug level is enabled
	if c := t.logger.Check(zapcore.DebugLevel, "roundtrip"); c != nil {
		c.Write(
			zap.String("dial", address),
			zap.Object("env", loggableEnv),
			zap.Object("request", loggableReq),
		)
	}

	// connect to the backend
	dialer := net.Dialer{Timeout: time.Duration(t.DialTimeout)}
	conn, err := dialer.DialContext(ctx, network, address)
	if err != nil {
		return nil, fmt.Errorf("dialing backend: %v", err)
	}
	defer func() {
		// conn will be closed with the response body unless there's an error
		if err != nil {
			conn.Close()
		}
	}()

	// create the client that will facilitate the protocol
	client := client{
		rwc:    conn,
		reqID:  1,
		logger: logger,
		stderr: t.CaptureStderr,
	}

	// read/write timeouts
	if err = client.SetReadTimeout(time.Duration(t.ReadTimeout)); err != nil {
		return nil, fmt.Errorf("setting read timeout: %v", err)
	}
	if err = client.SetWriteTimeout(time.Duration(t.WriteTimeout)); err != nil {
		return nil, fmt.Errorf("setting write timeout: %v", err)
	}

	// fall back to the Content-Length header when the request's
	// ContentLength field is unset
	contentLength := r.ContentLength
	if contentLength == 0 {
		contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
	}

	var resp *http.Response
	switch r.Method {
	case http.MethodHead:
		resp, err = client.Head(env)
	case http.MethodGet:
		resp, err = client.Get(env, r.Body, contentLength)
	case http.MethodOptions:
		resp, err = client.Options(env)
	default:
		resp, err = client.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength)
	}
	if err != nil {
		return nil, err
	}

	return resp, nil
}

// buildEnv returns a set of CGI environment variables for the request.
func (t Transport) buildEnv(r *http.Request) (envVars, error) {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	var env envVars

	// Separate remote IP and port; more lenient than net.SplitHostPort
	var ip, port string
	if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 {
		ip = r.RemoteAddr[:idx]
		port = r.RemoteAddr[idx+1:]
	} else {
		ip = r.RemoteAddr
	}

	// Remove [] from IPv6 addresses
	ip = strings.Replace(ip, "[", "", 1)
	ip = strings.Replace(ip, "]", "", 1)

	// make sure file root is absolute
	root, err := caddy.FastAbs(repl.ReplaceAll(t.Root, "."))
	if err != nil {
		return nil, err
	}

	if t.ResolveRootSymlink {
		root, err = filepath.EvalSymlinks(root)
		if err != nil {
			return nil, err
		}
	}

	fpath := r.URL.Path
	scriptName := fpath

	docURI := fpath
	// split "actual path" from "path info" if configured
	var pathInfo string
	if splitPos := t.splitPos(fpath); splitPos > -1 {
		docURI = fpath[:splitPos]
		pathInfo = fpath[splitPos:]

		// Strip PATH_INFO from SCRIPT_NAME
		scriptName = strings.TrimSuffix(scriptName, pathInfo)
	}

	// Try to grab the path remainder from a file matcher
	// if we didn't get a split result here.
	// See https://github.com/caddyserver/caddy/issues/3718
	if pathInfo == "" {
		pathInfo, _ = repl.GetString("http.matchers.file.remainder")
	}

	// SCRIPT_FILENAME is the absolute path of SCRIPT_NAME
	scriptFilename := caddyhttp.SanitizedPathJoin(root, scriptName)

	// Ensure the SCRIPT_NAME has a leading slash for compliance with RFC3875
	// Info: https://tools.ietf.org/html/rfc3875#section-4.1.13
	if scriptName != "" && !strings.HasPrefix(scriptName, "/") {
		scriptName = "/" + scriptName
	}

	// Get the request URL from context. The context stores the original URL in case
	// it was changed by a middleware such as rewrite. By default, we pass the
	// original URI in as the value of REQUEST_URI (the user can overwrite this
	// if desired). Most PHP apps seem to want the original URI. Besides, this is
	// how nginx defaults: http://stackoverflow.com/a/12485156/1048862
	origReq := r.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)

	requestScheme := "http"
	if r.TLS != nil {
		requestScheme = "https"
	}

	reqHost, reqPort, err := net.SplitHostPort(r.Host)
	if err != nil {
		// whatever, just assume there was no port
		reqHost = r.Host
	}

	authUser, _ := repl.GetString("http.auth.user.id")

	// Some variables are unused but cleared explicitly to prevent
	// the parent environment from interfering.
	env = envVars{
		// Variables defined in CGI 1.1 spec
		"AUTH_TYPE":         "", // Not used
		"CONTENT_LENGTH":    r.Header.Get("Content-Length"),
		"CONTENT_TYPE":      r.Header.Get("Content-Type"),
		"GATEWAY_INTERFACE": "CGI/1.1",
		"PATH_INFO":         pathInfo,
		"QUERY_STRING":      r.URL.RawQuery,
		"REMOTE_ADDR":       ip,
		"REMOTE_HOST":       ip, // For speed, remote host lookups disabled
		"REMOTE_PORT":       port,
		"REMOTE_IDENT":      "", // Not used
		"REMOTE_USER":       authUser,
		"REQUEST_METHOD":    r.Method,
		"REQUEST_SCHEME":    requestScheme,
		"SERVER_NAME":       reqHost,
		"SERVER_PROTOCOL":   r.Proto,
		"SERVER_SOFTWARE":   t.serverSoftware,

		// Other variables
		"DOCUMENT_ROOT":   root,
		"DOCUMENT_URI":    docURI,
		"HTTP_HOST":       r.Host, // added here, since not always part of headers
		"REQUEST_URI":     origReq.URL.RequestURI(),
		"SCRIPT_FILENAME": scriptFilename,
		"SCRIPT_NAME":     scriptName,
	}

	// compliance with the CGI specification requires that
	// PATH_TRANSLATED should only exist if PATH_INFO is defined.
	// Info: https://www.ietf.org/rfc/rfc3875 Page 14
	if env["PATH_INFO"] != "" {
		env["PATH_TRANSLATED"] = caddyhttp.SanitizedPathJoin(root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html
	}

	// compliance with the CGI specification requires that
	// the SERVER_PORT variable MUST be set to the TCP/IP port number on which this request is received from the client
	// even if the port is the default port for the scheme and could otherwise be omitted from a URI.
	// https://tools.ietf.org/html/rfc3875#section-4.1.15
	if reqPort != "" {
		env["SERVER_PORT"] = reqPort
	} else if requestScheme == "http" {
		env["SERVER_PORT"] = "80"
	} else if requestScheme == "https" {
		env["SERVER_PORT"] = "443"
	}

	// Some web apps rely on knowing HTTPS or not
	if r.TLS != nil {
		env["HTTPS"] = "on"
		// and pass the protocol details in a manner compatible with apache's mod_ssl
		// (which is why these have a SSL_ prefix and not TLS_).
v, ok := tlsProtocolStrings[r.TLS.Version] if ok { env["SSL_PROTOCOL"] = v } // and pass the cipher suite in a manner compatible with apache's mod_ssl for _, cs := range caddytls.SupportedCipherSuites() { if cs.ID == r.TLS.CipherSuite { env["SSL_CIPHER"] = cs.Name break } } } // Add env variables from config (with support for placeholders in values) for key, value := range t.EnvVars { env[key] = repl.ReplaceAll(value, "") } // Add all HTTP headers to env variables for field, val := range r.Header { header := strings.ToUpper(field) header = headerNameReplacer.Replace(header) env["HTTP_"+header] = strings.Join(val, ", ") } return env, nil } var splitSearchNonASCII = search.New(language.Und, search.IgnoreCase) // splitPos returns the index where path should // be split based on t.SplitPath. // // example: if splitPath is [".php"] // "/path/to/script.php/some/path": ("/path/to/script.php", "/some/path") // // Adapted from FrankenPHP's code (copyright 2026 Kévin Dunglas, MIT license) func (t Transport) splitPos(path string) int { // TODO: from v1... // if httpserver.CaseSensitivePath { // return strings.Index(path, r.SplitPath) // } if len(t.SplitPath) == 0 { return 0 } pathLen := len(path) // We are sure that split strings are all ASCII-only and lower-case because of validation and normalization in Provision(). for _, split := range t.SplitPath { splitLen := len(split) for i := range pathLen { if path[i] >= utf8.RuneSelf { if _, end := splitSearchNonASCII.IndexString(path, split); end > -1 { return end } break } if i+splitLen > pathLen { continue } match := true for j := range splitLen { c := path[i+j] if c >= utf8.RuneSelf { if _, end := splitSearchNonASCII.IndexString(path, split); end > -1 { return end } break } if 'A' <= c && c <= 'Z' { c += 'a' - 'A' } if c != split[j] { match = false break } } if match { return i + splitLen } } } return -1 } type envVars map[string]string // loggableEnv is a simple type to allow for speeding up zap log encoding. 
type loggableEnv struct {
	vars           envVars
	logCredentials bool
}

// MarshalLogObject implements zapcore.ObjectMarshaler; credential-bearing
// headers are redacted unless credential logging is enabled.
func (env loggableEnv) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	for k, v := range env.vars {
		if !env.logCredentials {
			switch strings.ToLower(k) {
			case "http_cookie", "http_set_cookie", "http_authorization", "http_proxy_authorization":
				v = ""
			}
		}
		enc.AddString(k, v)
	}
	return nil
}

// Map of supported protocols to Apache ssl_mod format
// Note that these are slightly different from SupportedProtocols in caddytls/config.go
var tlsProtocolStrings = map[uint16]string{
	tls.VersionTLS10: "TLSv1",
	tls.VersionTLS11: "TLSv1.1",
	tls.VersionTLS12: "TLSv1.2",
	tls.VersionTLS13: "TLSv1.3",
}

// headerNameReplacer maps HTTP header names to CGI variable form
// (spaces and hyphens become underscores).
var headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_")

// Interface guards
var (
	_ zapcore.ObjectMarshaler = (*loggableEnv)(nil)

	_ caddy.Provisioner = (*Transport)(nil)
	_ http.RoundTripper = (*Transport)(nil)

	_ reverseproxy.BufferedTransport = (*Transport)(nil)
)


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/fastcgi_test.go
================================================
package fastcgi

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/caddyserver/caddy/v2"
)

func TestProvisionSplitPath(t *testing.T) {
	tests := []struct {
		name          string
		splitPath     []string
		wantErr       error
		wantSplitPath []string
	}{
		{
			name:          "valid lowercase split path",
			splitPath:     []string{".php"},
			wantErr:       nil,
			wantSplitPath: []string{".php"},
		},
		{
			name:          "valid uppercase split path normalized",
			splitPath:     []string{".PHP"},
			wantErr:       nil,
			wantSplitPath: []string{".php"},
		},
		{
			name:          "valid mixed case split path normalized",
			splitPath:     []string{".PhP", ".PHTML"},
			wantErr:       nil,
			wantSplitPath: []string{".php", ".phtml"},
		},
		{
			name:          "empty split path",
			splitPath:     []string{},
			wantErr:       nil,
			wantSplitPath: []string{},
		},
		{
			name:      "non-ASCII character in split path rejected",
			splitPath: []string{".php", ".Ⱥphp"},
			wantErr:   ErrInvalidSplitPath,
		},
		{
			name:      "unicode character in split path rejected",
			splitPath: []string{".phpⱥ"},
			wantErr:   ErrInvalidSplitPath,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tr := Transport{SplitPath: tt.splitPath}
			err := tr.Provision(caddy.Context{})
			if tt.wantErr != nil {
				require.ErrorIs(t, err, tt.wantErr)
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tt.wantSplitPath, tr.SplitPath)
		})
	}
}

func TestSplitPos(t *testing.T) {
	tests := []struct {
		name      string
		path      string
		splitPath []string
		wantPos   int
	}{
		{
			name:      "simple php extension",
			path:      "/path/to/script.php",
			splitPath: []string{".php"},
			wantPos:   19,
		},
		{
			name:      "php extension with path info",
			path:      "/path/to/script.php/some/path",
			splitPath: []string{".php"},
			wantPos:   19,
		},
		{
			name:      "case insensitive match",
			path:      "/path/to/script.PHP",
			splitPath: []string{".php"},
			wantPos:   19,
		},
		{
			name:      "mixed case match",
			path:      "/path/to/script.PhP/info",
			splitPath: []string{".php"},
			wantPos:   19,
		},
		{
			name:      "no match",
			path:      "/path/to/script.txt",
			splitPath: []string{".php"},
			wantPos:   -1,
		},
		{
			name:      "empty split path",
			path:      "/path/to/script.php",
			splitPath: []string{},
			wantPos:   0,
		},
		{
			name:      "multiple split paths first match",
			path:      "/path/to/script.php",
			splitPath: []string{".php", ".phtml"},
			wantPos:   19,
		},
		{
			name:      "multiple split paths second match",
			path:      "/path/to/script.phtml",
			splitPath: []string{".php", ".phtml"},
			wantPos:   21,
		},
		// Unicode case-folding tests (security fix for GHSA-g966-83w7-6w38)
		// U+023A (Ⱥ) lowercases to U+2C65 (ⱥ), which has different UTF-8 byte length
		// Ⱥ: 2 bytes (C8 BA), ⱥ: 3 bytes (E2 B1 A5)
		{
			name:      "unicode path with case-folding length expansion",
			path:      "/ȺȺȺȺshell.php",
			splitPath: []string{".php"},
			wantPos:   18, // correct position in original string
		},
		{
			name:      "unicode path with extension after expansion chars",
			path:      "/ȺȺȺȺshell.php/path/info",
			splitPath: []string{".php"},
			wantPos:   18,
		},
		{
			name:      "unicode in filename with multiple php occurrences",
			path:      "/ȺȺȺȺshell.php.txt.php",
			splitPath: []string{".php"},
			wantPos:   18, // should match first .php, not be confused by byte offset shift
		},
		{
			name:      "unicode case insensitive extension",
			path:      "/ȺȺȺȺshell.PHP",
			splitPath: []string{".php"},
			wantPos:   18,
		},
		{
			name:      "unicode in middle of path",
			path:      "/path/Ⱥtest/script.php",
			splitPath: []string{".php"},
			wantPos:   23, // Ⱥ is 2 bytes, so path is 23 bytes total, .php ends at byte 23
		},
		{
			name:      "unicode only in directory not filename",
			path:      "/Ⱥ/script.php",
			splitPath: []string{".php"},
			wantPos:   14,
		},
		// Additional Unicode characters that expand when lowercased
		// U+0130 (İ - Turkish capital I with dot) lowercases to U+0069 + U+0307
		{
			name:      "turkish capital I with dot",
			path:      "/İtest.php",
			splitPath: []string{".php"},
			wantPos:   11,
		},
		// Ensure standard ASCII still works correctly
		{
			name:      "ascii only path with case variation",
			path:      "/PATH/TO/SCRIPT.PHP/INFO",
			splitPath: []string{".php"},
			wantPos:   19,
		},
		{
			name:      "path at root",
			path:      "/index.php",
			splitPath: []string{".php"},
			wantPos:   10,
		},
		{
			name:      "extension in middle of filename",
			path:      "/test.php.bak",
			splitPath: []string{".php"},
			wantPos:   9,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotPos := Transport{SplitPath: tt.splitPath}.splitPos(tt.path)
			assert.Equal(t, tt.wantPos, gotPos, "splitPos(%q, %v)", tt.path, tt.splitPath)

			// Verify that the split produces valid substrings
			if gotPos > 0 && gotPos <= len(tt.path) {
				scriptName := tt.path[:gotPos]
				pathInfo := tt.path[gotPos:]
				// The script name should end with one of the split extensions (case-insensitive)
				hasValidEnding := false
				for _, split := range tt.splitPath {
					if strings.HasSuffix(strings.ToLower(scriptName), split) {
						hasValidEnding = true
						break
					}
				}
				assert.True(t, hasValidEnding, "script name %q should end with one of %v", scriptName, tt.splitPath)
				// Original path should be reconstructable
				assert.Equal(t, tt.path, scriptName+pathInfo, "path should be reconstructable from split parts")
			}
		})
	}
}

// TestSplitPosUnicodeSecurityRegression specifically tests the vulnerability
// described in GHSA-g966-83w7-6w38 where Unicode case-folding caused
// incorrect SCRIPT_NAME/PATH_INFO splitting
func TestSplitPosUnicodeSecurityRegression(t *testing.T) {
	// U+023A: Ⱥ (UTF-8: C8 BA). Lowercase is ⱥ (UTF-8: E2 B1 A5), longer in bytes.
	path := "/ȺȺȺȺshell.php.txt.php"
	split := []string{".php"}

	pos := Transport{SplitPath: split}.splitPos(path)

	// The vulnerable code would return 22 (computed on lowercased string)
	// The correct code should return 18 (position in original string)
	expectedPos := strings.Index(path, ".php") + len(".php")
	assert.Equal(t, expectedPos, pos, "split position should match first .php in original string")
	assert.Equal(t, 18, pos, "split position should be 18, not 22")

	if pos > 0 && pos <= len(path) {
		scriptName := path[:pos]
		pathInfo := path[pos:]
		assert.Equal(t, "/ȺȺȺȺshell.php", scriptName, "script name should be the path up to first .php")
		assert.Equal(t, ".txt.php", pathInfo, "path info should be the remainder after first .php")
	}
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/header.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fastcgi

// header is the fixed 8-byte FastCGI record header that precedes
// every record's content and padding on the wire.
type header struct {
	Version       uint8
	Type          uint8
	ID            uint16
	ContentLength uint16
	PaddingLength uint8
	Reserved      uint8
}

// init populates the header for a record of type recType belonging to
// request reqID carrying contentLength bytes of content. The padding
// length rounds the record's content up to the next 8-byte boundary.
// contentLength is assumed to fit in uint16 (callers cap record
// payloads — TODO confirm all call sites stay within maxWrite).
func (h *header) init(recType uint8, reqID uint16, contentLength int) {
	h.Version = 1
	h.Type = recType
	h.ID = reqID
	h.ContentLength = uint16(contentLength)
	// (-contentLength & 7) is the number of bytes needed to reach
	// the next multiple of 8
	h.PaddingLength = uint8(-contentLength & 7)
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/pool.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fastcgi

import (
	"bytes"
	"sync"
)

// bufPool reuses scratch buffers to avoid allocating a fresh
// bytes.Buffer per request.
var bufPool = sync.Pool{
	New: func() any {
		return new(bytes.Buffer)
	},
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/reader.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fastcgi

import (
	"bytes"
	"io"
)

// streamReader presents the content of a sequence of FastCGI records
// from the client's connection as a single byte stream. Stderr records
// encountered along the way are diverted into the stderr buffer.
type streamReader struct {
	c      *client
	rec    record
	stderr bytes.Buffer
}

// Read returns content bytes from the current record, refilling from
// the connection whenever the current record is exhausted. Reading
// stops with io.EOF when an EndRequest record is seen (surfaced by
// record.fill).
func (w *streamReader) Read(p []byte) (n int, err error) {
	for !w.rec.hasMore() {
		err = w.rec.fill(w.c.rwc)
		if err != nil {
			return 0, err
		}

		// standard error output
		if w.rec.h.Type == Stderr {
			// drain the whole stderr record into the side buffer and
			// keep looping until a content-bearing record arrives
			if _, err = io.Copy(&w.stderr, &w.rec); err != nil {
				return 0, err
			}
		}
	}

	return w.rec.Read(p)
}


================================================
FILE: modules/caddyhttp/reverseproxy/fastcgi/record.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fastcgi import ( "encoding/binary" "errors" "io" ) type record struct { h header lr io.LimitedReader padding int64 } func (rec *record) fill(r io.Reader) (err error) { rec.lr.N = rec.padding rec.lr.R = r if _, err = io.Copy(io.Discard, rec); err != nil { return err } if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { return err } if rec.h.Version != 1 { err = errors.New("fcgi: invalid header version") return err } if rec.h.Type == EndRequest { err = io.EOF return err } rec.lr.N = int64(rec.h.ContentLength) rec.padding = int64(rec.h.PaddingLength) return err } func (rec *record) Read(p []byte) (n int, err error) { return rec.lr.Read(p) } func (rec *record) hasMore() bool { return rec.lr.N > 0 } ================================================ FILE: modules/caddyhttp/reverseproxy/fastcgi/writer.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fastcgi import ( "bytes" "encoding/binary" ) // streamWriter abstracts out the separation of a stream into discrete records. // It only writes maxWrite bytes at a time. 
// streamWriter buffers stream content and emits it as FastCGI records.
// The first 8 bytes of buf are reserved as a slot for the record header,
// which is filled in by writeHeader just before flushing (see Flush).
type streamWriter struct {
	c       *client
	h       header
	buf     *bytes.Buffer
	recType uint8
}

// writeRecord writes one complete record of the given type with the given
// content, including trailing padding, directly to the connection.
func (w *streamWriter) writeRecord(recType uint8, content []byte) (err error) {
	w.h.init(recType, w.c.reqID, len(content))
	// reserve the 8-byte header slot, then overwrite it in place
	w.buf.Write(pad[:8])
	w.writeHeader()
	w.buf.Write(content)
	w.buf.Write(pad[:w.h.PaddingLength])
	_, err = w.buf.WriteTo(w.c.rwc)
	return err
}

// writeBeginRequest sends the BeginRequest record that opens a request;
// its 8-byte body carries the role (big-endian) and flags.
func (w *streamWriter) writeBeginRequest(role uint16, flags uint8) error {
	b := [8]byte{byte(role >> 8), byte(role), flags}
	return w.writeRecord(BeginRequest, b[:])
}

// Write buffers p as stream content, flushing a full record to the
// connection whenever the buffered content reaches maxWrite bytes.
func (w *streamWriter) Write(p []byte) (int, error) {
	// init header
	if w.buf.Len() < 8 {
		w.buf.Write(pad[:8])
	}
	nn := 0
	for len(p) > 0 {
		n := len(p)
		// room left in this record (8 accounts for the header slot)
		nl := maxWrite + 8 - w.buf.Len()
		if n > nl {
			n = nl
			w.buf.Write(p[:n])
			if err := w.Flush(); err != nil {
				return nn, err
			}
			// reset headers
			w.buf.Write(pad[:8])
		} else {
			w.buf.Write(p[:n])
		}
		nn += n
		p = p[n:]
	}
	return nn, nil
}

// endStream closes the stream of w.recType records.
func (w *streamWriter) endStream() error {
	// send empty record to close the stream
	return w.writeRecord(w.recType, nil)
}

// writePairs encodes name-value pairs in FastCGI's length-prefixed format
// and streams them out, flushing whenever a record would exceed maxWrite.
// Oversized values are truncated so a single pair always fits one record.
func (w *streamWriter) writePairs(pairs map[string]string) error {
	b := make([]byte, 8)
	nn := 0

	// init headers
	w.buf.Write(b)
	for k, v := range pairs {
		m := 8 + len(k) + len(v)
		if m > maxWrite {
			// param data size exceeds 65535 bytes; truncate the value
			vl := maxWrite - 8 - len(k)
			v = v[:vl]
		}
		n := encodeSize(b, uint32(len(k)))
		n += encodeSize(b[n:], uint32(len(v)))
		m = n + len(k) + len(v)
		if (nn + m) > maxWrite {
			if err := w.Flush(); err != nil {
				return err
			}
			// reset headers
			w.buf.Write(b)
			nn = 0
		}
		nn += m
		w.buf.Write(b[:n])
		w.buf.WriteString(k)
		w.buf.WriteString(v)
	}
	return w.FlushStream()
}

// encodeSize writes size into b using FastCGI's variable-length encoding:
// one byte for sizes <= 127, otherwise four bytes with the high bit set.
// It returns the number of bytes written.
func encodeSize(b []byte, size uint32) int {
	if size > 127 {
		size |= 1 << 31
		binary.BigEndian.PutUint32(b, size)
		return 4
	}
	b[0] = byte(size) //nolint:gosec // false positive; b is made 8 bytes long, then this function is always called with b being at least 4 or 1 byte long
	return 1
}

// writeHeader populate header wire data in buf, it abuses buffer.Bytes() modification
func (w *streamWriter) writeHeader() {
	h := w.buf.Bytes()[:8]
	h[0] = w.h.Version
	h[1] = w.h.Type
	binary.BigEndian.PutUint16(h[2:4], w.h.ID)
	binary.BigEndian.PutUint16(h[4:6], w.h.ContentLength)
	h[6] = w.h.PaddingLength
	h[7] = w.h.Reserved
}

// Flush write buffer data to the underlying connection, it assumes header data is the first 8 bytes of buf
func (w *streamWriter) Flush() error {
	w.h.init(w.recType, w.c.reqID, w.buf.Len()-8)
	w.writeHeader()
	w.buf.Write(pad[:w.h.PaddingLength])
	_, err := w.buf.WriteTo(w.c.rwc)
	return err
}

// FlushStream flush data then end current stream
func (w *streamWriter) FlushStream() error {
	if err := w.Flush(); err != nil {
		return err
	}
	return w.endStream()
}

================================================
FILE: modules/caddyhttp/reverseproxy/forwardauth/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package forwardauth

import (
	"encoding/json"
	"net/http"
	"sort"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
)

func init() {
	httpcaddyfile.RegisterDirective("forward_auth", parseCaddyfile)
}

// parseCaddyfile parses the forward_auth directive, which has the same syntax
// as the reverse_proxy directive (in fact, the reverse_proxy's directive
// Unmarshaler is invoked by this function) but the resulting proxy is specially
// configured for most™️ auth gateways that support forward auth. The typical
// config which looks something like this:
//
//	forward_auth auth-gateway:9091 {
//		uri /authenticate?redirect=https://auth.example.com
//		copy_headers Remote-User Remote-Email
//	}
//
// is equivalent to a reverse_proxy directive like this:
//
//	reverse_proxy auth-gateway:9091 {
//		method GET
//		rewrite /authenticate?redirect=https://auth.example.com
//
//		header_up X-Forwarded-Method {method}
//		header_up X-Forwarded-Uri {uri}
//
//		@good status 2xx
//		handle_response @good {
//			request_header {
//				Remote-User {http.reverse_proxy.header.Remote-User}
//				Remote-Email {http.reverse_proxy.header.Remote-Email}
//			}
//		}
//	}
func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	if !h.Next() {
		return nil, h.ArgErr()
	}

	// if the user specified a matcher token, use that
	// matcher in a route that wraps both of our routes;
	// either way, strip the matcher token and pass
	// the remaining tokens to the unmarshaler so that
	// we can gain the rest of the reverse_proxy syntax
	userMatcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}

	// make a new dispenser from the remaining tokens so that we
	// can reset the dispenser back to this point for the
	// reverse_proxy unmarshaler to read from it as well
	dispenser := h.NewFromNextSegment()

	// create the reverse proxy handler
	rpHandler := &reverseproxy.Handler{
		// set up defaults for header_up; reverse_proxy already deals with
		// adding the other three X-Forwarded-* headers, but for this flow,
		// we want to also send along the incoming method and URI since this
		// request will have a rewritten URI and method.
		Headers: &headers.Handler{
			Request: &headers.HeaderOps{
				Set: http.Header{
					"X-Forwarded-Method": []string{"{http.request.method}"},
					"X-Forwarded-Uri":    []string{"{http.request.uri}"},
				},
			},
		},

		// we always rewrite the method to GET, which implicitly
		// turns off sending the incoming request's body, which
		// allows later middleware handlers to consume it
		Rewrite: &rewrite.Rewrite{
			Method: "GET",
		},

		HandleResponse: []caddyhttp.ResponseHandler{},
	}

	// collect the headers to copy from the auth response
	// onto the original request, so they can get passed
	// through to a backend app
	headersToCopy := make(map[string]string)

	// read the subdirectives for configuring the forward_auth shortcut
	// NOTE: we delete the tokens as we go so that the reverse_proxy
	// unmarshal doesn't see these subdirectives which it cannot handle
	for dispenser.Next() {
		for dispenser.NextBlock(0) {
			// ignore any sub-subdirectives that might
			// have the same name somewhere within
			// the reverse_proxy passthrough tokens
			if dispenser.Nesting() != 1 {
				continue
			}

			// parse the forward_auth subdirectives
			switch dispenser.Val() {
			case "uri":
				if !dispenser.NextArg() {
					return nil, dispenser.ArgErr()
				}
				rpHandler.Rewrite.URI = dispenser.Val()
				// delete the subdirective name and its argument
				dispenser.DeleteN(2)

			case "copy_headers":
				args := dispenser.RemainingArgs()

				hadBlock := false
				for nesting := dispenser.Nesting(); dispenser.NextBlock(nesting); {
					hadBlock = true
					args = append(args, dispenser.Val())
				}

				// directive name + args
				dispenser.DeleteN(len(args) + 1)

				if hadBlock {
					// opening & closing brace
					dispenser.DeleteN(2)
				}

				for _, headerField := range args {
					// "From>To" renames the header as it's copied
					if strings.Contains(headerField, ">") {
						parts := strings.Split(headerField, ">")
						headersToCopy[parts[0]] = parts[1]
					} else {
						headersToCopy[headerField] = headerField
					}
				}
				if len(headersToCopy) == 0 {
					return nil, dispenser.ArgErr()
				}
			}
		}
	}

	// reset the dispenser after we're done so that the reverse_proxy
	// unmarshaler can read it from the start
	dispenser.Reset()

	// the auth target URI must not be empty
	if rpHandler.Rewrite.URI == "" {
		return nil, dispenser.Errf("the 'uri' subdirective is required")
	}

	// Set up handler for good responses; when a response has 2xx status,
	// then we will copy some headers from the response onto the original
	// request, and allow handling to continue down the middleware chain,
	// by _not_ executing a terminal handler. We must have at least one
	// route in the response handler, even if it's no-op, so that the
	// response handling logic in reverse_proxy doesn't skip this entry.
	goodResponseHandler := caddyhttp.ResponseHandler{
		Match: &caddyhttp.ResponseMatcher{
			StatusCode: []int{2},
		},
		Routes: []caddyhttp.Route{
			{
				HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(
					&caddyhttp.VarsMiddleware{},
					"handler",
					"vars",
					nil,
				)},
			},
		},
	}

	// Sort the headers so that the order in the JSON output is deterministic.
	sortedHeadersToCopy := make([]string, 0, len(headersToCopy))
	for k := range headersToCopy {
		sortedHeadersToCopy = append(sortedHeadersToCopy, k)
	}
	sort.Strings(sortedHeadersToCopy)

	// Set up handlers to copy headers from the auth response onto the
	// original request. We use vars matchers to test that the placeholder
	// values aren't empty, because the header handler would not replace
	// placeholders which have no value.
	copyHeaderRoutes := []caddyhttp.Route{}
	for _, from := range sortedHeadersToCopy {
		to := http.CanonicalHeaderKey(headersToCopy[from])
		placeholderName := "http.reverse_proxy.header." + http.CanonicalHeaderKey(from)

		// Always delete the client-supplied header before conditionally setting
		// it from the auth response. Without this, a client that pre-supplies a
		// header listed in copy_headers can inject arbitrary values when the auth
		// service does not return that header: the MatchNot guard below would
		// skip the Set entirely, leaving the original client-controlled value
		// intact and forwarding it to the backend.
		copyHeaderRoutes = append(copyHeaderRoutes, caddyhttp.Route{
			HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(
				&headers.Handler{
					Request: &headers.HeaderOps{
						Delete: []string{to},
					},
				},
				"handler",
				"headers",
				nil,
			)},
		})

		handler := &headers.Handler{
			Request: &headers.HeaderOps{
				Set: http.Header{
					to: []string{"{" + placeholderName + "}"},
				},
			},
		}

		copyHeaderRoutes = append(copyHeaderRoutes, caddyhttp.Route{
			MatcherSetsRaw: []caddy.ModuleMap{{
				"not": h.JSON(caddyhttp.MatchNot{MatcherSetsRaw: []caddy.ModuleMap{{
					"vars": h.JSON(caddyhttp.VarsMatcher{"{" + placeholderName + "}": []string{""}}),
				}}}),
			}},
			HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(
				handler,
				"handler",
				"headers",
				nil,
			)},
		})
	}
	goodResponseHandler.Routes = append(goodResponseHandler.Routes, copyHeaderRoutes...)

	// note that when a response has any other status than 2xx, then we
	// use the reverse proxy's default behaviour of copying the response
	// back to the client, so we don't need to explicitly add a response
	// handler specifically for that behaviour; we do need the 2xx handler
	// though, to make handling fall through to handlers deeper in the chain.
	rpHandler.HandleResponse = append(rpHandler.HandleResponse, goodResponseHandler)

	// the rest of the config is specified by the user
	// using the reverse_proxy directive syntax
	dispenser.Next() // consume the directive name
	err = rpHandler.UnmarshalCaddyfile(dispenser)
	if err != nil {
		return nil, err
	}
	err = rpHandler.FinalizeUnmarshalCaddyfile(h)
	if err != nil {
		return nil, err
	}

	// create the final reverse proxy route
	rpRoute := caddyhttp.Route{
		HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(
			rpHandler,
			"handler",
			"reverse_proxy",
			nil,
		)},
	}

	// apply the user's matcher if any
	if userMatcherSet != nil {
		rpRoute.MatcherSetsRaw = []caddy.ModuleMap{userMatcherSet}
	}

	return []httpcaddyfile.ConfigValue{
		{
			Class: "route",
			Value: rpRoute,
		},
	}, nil
}

================================================
FILE: modules/caddyhttp/reverseproxy/headers_test.go
================================================
package reverseproxy

import (
	"context"
	"net/http/httptest"
	"testing"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func TestAddForwardedHeadersNonIP(t *testing.T) {
	h := Handler{}

	// Simulate a request with a non-IP remote address (e.g. SCION, abstract socket, or hostname)
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "my-weird-network:12345"

	// Mock the context variables required by Caddy.
	// We need to inject the variable map manually since we aren't running the full server.
	vars := map[string]interface{}{
		caddyhttp.TrustedProxyVarKey: false,
	}
	ctx := context.WithValue(req.Context(), caddyhttp.VarsCtxKey, vars)
	req = req.WithContext(ctx)

	// Execute the unexported function
	err := h.addForwardedHeaders(req)

	// Expectation: No error should be returned for non-IP addresses.
	// The function should simply skip the trusted proxy check.
	if err != nil {
		t.Errorf("expected no error for non-IP address, got: %v", err)
	}
}

func TestAddForwardedHeaders_UnixSocketTrusted(t *testing.T) {
	h := Handler{}

	// "@" is how Go represents an abstract unix socket remote address
	req := httptest.NewRequest("GET", "http://example.com/", nil)
	req.RemoteAddr = "@"
	req.Header.Set("X-Forwarded-For", "1.2.3.4, 10.0.0.1")
	req.Header.Set("X-Forwarded-Proto", "https")
	req.Header.Set("X-Forwarded-Host", "original.example.com")

	vars := map[string]interface{}{
		caddyhttp.TrustedProxyVarKey: true,
		caddyhttp.ClientIPVarKey:     "1.2.3.4",
	}
	ctx := context.WithValue(req.Context(), caddyhttp.VarsCtxKey, vars)
	req = req.WithContext(ctx)

	err := h.addForwardedHeaders(req)
	if err != nil {
		t.Fatalf("expected no error, got: %v", err)
	}

	// a trusted proxy's existing X-Forwarded-* headers are preserved
	if got := req.Header.Get("X-Forwarded-For"); got != "1.2.3.4, 10.0.0.1" {
		t.Errorf("X-Forwarded-For = %q, want %q", got, "1.2.3.4, 10.0.0.1")
	}
	if got := req.Header.Get("X-Forwarded-Proto"); got != "https" {
		t.Errorf("X-Forwarded-Proto = %q, want %q", got, "https")
	}
	if got := req.Header.Get("X-Forwarded-Host"); got != "original.example.com" {
		t.Errorf("X-Forwarded-Host = %q, want %q", got, "original.example.com")
	}
}

func TestAddForwardedHeaders_UnixSocketUntrusted(t *testing.T) {
	h := Handler{}

	req := httptest.NewRequest("GET", "http://example.com/", nil)
	req.RemoteAddr = "@"
	req.Header.Set("X-Forwarded-For", "1.2.3.4")
	req.Header.Set("X-Forwarded-Proto", "https")
	req.Header.Set("X-Forwarded-Host", "spoofed.example.com")

	vars := map[string]interface{}{
		caddyhttp.TrustedProxyVarKey: false,
		caddyhttp.ClientIPVarKey:     "",
	}
	ctx := context.WithValue(req.Context(), caddyhttp.VarsCtxKey, vars)
	req = req.WithContext(ctx)

	err := h.addForwardedHeaders(req)
	if err != nil {
		t.Fatalf("expected no error, got: %v", err)
	}

	// an untrusted client's X-Forwarded-* headers must be stripped
	if got := req.Header.Get("X-Forwarded-For"); got != "" {
		t.Errorf("X-Forwarded-For should be deleted, got %q", got)
	}
	if got := req.Header.Get("X-Forwarded-Proto"); got != "" {
		t.Errorf("X-Forwarded-Proto should be deleted, got %q", got)
	}
	if got := req.Header.Get("X-Forwarded-Host"); got != "" {
		t.Errorf("X-Forwarded-Host should be deleted, got %q", got)
	}
}

func TestAddForwardedHeaders_UnixSocketTrustedNoExistingHeaders(t *testing.T) {
	h := Handler{}

	req := httptest.NewRequest("GET", "http://example.com/", nil)
	req.RemoteAddr = "@"

	vars := map[string]interface{}{
		caddyhttp.TrustedProxyVarKey: true,
		caddyhttp.ClientIPVarKey:     "5.6.7.8",
	}
	ctx := context.WithValue(req.Context(), caddyhttp.VarsCtxKey, vars)
	req = req.WithContext(ctx)

	err := h.addForwardedHeaders(req)
	if err != nil {
		t.Fatalf("expected no error, got: %v", err)
	}

	if got := req.Header.Get("X-Forwarded-For"); got != "" {
		t.Errorf("X-Forwarded-For should be empty when no prior XFF exists, got %q", got)
	}
	if got := req.Header.Get("X-Forwarded-Proto"); got != "http" {
		t.Errorf("X-Forwarded-Proto = %q, want %q", got, "http")
	}
	if got := req.Header.Get("X-Forwarded-Host"); got != "example.com" {
		t.Errorf("X-Forwarded-Host = %q, want %q", got, "example.com")
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/healthchecks.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reverseproxy

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// HealthChecks configures active and passive health checks.
type HealthChecks struct {
	// Active health checks run in the background on a timer. To
	// minimally enable active health checks, set either path or
	// port (or both). Note that active health check status
	// (healthy/unhealthy) is stored per-proxy-handler, not
	// globally; this allows different handlers to use different
	// criteria to decide what defines a healthy backend.
	//
	// Active health checks do not run for dynamic upstreams.
	Active *ActiveHealthChecks `json:"active,omitempty"`

	// Passive health checks monitor proxied requests for errors or timeouts.
	// To minimally enable passive health checks, specify at least an empty
	// config object with fail_duration > 0. Passive health check state is
	// shared (stored globally), so a failure from one handler will be counted
	// by all handlers; but the tolerances or standards for what defines
	// healthy/unhealthy backends is configured per-proxy-handler.
	//
	// Passive health checks technically do operate on dynamic upstreams,
	// but are only effective for very busy proxies where the list of
	// upstreams is mostly stable. This is because the shared/global
	// state of upstreams is cleaned up when the upstreams are no longer
	// used. Since dynamic upstreams are allocated dynamically at each
	// request (specifically, each iteration of the proxy loop per request),
	// they are also cleaned up after every request. Thus, if there is a
	// moment when no requests are actively referring to a particular
	// upstream host, the passive health check state will be reset because
	// it will be garbage-collected. It is usually better for the dynamic
	// upstream module to only return healthy, available backends instead.
	Passive *PassiveHealthChecks `json:"passive,omitempty"`
}

// ActiveHealthChecks holds configuration related to active
// health checks (that is, health checks which occur in a
// background goroutine independently).
type ActiveHealthChecks struct {
	// Deprecated: Use 'uri' instead. This field will be removed. TODO: remove this field
	Path string `json:"path,omitempty"`

	// The URI (path and query) to use for health checks
	URI string `json:"uri,omitempty"`

	// The host:port to use (if different from the upstream's dial address)
	// for health checks. This should be used in tandem with `health_header` and
	// `{http.reverse_proxy.active.target_upstream}`. This can be helpful when
	// creating an intermediate service to do a more thorough health check.
	// If upstream is set, the active health check port is ignored.
	Upstream string `json:"upstream,omitempty"`

	// The port to use (if different from the upstream's dial
	// address) for health checks. If active upstream is set,
	// this value is ignored.
	Port int `json:"port,omitempty"`

	// HTTP headers to set on health check requests.
	Headers http.Header `json:"headers,omitempty"`

	// The HTTP method to use for health checks (default "GET").
	Method string `json:"method,omitempty"`

	// The body to send with the health check request.
	Body string `json:"body,omitempty"`

	// Whether to follow HTTP redirects in response to active health checks (default off).
	FollowRedirects bool `json:"follow_redirects,omitempty"`

	// How frequently to perform active health checks (default 30s).
	Interval caddy.Duration `json:"interval,omitempty"`

	// How long to wait for a response from a backend before
	// considering it unhealthy (default 5s).
	Timeout caddy.Duration `json:"timeout,omitempty"`

	// Number of consecutive health check passes before marking
	// a previously unhealthy backend as healthy again (default 1).
	Passes int `json:"passes,omitempty"`

	// Number of consecutive health check failures before marking
	// a previously healthy backend as unhealthy (default 1).
	Fails int `json:"fails,omitempty"`

	// The maximum response body to download from the backend
	// during a health check.
	MaxSize int64 `json:"max_size,omitempty"`

	// The HTTP status code to expect from a healthy backend.
	ExpectStatus int `json:"expect_status,omitempty"`

	// A regular expression against which to match the response
	// body of a healthy backend.
	ExpectBody string `json:"expect_body,omitempty"`

	// parsed form of URI, set during Provision
	uri *url.URL
	// client used for health-check requests; shares the handler's transport
	httpClient *http.Client
	// compiled form of ExpectBody, set during Provision
	bodyRegexp *regexp.Regexp
	logger     *zap.Logger
}

// Provision ensures that a is set up properly before use.
func (a *ActiveHealthChecks) Provision(ctx caddy.Context, h *Handler) error {
	if !a.IsEnabled() {
		return nil
	}

	// Canonicalize the header keys ahead of time, since
	// JSON unmarshaled headers may be incorrect
	cleaned := http.Header{}
	for key, hdrs := range a.Headers {
		for _, val := range hdrs {
			cleaned.Add(key, val)
		}
	}
	a.Headers = cleaned

	// If Method is not set, default to GET
	if a.Method == "" {
		a.Method = http.MethodGet
	}

	h.HealthChecks.Active.logger = h.logger.Named("health_checker.active")

	timeout := time.Duration(a.Timeout)
	if timeout == 0 {
		timeout = 5 * time.Second
	}

	if a.Path != "" {
		a.logger.Warn("the 'path' option is deprecated, please use 'uri' instead!")
	}

	// parse the URI string (supports path and query)
	if a.URI != "" {
		parsedURI, err := url.Parse(a.URI)
		if err != nil {
			return err
		}
		a.uri = parsedURI
	}

	a.httpClient = &http.Client{
		Timeout:   timeout,
		Transport: h.Transport,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if !a.FollowRedirects {
				return http.ErrUseLastResponse
			}
			return nil
		},
	}

	for _, upstream := range h.Upstreams {
		// if there's an alternative upstream for health-check provided in the config,
		// then use it, otherwise use the upstream's dial address. if upstream is used,
		// then the port is ignored.
		if a.Upstream != "" {
			upstream.activeHealthCheckUpstream = a.Upstream
		} else if a.Port != 0 {
			// if there's an alternative port for health-check provided in the config,
			// then use it, otherwise use the port of upstream.
			upstream.activeHealthCheckPort = a.Port
		}
	}

	if a.Interval == 0 {
		a.Interval = caddy.Duration(30 * time.Second)
	}

	if a.ExpectBody != "" {
		var err error
		a.bodyRegexp, err = regexp.Compile(a.ExpectBody)
		if err != nil {
			return fmt.Errorf("expect_body: compiling regular expression: %v", err)
		}
	}

	if a.Passes < 1 {
		a.Passes = 1
	}

	if a.Fails < 1 {
		a.Fails = 1
	}

	return nil
}

// IsEnabled checks if the active health checks have
// the minimum config necessary to be enabled.
func (a *ActiveHealthChecks) IsEnabled() bool {
	return a.Path != "" || a.URI != "" || a.Port != 0
}

// PassiveHealthChecks holds configuration related to passive
// health checks (that is, health checks which occur during
// the normal flow of request proxying).
type PassiveHealthChecks struct {
	// How long to remember a failed request to a backend. A duration > 0
	// enables passive health checking. Default is 0.
	FailDuration caddy.Duration `json:"fail_duration,omitempty"`

	// The number of failed requests within the FailDuration window to
	// consider a backend as "down". Must be >= 1; default is 1. Requires
	// that FailDuration be > 0.
	MaxFails int `json:"max_fails,omitempty"`

	// Limits the number of simultaneous requests to a backend by
	// marking the backend as "down" if it has this many concurrent
	// requests or more.
	UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"`

	// Count the request as failed if the response comes back with
	// one of these status codes.
	UnhealthyStatus []int `json:"unhealthy_status,omitempty"`

	// Count the request as failed if the response takes at least this
	// long to receive.
	UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"`

	logger *zap.Logger
}

// CircuitBreaker is a type that can act as an early-warning
// system for the health checker when backends are getting
// overloaded. This interface is still experimental and is
// subject to change.
type CircuitBreaker interface {
	OK() bool
	RecordMetric(statusCode int, latency time.Duration)
}

// activeHealthChecker runs active health checks on a
// regular basis and blocks until the handler's context
// (h.ctx) is canceled.
func (h *Handler) activeHealthChecker() {
	// recover from panics so a faulty check can't take down the process
	defer func() {
		if err := recover(); err != nil {
			if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "active health checker panicked"); c != nil {
				c.Write(
					zap.Any("error", err),
					zap.ByteString("stack", debug.Stack()),
				)
			}
		}
	}()
	ticker := time.NewTicker(time.Duration(h.HealthChecks.Active.Interval))
	// run one round immediately rather than waiting a full interval
	h.doActiveHealthCheckForAllHosts()
	for {
		select {
		case <-ticker.C:
			h.doActiveHealthCheckForAllHosts()
		case <-h.ctx.Done():
			ticker.Stop()
			return
		}
	}
}

// doActiveHealthCheckForAllHosts immediately performs
// health checks for all upstream hosts configured by h.
func (h *Handler) doActiveHealthCheckForAllHosts() {
	// check each upstream concurrently in its own goroutine
	for _, upstream := range h.Upstreams {
		go func(upstream *Upstream) {
			// recover from panics so one bad check can't take down the process
			defer func() {
				if err := recover(); err != nil {
					if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "active health checker panicked"); c != nil {
						c.Write(
							zap.Any("error", err),
							zap.ByteString("stack", debug.Stack()),
						)
					}
				}
			}()

			repl := caddy.NewReplacer()

			networkAddr, err := repl.ReplaceOrErr(upstream.Dial, true, true)
			if err != nil {
				if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "invalid use of placeholders in dial address for active health checks"); c != nil {
					c.Write(
						zap.String("address", networkAddr),
						zap.Error(err),
					)
				}
				return
			}
			addr, err := caddy.ParseNetworkAddress(networkAddr)
			if err != nil {
				if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "bad network address"); c != nil {
					c.Write(
						zap.String("address", networkAddr),
						zap.Error(err),
					)
				}
				return
			}
			// an overriding health-check port replaces the address's port range
			if hcp := uint(upstream.activeHealthCheckPort); hcp != 0 {
				if addr.IsUnixNetwork() || addr.IsFdNetwork() {
					addr.Network = "tcp" // I guess we just assume TCP since we are using a port??
				}
				addr.StartPort, addr.EndPort = hcp, hcp
			}
			if addr.PortRangeSize() != 1 {
				if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "multiple addresses (upstream must map to only one address)"); c != nil {
					c.Write(
						zap.String("address", networkAddr),
					)
				}
				return
			}
			hostAddr := addr.JoinHostPort(0)
			if addr.IsUnixNetwork() || addr.IsFdNetwork() {
				// this will be used as the Host portion of a http.Request URL, and
				// paths to socket files would produce an error when creating URL,
				// so use a fake Host value instead; unix sockets are usually local
				hostAddr = "localhost"
			}

			// Fill in the dial info for the upstream
			// If the upstream is set, use that instead
			dialInfoUpstream := upstream
			if h.HealthChecks.Active.Upstream != "" {
				dialInfoUpstream = &Upstream{
					Dial: h.HealthChecks.Active.Upstream,
				}
			} else if upstream.activeHealthCheckPort != 0 {
				// health_port overrides the port; addr has already been updated
				// with the health port, so use its address for dialing
				dialInfoUpstream = &Upstream{
					Dial: addr.JoinHostPort(0),
				}
			}

			dialInfo, _ := dialInfoUpstream.fillDialInfo(repl)

			err = h.doActiveHealthCheck(dialInfo, hostAddr, networkAddr, upstream)
			if err != nil {
				if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "active health check failed"); c != nil {
					c.Write(
						zap.String("address", hostAddr),
						zap.Error(err),
					)
				}
			}
		}(upstream)
	}
}

// doActiveHealthCheck performs a health check to upstream which
// can be reached at address hostAddr. The actual address for
// the request will be built according to active health checker
// config. The health status of the host will be updated
// according to whether it passes the health check. An error is
// returned only if the health check fails to occur or if marking
// the host's health status fails.
func (h *Handler) doActiveHealthCheck(dialInfo DialInfo, hostAddr string, networkAddr string, upstream *Upstream) error { // create the URL for the request that acts as a health check u := &url.URL{ Scheme: "http", Host: hostAddr, } // split the host and port if possible, override the port if configured host, port, err := net.SplitHostPort(hostAddr) if err != nil { host = hostAddr } // ignore active health check port if active upstream is provided as the // active upstream already contains the replacement port if h.HealthChecks.Active.Upstream != "" { u.Host = h.HealthChecks.Active.Upstream } else if h.HealthChecks.Active.Port != 0 { port := strconv.Itoa(h.HealthChecks.Active.Port) u.Host = net.JoinHostPort(host, port) } // override health check schemes if applicable if hcsot, ok := h.Transport.(HealthCheckSchemeOverriderTransport); ok { hcsot.OverrideHealthCheckScheme(u, port) } // if we have a provisioned uri, use that, otherwise use // the deprecated Path option if h.HealthChecks.Active.uri != nil { u.Path = h.HealthChecks.Active.uri.Path u.RawQuery = h.HealthChecks.Active.uri.RawQuery } else { u.Path = h.HealthChecks.Active.Path } // replacer used for both body and headers. Only globals (env vars, system info, etc.) 
are available repl := caddy.NewReplacer() // if body is provided, create a reader for it, otherwise nil var requestBody io.Reader if h.HealthChecks.Active.Body != "" { // set body, using replacer requestBody = strings.NewReader(repl.ReplaceAll(h.HealthChecks.Active.Body, "")) } // attach dialing information to this request, as well as context values that // may be expected by handlers of this request ctx := h.ctx.Context ctx = context.WithValue(ctx, caddy.ReplacerCtxKey, caddy.NewReplacer()) ctx = context.WithValue(ctx, caddyhttp.VarsCtxKey, map[string]any{ dialInfoVarKey: dialInfo, }) req, err := http.NewRequestWithContext(ctx, h.HealthChecks.Active.Method, u.String(), requestBody) if err != nil { return fmt.Errorf("making request: %v", err) } ctx = context.WithValue(ctx, caddyhttp.OriginalRequestCtxKey, *req) req = req.WithContext(ctx) // set headers, using replacer repl.Set("http.reverse_proxy.active.target_upstream", networkAddr) for key, vals := range h.HealthChecks.Active.Headers { key = repl.ReplaceAll(key, "") if key == "Host" { req.Host = repl.ReplaceAll(h.HealthChecks.Active.Headers.Get(key), "") continue } for _, val := range vals { req.Header.Add(key, repl.ReplaceKnown(val, "")) } } markUnhealthy := func() { // increment failures and then check if it has reached the threshold to mark unhealthy err := upstream.Host.countHealthFail(1) if err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "could not count active health failure"); c != nil { c.Write( zap.String("host", upstream.Dial), zap.Error(err), ) } return } if upstream.Host.activeHealthFails() >= h.HealthChecks.Active.Fails { // dispatch an event that the host newly became unhealthy if upstream.setHealthy(false) { h.events.Emit(h.ctx, "unhealthy", map[string]any{"host": hostAddr}) upstream.Host.resetHealth() } } } markHealthy := func() { // increment passes and then check if it has reached the threshold to be healthy err := upstream.countHealthPass(1) if err != nil { if c := 
h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "could not count active health pass"); c != nil { c.Write( zap.String("host", upstream.Dial), zap.Error(err), ) } return } if upstream.Host.activeHealthPasses() >= h.HealthChecks.Active.Passes { if upstream.setHealthy(true) { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "host is up"); c != nil { c.Write(zap.String("host", hostAddr)) } h.events.Emit(h.ctx, "healthy", map[string]any{"host": hostAddr}) upstream.Host.resetHealth() } } } // do the request, being careful to tame the response body resp, err := h.HealthChecks.Active.httpClient.Do(req) //nolint:gosec // no SSRF if err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "HTTP request failed"); c != nil { c.Write( zap.String("host", hostAddr), zap.Error(err), ) } markUnhealthy() return nil } var body io.Reader = resp.Body if h.HealthChecks.Active.MaxSize > 0 { body = io.LimitReader(body, h.HealthChecks.Active.MaxSize) } defer func() { // drain any remaining body so connection could be re-used _, _ = io.Copy(io.Discard, body) resp.Body.Close() }() // if status code is outside criteria, mark down if h.HealthChecks.Active.ExpectStatus > 0 { if !caddyhttp.StatusCodeMatches(resp.StatusCode, h.HealthChecks.Active.ExpectStatus) { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "unexpected status code"); c != nil { c.Write( zap.Int("status_code", resp.StatusCode), zap.String("host", hostAddr), ) } markUnhealthy() return nil } } else if resp.StatusCode < 200 || resp.StatusCode >= 300 { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "status code out of tolerances"); c != nil { c.Write( zap.Int("status_code", resp.StatusCode), zap.String("host", hostAddr), ) } markUnhealthy() return nil } // if body does not match regex, mark down if h.HealthChecks.Active.bodyRegexp != nil { bodyBytes, err := io.ReadAll(body) if err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "failed to read 
response body"); c != nil { c.Write( zap.String("host", hostAddr), zap.Error(err), ) } markUnhealthy() return nil } if !h.HealthChecks.Active.bodyRegexp.Match(bodyBytes) { if c := h.HealthChecks.Active.logger.Check(zapcore.InfoLevel, "response body failed expectations"); c != nil { c.Write( zap.String("host", hostAddr), ) } markUnhealthy() return nil } } // passed health check parameters, so mark as healthy markHealthy() return nil } // countFailure is used with passive health checks. It // remembers 1 failure for upstream for the configured // duration. If passive health checks are disabled or // failure expiry is 0, this is a no-op. func (h *Handler) countFailure(upstream *Upstream) { // only count failures if passive health checking is enabled // and if failures are configured have a non-zero expiry if h.HealthChecks == nil || h.HealthChecks.Passive == nil { return } failDuration := time.Duration(h.HealthChecks.Passive.FailDuration) if failDuration == 0 { return } // count failure immediately err := upstream.Host.countFail(1) if err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "could not count failure"); c != nil { c.Write( zap.String("host", upstream.Dial), zap.Error(err), ) } return } // forget it later go func(host *Host, failDuration time.Duration) { defer func() { if err := recover(); err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "passive health check failure forgetter panicked"); c != nil { c.Write( zap.Any("error", err), zap.ByteString("stack", debug.Stack()), ) } } }() timer := time.NewTimer(failDuration) select { case <-h.ctx.Done(): if !timer.Stop() { <-timer.C } case <-timer.C: } err := host.countFail(-1) if err != nil { if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "could not forget failure"); c != nil { c.Write( zap.String("host", upstream.Dial), zap.Error(err), ) } } }(upstream.Host, failDuration) } ================================================ FILE: 
modules/caddyhttp/reverseproxy/hosts.go ================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package reverseproxy

import (
	"context"
	"fmt"
	"net/netip"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// UpstreamPool is a collection of upstreams.
type UpstreamPool []*Upstream

// Upstream bridges this proxy's configuration to the
// state of the backend host it is correlated with.
// Upstream values must not be copied.
type Upstream struct {
	// Host carries the shared, in-memory health/usage state for this
	// upstream's address; it is embedded so its counters and methods
	// are promoted onto Upstream.
	*Host `json:"-"`

	// The [network address](/docs/conventions#network-addresses)
	// to dial to connect to the upstream. Must represent precisely
	// one socket (i.e. no port ranges). A valid network address
	// either has a host and port or is a unix socket address.
	//
	// Placeholders may be used to make the upstream dynamic, but be
	// aware of the health check implications of this: a single
	// upstream that represents numerous (perhaps arbitrary) backends
	// can be considered down if one or enough of the arbitrary
	// backends is down. Also be aware of open proxy vulnerabilities.
	Dial string `json:"dial,omitempty"`

	// The maximum number of simultaneous requests to allow to
	// this upstream. If set, overrides the global passive health
	// check UnhealthyRequestCount value.
	MaxRequests int `json:"max_requests,omitempty"`

	// TODO: This could be really useful, to bind requests
	// with certain properties to specific backends
	// HeaderAffinity string
	// IPAffinity string

	// activeHealthCheckPort, if nonzero, overrides the dial port when
	// performing active health checks against this upstream.
	activeHealthCheckPort     int
	activeHealthCheckUpstream string
	// healthCheckPolicy holds the passive health check config that
	// Healthy() consults (MaxFails threshold).
	healthCheckPolicy *PassiveHealthChecks
	// cb, if set, is consulted last by Healthy().
	cb        CircuitBreaker
	unhealthy int32 // accessed atomically; status from active health checker
}

// (pointer receiver necessary to avoid a race condition, since
// copying the Upstream reads the 'unhealthy' field which is
// accessed atomically)
func (u *Upstream) String() string { return u.Dial }

// Available returns true if the remote host
// is available to receive requests. This is
// the method that should be used by selection
// policies, etc. to determine if a backend
// should be able to be sent a request.
func (u *Upstream) Available() bool {
	return u.Healthy() && !u.Full()
}

// Healthy returns true if the remote host
// is currently known to be healthy or "up".
// It consults the circuit breaker, if any.
func (u *Upstream) Healthy() bool {
	healthy := u.healthy()
	if healthy && u.healthCheckPolicy != nil {
		healthy = u.Host.Fails() < u.healthCheckPolicy.MaxFails
	}
	if healthy && u.cb != nil {
		healthy = u.cb.OK()
	}
	return healthy
}

// Full returns true if the remote host
// cannot receive more requests at this time.
func (u *Upstream) Full() bool {
	return u.MaxRequests > 0 && u.Host.NumRequests() >= u.MaxRequests
}

// fillDialInfo returns a filled DialInfo for upstream u, using the request
// context. Note that the returned value is not a pointer.
func (u *Upstream) fillDialInfo(repl *caddy.Replacer) (DialInfo, error) { var addr caddy.NetworkAddress // use provided dial address var err error dial := repl.ReplaceAll(u.Dial, "") addr, err = caddy.ParseNetworkAddress(dial) if err != nil { return DialInfo{}, fmt.Errorf("upstream %s: invalid dial address %s: %v", u.Dial, dial, err) } if numPorts := addr.PortRangeSize(); numPorts != 1 { return DialInfo{}, fmt.Errorf("upstream %s: dial address must represent precisely one socket: %s represents %d", u.Dial, dial, numPorts) } return DialInfo{ Upstream: u, Network: addr.Network, Address: addr.JoinHostPort(0), Host: addr.Host, Port: strconv.Itoa(int(addr.StartPort)), }, nil } func (u *Upstream) fillHost() { host := new(Host) existingHost, loaded := hosts.LoadOrStore(u.String(), host) if loaded { host = existingHost.(*Host) } u.Host = host } // fillDynamicHost is like fillHost, but stores the host in the separate // dynamicHosts map rather than the reference-counted UsagePool. Dynamic // hosts are not reference-counted; instead, they are retained as long as // they are actively seen and are evicted by a background cleanup goroutine // after dynamicHostIdleExpiry of inactivity. This preserves health state // (e.g. passive fail counts) across sequential requests. func (u *Upstream) fillDynamicHost() { dynamicHostsMu.Lock() entry, ok := dynamicHosts[u.String()] if ok { entry.lastSeen = time.Now() dynamicHosts[u.String()] = entry u.Host = entry.host } else { h := new(Host) dynamicHosts[u.String()] = dynamicHostEntry{host: h, lastSeen: time.Now()} u.Host = h } dynamicHostsMu.Unlock() // ensure the cleanup goroutine is running dynamicHostsCleanerOnce.Do(func() { go func() { for { time.Sleep(dynamicHostCleanupInterval) dynamicHostsMu.Lock() for addr, entry := range dynamicHosts { if time.Since(entry.lastSeen) > dynamicHostIdleExpiry { delete(dynamicHosts, addr) } } dynamicHostsMu.Unlock() } }() }) } // Host is the basic, in-memory representation of the state of a remote host. 
// Its fields are accessed atomically and Host values must not be copied. type Host struct { numRequests int64 // must be 64-bit aligned on 32-bit systems (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG) fails int64 activePasses int64 activeFails int64 } // NumRequests returns the number of active requests to the upstream. func (h *Host) NumRequests() int { return int(atomic.LoadInt64(&h.numRequests)) } // Fails returns the number of recent failures with the upstream. func (h *Host) Fails() int { return int(atomic.LoadInt64(&h.fails)) } // activeHealthPasses returns the number of consecutive active health check passes with the upstream. func (h *Host) activeHealthPasses() int { return int(atomic.LoadInt64(&h.activePasses)) } // activeHealthFails returns the number of consecutive active health check failures with the upstream. func (h *Host) activeHealthFails() int { return int(atomic.LoadInt64(&h.activeFails)) } // countRequest mutates the active request count by // delta. It returns an error if the adjustment fails. func (h *Host) countRequest(delta int) error { result := atomic.AddInt64(&h.numRequests, int64(delta)) if result < 0 { return fmt.Errorf("count below 0: %d", result) } return nil } // countFail mutates the recent failures count by // delta. It returns an error if the adjustment fails. func (h *Host) countFail(delta int) error { result := atomic.AddInt64(&h.fails, int64(delta)) if result < 0 { return fmt.Errorf("count below 0: %d", result) } return nil } // countHealthPass mutates the recent passes count by // delta. It returns an error if the adjustment fails. func (h *Host) countHealthPass(delta int) error { result := atomic.AddInt64(&h.activePasses, int64(delta)) if result < 0 { return fmt.Errorf("count below 0: %d", result) } return nil } // countHealthFail mutates the recent failures count by // delta. It returns an error if the adjustment fails. 
func (h *Host) countHealthFail(delta int) error { result := atomic.AddInt64(&h.activeFails, int64(delta)) if result < 0 { return fmt.Errorf("count below 0: %d", result) } return nil } // resetHealth resets the health check counters. func (h *Host) resetHealth() { atomic.StoreInt64(&h.activePasses, 0) atomic.StoreInt64(&h.activeFails, 0) } // healthy returns true if the upstream is not actively marked as unhealthy. // (This returns the status only from the "active" health checks.) func (u *Upstream) healthy() bool { return atomic.LoadInt32(&u.unhealthy) == 0 } // SetHealthy sets the upstream has healthy or unhealthy // and returns true if the new value is different. This // sets the status only for the "active" health checks. func (u *Upstream) setHealthy(healthy bool) bool { var unhealthy, compare int32 = 1, 0 if healthy { unhealthy, compare = 0, 1 } return atomic.CompareAndSwapInt32(&u.unhealthy, compare, unhealthy) } // DialInfo contains information needed to dial a // connection to an upstream host. This information // may be different than that which is represented // in a URL (for example, unix sockets don't have // a host that can be represented in a URL, but // they certainly have a network name and address). type DialInfo struct { // Upstream is the Upstream associated with // this DialInfo. It may be nil. Upstream *Upstream // The network to use. This should be one of // the values that is accepted by net.Dial: // https://golang.org/pkg/net/#Dial Network string // The address to dial. Follows the same // semantics and rules as net.Dial. Address string // Host and Port are components of Address. Host, Port string } // String returns the Caddy network address form // by joining the network and address with a // forward slash. func (di DialInfo) String() string { return caddy.JoinNetworkAddress(di.Network, di.Host, di.Port) } // GetDialInfo gets the upstream dialing info out of the context, // and returns true if there was a valid value; false otherwise. 
func GetDialInfo(ctx context.Context) (DialInfo, bool) { dialInfo, ok := caddyhttp.GetVar(ctx, dialInfoVarKey).(DialInfo) return dialInfo, ok } // hosts is the global repository for hosts that are // currently in use by active configuration(s). This // allows the state of remote hosts to be preserved // through config reloads. var hosts = caddy.NewUsagePool() // dynamicHosts tracks hosts that were provisioned from dynamic upstream // sources. Unlike static upstreams which are reference-counted via the // UsagePool, dynamic upstream hosts are not reference-counted. Instead, // their last-seen time is updated on each request, and a background // goroutine evicts entries that have been idle for dynamicHostIdleExpiry. // This preserves health state (e.g. passive fail counts) across requests // to the same dynamic backend. var ( dynamicHosts = make(map[string]dynamicHostEntry) dynamicHostsMu sync.RWMutex dynamicHostsCleanerOnce sync.Once dynamicHostCleanupInterval = 5 * time.Minute dynamicHostIdleExpiry = time.Hour ) // dynamicHostEntry holds a Host and the last time it was seen // in a set of dynamic upstreams returned for a request. type dynamicHostEntry struct { host *Host lastSeen time.Time } // dialInfoVarKey is the key used for the variable that holds // the dial info for the upstream connection. const dialInfoVarKey = "reverse_proxy.dial_info" // proxyProtocolInfoVarKey is the key used for the variable that holds // the proxy protocol info for the upstream connection. const proxyProtocolInfoVarKey = "reverse_proxy.proxy_protocol_info" // ProxyProtocolInfo contains information needed to write proxy protocol to a // connection to an upstream host. type ProxyProtocolInfo struct { AddrPort netip.AddrPort } // tlsH1OnlyVarKey is the key used that indicates the connection will use h1 only for TLS. 
// https://github.com/caddyserver/caddy/issues/7292 const tlsH1OnlyVarKey = "reverse_proxy.tls_h1_only" // proxyVarKey is the key used that indicates the proxy server used for a request. const proxyVarKey = "reverse_proxy.proxy" ================================================ FILE: modules/caddyhttp/reverseproxy/httptransport.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "context" "crypto/tls" "crypto/x509" "encoding/base64" "encoding/json" "fmt" weakrand "math/rand/v2" "net" "net/http" "net/url" "os" "reflect" "slices" "strings" "time" "github.com/pires/go-proxyproto" "github.com/quic-go/quic-go/http3" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/net/http2" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" "github.com/caddyserver/caddy/v2/modules/caddytls" "github.com/caddyserver/caddy/v2/modules/internal/network" ) func init() { caddy.RegisterModule(HTTPTransport{}) } // HTTPTransport is essentially a configuration wrapper for http.Transport. // It defines a JSON structure useful when configuring the HTTP transport // for Caddy's reverse proxy. It builds its http.Transport at Provision. 
type HTTPTransport struct {
	// TODO: It's possible that other transports (like fastcgi) might be
	// able to borrow/use at least some of these config fields; if so,
	// maybe move them into a type called CommonTransport and embed it?

	// Configures the DNS resolver used to resolve the IP address of upstream hostnames.
	Resolver *UpstreamResolver `json:"resolver,omitempty"`

	// Configures TLS to the upstream. Setting this to an empty struct
	// is sufficient to enable TLS with reasonable defaults.
	TLS *TLSConfig `json:"tls,omitempty"`

	// Configures HTTP Keep-Alive (enabled by default). Should only be
	// necessary if rigorous testing has shown that tuning this helps
	// improve performance.
	KeepAlive *KeepAlive `json:"keep_alive,omitempty"`

	// Whether to enable compression to upstream. Default: true
	Compression *bool `json:"compression,omitempty"`

	// Maximum number of connections per host. Default: 0 (no limit)
	MaxConnsPerHost int `json:"max_conns_per_host,omitempty"`

	// If non-empty, which PROXY protocol version to send when
	// connecting to an upstream. Default: off.
	ProxyProtocol string `json:"proxy_protocol,omitempty"`

	// URL to the server that the HTTP transport will use to proxy
	// requests to the upstream. See http.Transport.Proxy for
	// information regarding supported protocols. This value takes
	// precedence over `HTTP_PROXY`, etc.
	//
	// Providing a value to this parameter results in
	// requests flowing through the reverse_proxy in the following
	// way:
	//
	//     User Agent ->
	//      reverse_proxy ->
	//      forward_proxy_url -> upstream
	//
	// Default: http.ProxyFromEnvironment
	// DEPRECATED: Use NetworkProxyRaw|`network_proxy` instead. Subject to removal.
	ForwardProxyURL string `json:"forward_proxy_url,omitempty"`

	// How long to wait before timing out trying to connect to
	// an upstream. Default: `3s`.
	DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`

	// How long to wait before spawning an RFC 6555 Fast Fallback
	// connection. A negative value disables this. Default: `300ms`.
	FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`

	// How long to wait for reading response headers from server. Default: No timeout.
	ResponseHeaderTimeout caddy.Duration `json:"response_header_timeout,omitempty"`

	// The length of time to wait for a server's first response
	// headers after fully writing the request headers if the
	// request has a header "Expect: 100-continue". Default: No timeout.
	ExpectContinueTimeout caddy.Duration `json:"expect_continue_timeout,omitempty"`

	// The maximum bytes to read from response headers. Default: `10MiB`.
	MaxResponseHeaderSize int64 `json:"max_response_header_size,omitempty"`

	// The size of the write buffer in bytes. Default: `4KiB`.
	WriteBufferSize int `json:"write_buffer_size,omitempty"`

	// The size of the read buffer in bytes. Default: `4KiB`.
	ReadBufferSize int `json:"read_buffer_size,omitempty"`

	// The maximum time to wait for next read from backend. Default: no timeout.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// The maximum time to wait for next write to backend. Default: no timeout.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// The versions of HTTP to support. As a special case, "h2c"
	// can be specified to use H2C (HTTP/2 over Cleartext) to the
	// upstream (this feature is experimental and subject to
	// change or removal). Default: ["1.1", "2"]
	//
	// EXPERIMENTAL: "3" enables HTTP/3, but it must be the only
	// version specified if enabled. Additionally, HTTPS must be
	// enabled to the upstream as HTTP/3 requires TLS. Subject
	// to change or removal while experimental.
	Versions []string `json:"versions,omitempty"`

	// Specify the address to bind to when connecting to an upstream. In other words,
	// it is the address the upstream sees as the remote address.
	LocalAddress string `json:"local_address,omitempty"`

	// The pre-configured underlying HTTP transport.
	Transport *http.Transport `json:"-"`

	// The module that provides the network (forward) proxy
	// URL that the HTTP transport will use to proxy
	// requests to the upstream. See [http.Transport.Proxy](https://pkg.go.dev/net/http#Transport.Proxy)
	// for information regarding supported protocols.
	//
	// Providing a value to this parameter results in requests
	// flowing through the reverse_proxy in the following way:
	//
	//     User Agent ->
	//      reverse_proxy ->
	//      [proxy provided by the module] -> upstream
	//
	// If nil, defaults to reading the `HTTP_PROXY`,
	// `HTTPS_PROXY`, and `NO_PROXY` environment variables.
	NetworkProxyRaw json.RawMessage `json:"network_proxy,omitempty" caddy:"namespace=caddy.network_proxy inline_key=from"`

	// h3Transport is built when Versions is exactly ["3"].
	h3Transport *http3.Transport // TODO: EXPERIMENTAL (May 2024)
}

// CaddyModule returns the Caddy module information.
func (HTTPTransport) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.transport.http",
		New: func() caddy.Module { return new(HTTPTransport) },
	}
}

// allowedVersions enumerates the HTTP versions Provision accepts.
var (
	allowedVersions       = []string{"1.1", "2", "h2c", "3"}
	allowedVersionsString = strings.Join(allowedVersions, ", ")
)

// Provision sets up h.Transport with a *http.Transport
// that is ready to use.
func (h *HTTPTransport) Provision(ctx caddy.Context) error {
	if len(h.Versions) == 0 {
		h.Versions = []string{"1.1", "2"}
	}

	// some users may provide http versions not recognized by caddy, instead of trying to
	// guess the version, we just error out and let the user fix their config
	// see: https://github.com/caddyserver/caddy/issues/7111
	for _, v := range h.Versions {
		if !slices.Contains(allowedVersions, v) {
			return fmt.Errorf("unsupported HTTP version: %s, supported version: %s", v, allowedVersionsString)
		}
	}

	rt, err := h.NewTransport(ctx)
	if err != nil {
		return err
	}
	h.Transport = rt

	return nil
}

// NewTransport builds a standard-lib-compatible http.Transport value from h.
// NewTransport builds a standard-library *http.Transport from h's
// configuration. It fills in defaults (keep-alive probe interval, idle
// timeout, idle conns per host, dial timeout), wires up the dialer
// (local address binding, optional custom DNS resolver), installs a
// DialContext that honors per-request dial info, PROXY protocol, and
// read/write timeouts, negotiates forward proxies, configures TLS,
// and applies HTTP version selection (h2c, exclusive HTTP/3).
func (h *HTTPTransport) NewTransport(caddyCtx caddy.Context) (*http.Transport, error) {
	// Set keep-alive defaults if it wasn't otherwise configured
	if h.KeepAlive == nil {
		h.KeepAlive = new(KeepAlive)
	}
	if h.KeepAlive.ProbeInterval == 0 {
		h.KeepAlive.ProbeInterval = caddy.Duration(30 * time.Second)
	}
	if h.KeepAlive.IdleConnTimeout == 0 {
		h.KeepAlive.IdleConnTimeout = caddy.Duration(2 * time.Minute)
	}
	if h.KeepAlive.MaxIdleConnsPerHost == 0 {
		h.KeepAlive.MaxIdleConnsPerHost = 32 // seems about optimal, see #2805
	}

	// Set a relatively short default dial timeout.
	// This is helpful to make load-balancer retries more speedy.
	if h.DialTimeout == 0 {
		h.DialTimeout = caddy.Duration(3 * time.Second)
	}

	dialer := &net.Dialer{
		Timeout:       time.Duration(h.DialTimeout),
		FallbackDelay: time.Duration(h.FallbackDelay),
	}

	if h.LocalAddress != "" {
		netaddr, err := caddy.ParseNetworkAddressWithDefaults(h.LocalAddress, "tcp", 0)
		if err != nil {
			return nil, err
		}
		if netaddr.PortRangeSize() > 1 {
			return nil, fmt.Errorf("local_address must be a single address, not a port range")
		}
		switch netaddr.Network {
		case "tcp", "tcp4", "tcp6":
			dialer.LocalAddr, err = net.ResolveTCPAddr(netaddr.Network, netaddr.JoinHostPort(0))
			if err != nil {
				return nil, err
			}
		case "unix", "unixgram", "unixpacket":
			dialer.LocalAddr, err = net.ResolveUnixAddr(netaddr.Network, netaddr.JoinHostPort(0))
			if err != nil {
				return nil, err
			}
		case "udp", "udp4", "udp6":
			return nil, fmt.Errorf("local_address must be a TCP address, not a UDP address")
		default:
			return nil, fmt.Errorf("unsupported network")
		}
	}

	if h.Resolver != nil {
		err := h.Resolver.ParseAddresses()
		if err != nil {
			return nil, err
		}
		d := &net.Dialer{
			Timeout:       time.Duration(h.DialTimeout),
			FallbackDelay: time.Duration(h.FallbackDelay),
		}
		// pick one of the configured resolver addresses at random for each lookup
		dialer.Resolver = &net.Resolver{
			PreferGo: true,
			Dial: func(ctx context.Context, _, _ string) (net.Conn, error) {
				//nolint:gosec
				addr := h.Resolver.netAddrs[weakrand.IntN(len(h.Resolver.netAddrs))]
				return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0))
			},
		}
	}

	// dialContext is shared by both DialContext and (when needed) DialTLSContext below
	dialContext := func(ctx context.Context, network, address string) (net.Conn, error) {
		// The network is usually tcp, and the address is the host in http.Request.URL.Host
		// and that's been overwritten in directRequest
		// However, if proxy is used according to http.ProxyFromEnvironment or proxy providers,
		// address will be the address of the proxy server.
		// This means we can safely use the address in dialInfo if proxy is not used (the address and network will be same any way)
		// or if the upstream is unix (because there is no way socks or http proxy can be used for unix address).
		if dialInfo, ok := GetDialInfo(ctx); ok {
			if caddyhttp.GetVar(ctx, proxyVarKey) == nil || strings.HasPrefix(dialInfo.Network, "unix") {
				network = dialInfo.Network
				address = dialInfo.Address
			}
		}
		conn, err := dialer.DialContext(ctx, network, address)
		if err != nil {
			// identify this error as one that occurred during
			// dialing, which can be important when trying to
			// decide whether to retry a request
			return nil, DialError{err}
		}
		if h.ProxyProtocol != "" {
			proxyProtocolInfo, ok := caddyhttp.GetVar(ctx, proxyProtocolInfoVarKey).(ProxyProtocolInfo)
			if !ok {
				return nil, fmt.Errorf("failed to get proxy protocol info from context")
			}
			var proxyv byte
			switch h.ProxyProtocol {
			case "v1":
				proxyv = 1
			case "v2":
				proxyv = 2
			default:
				return nil, fmt.Errorf("unexpected proxy protocol version")
			}

			// The src and dst have to be of the same address family. As we don't know the original
			// dst address (it's kind of impossible to know) and this address is generally of very
			// little interest, we just set it to all zeros.
			var destAddr net.Addr
			switch {
			case proxyProtocolInfo.AddrPort.Addr().Is4():
				destAddr = &net.TCPAddr{
					IP: net.IPv4zero,
				}
			case proxyProtocolInfo.AddrPort.Addr().Is6():
				destAddr = &net.TCPAddr{
					IP: net.IPv6zero,
				}
			default:
				return nil, fmt.Errorf("unexpected remote addr type in proxy protocol info")
			}
			sourceAddr := &net.TCPAddr{
				IP:   proxyProtocolInfo.AddrPort.Addr().AsSlice(),
				Port: int(proxyProtocolInfo.AddrPort.Port()),
				Zone: proxyProtocolInfo.AddrPort.Addr().Zone(),
			}
			header := proxyproto.HeaderProxyFromAddrs(proxyv, sourceAddr, destAddr)

			// retain the log message structure
			switch h.ProxyProtocol {
			case "v1":
				caddyCtx.Logger().Debug("sending proxy protocol header v1", zap.Any("header", header))
			case "v2":
				caddyCtx.Logger().Debug("sending proxy protocol header v2", zap.Any("header", header))
			}

			// write the PROXY header before any application bytes flow
			_, err = header.WriteTo(conn)
			if err != nil {
				// identify this error as one that occurred during
				// dialing, which can be important when trying to
				// decide whether to retry a request
				return nil, DialError{err}
			}
		}

		// if read/write timeouts are configured and this is a TCP connection,
		// enforce the timeouts by wrapping the connection with our own type
		if tcpConn, ok := conn.(*net.TCPConn); ok && (h.ReadTimeout > 0 || h.WriteTimeout > 0) {
			conn = &tcpRWTimeoutConn{
				TCPConn:      tcpConn,
				readTimeout:  time.Duration(h.ReadTimeout),
				writeTimeout: time.Duration(h.WriteTimeout),
				logger:       caddyCtx.Logger(),
			}
		}

		return conn, nil
	}

	// negotiate any HTTP/SOCKS proxy for the HTTP transport
	proxy := http.ProxyFromEnvironment
	if h.ForwardProxyURL != "" {
		caddyCtx.Logger().Warn("forward_proxy_url is deprecated; use network_proxy instead")
		u := network.ProxyFromURL{URL: h.ForwardProxyURL}
		h.NetworkProxyRaw = caddyconfig.JSONModuleObject(u, "from", "url", nil)
	}
	if len(h.NetworkProxyRaw) != 0 {
		proxyMod, err := caddyCtx.LoadModule(h, "NetworkProxyRaw")
		if err != nil {
			return nil, fmt.Errorf("failed to load network_proxy module: %v", err)
		}
		if m, ok := proxyMod.(caddy.ProxyFuncProducer); ok {
			proxy = m.ProxyFunc()
		} else {
			return nil, fmt.Errorf("network_proxy module is not `(func(*http.Request) (*url.URL, error))``")
		}
	}
	// we need to keep track if a proxy is used for a request
	proxyWrapper := func(req *http.Request) (*url.URL, error) {
		if proxy == nil {
			return nil, nil
		}
		u, err := proxy(req)
		if u == nil || err != nil {
			return u, err
		}
		// there must be a proxy for this request
		caddyhttp.SetVar(req.Context(), proxyVarKey, u)
		return u, nil
	}

	rt := &http.Transport{
		Proxy:                  proxyWrapper,
		DialContext:            dialContext,
		MaxConnsPerHost:        h.MaxConnsPerHost,
		ResponseHeaderTimeout:  time.Duration(h.ResponseHeaderTimeout),
		ExpectContinueTimeout:  time.Duration(h.ExpectContinueTimeout),
		MaxResponseHeaderBytes: h.MaxResponseHeaderSize,
		WriteBufferSize:        h.WriteBufferSize,
		ReadBufferSize:         h.ReadBufferSize,
	}

	if h.TLS != nil {
		rt.TLSHandshakeTimeout = time.Duration(h.TLS.HandshakeTimeout)
		var err error
		rt.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(caddyCtx)
		if err != nil {
			return nil, fmt.Errorf("making TLS client config: %v", err)
		}

		serverNameHasPlaceholder := strings.Contains(h.TLS.ServerName, "{")

		// We need to use custom DialTLSContext if:
		// 1. ServerName has a placeholder that needs to be replaced at request-time, OR
		// 2. ProxyProtocol is enabled, because req.URL.Host is modified to include
		//    client address info with "->" separator which breaks Go's address parsing
		if serverNameHasPlaceholder || h.ProxyProtocol != "" {
			rt.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
				// reuses the dialer from above to establish a plaintext connection
				conn, err := dialContext(ctx, network, addr)
				if err != nil {
					return nil, err
				}

				// but add our own handshake logic
				tlsConfig := rt.TLSClientConfig.Clone()
				if serverNameHasPlaceholder {
					repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
					tlsConfig.ServerName = repl.ReplaceAll(tlsConfig.ServerName, "")
				}
				// h1 only
				if caddyhttp.GetVar(ctx, tlsH1OnlyVarKey) == true {
					// stdlib does this
					// https://github.com/golang/go/blob/4837fbe4145cd47b43eed66fee9eed9c2b988316/src/net/http/transport.go#L1701
					tlsConfig.NextProtos = nil
				}
				tlsConn := tls.Client(conn, tlsConfig)

				// complete the handshake before returning the connection
				if rt.TLSHandshakeTimeout != 0 {
					var cancel context.CancelFunc
					ctx, cancel = context.WithTimeoutCause(ctx, rt.TLSHandshakeTimeout,
						fmt.Errorf("HTTP transport TLS handshake %ds timeout", int(rt.TLSHandshakeTimeout.Seconds())))
					defer cancel()
				}
				err = tlsConn.HandshakeContext(ctx)
				if err != nil {
					_ = tlsConn.Close()
					return nil, err
				}
				return tlsConn, nil
			}
		}
	}

	if h.KeepAlive != nil {
		// according to https://pkg.go.dev/net#Dialer.KeepAliveConfig,
		// KeepAlive is ignored if KeepAliveConfig.Enable is true.
		// If configured to 0, a system-dependent default is used.
		// To disable tcp keepalive, choose a negative value,
		// so KeepAliveConfig.Enable is false and KeepAlive is negative.
		// This is different from http keepalive where a tcp connection
		// can transfer multiple http requests/responses.
		dialer.KeepAlive = time.Duration(h.KeepAlive.ProbeInterval)
		dialer.KeepAliveConfig = net.KeepAliveConfig{
			Enable:   h.KeepAlive.ProbeInterval > 0,
			Interval: time.Duration(h.KeepAlive.ProbeInterval),
		}
		if h.KeepAlive.Enabled != nil {
			rt.DisableKeepAlives = !*h.KeepAlive.Enabled
		}
		rt.MaxIdleConns = h.KeepAlive.MaxIdleConns
		rt.MaxIdleConnsPerHost = h.KeepAlive.MaxIdleConnsPerHost
		rt.IdleConnTimeout = time.Duration(h.KeepAlive.IdleConnTimeout)
	}

	if h.Compression != nil {
		rt.DisableCompression = !*h.Compression
	}

	// configure HTTP/3 transport if enabled; however, this does not
	// automatically fall back to lower versions like most web browsers
	// do (that'd add latency and complexity, besides, we expect that
	// site owners control the backends), so it must be exclusive
	if len(h.Versions) == 1 && h.Versions[0] == "3" {
		h.h3Transport = new(http3.Transport)
		if h.TLS != nil {
			var err error
			h.h3Transport.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(caddyCtx)
			if err != nil {
				return nil, fmt.Errorf("making TLS client config for HTTP/3 transport: %v", err)
			}
		}
	} else if len(h.Versions) > 1 && slices.Contains(h.Versions, "3") {
		return nil, fmt.Errorf("if HTTP/3 is enabled to the upstream, no other HTTP versions are supported")
	}

	// if h2/c is enabled, configure it explicitly
	if slices.Contains(h.Versions, "2") || slices.Contains(h.Versions, "h2c") {
		if err := http2.ConfigureTransport(rt); err != nil {
			return nil, err
		}
		// DisableCompression from h2 is configured by http2.ConfigureTransport
		// Likewise, DisableKeepAlives from h1 is used too.
		// Protocols field is only used when the request is not using TLS,
		// http1/2 over tls is still allowed
		if slices.Contains(h.Versions, "h2c") {
			rt.Protocols = new(http.Protocols)
			rt.Protocols.SetUnencryptedHTTP2(true)
			rt.Protocols.SetHTTP1(false)
		}
	}

	return rt, nil
}

// RequestHeaderOps implements TransportHeaderOpsProvider. It returns header
// operations for requests when the transport's configuration indicates they
// should be applied. In particular, when TLS is enabled for this transport,
// return an operation to set the Host header to the upstream host:port
// placeholder so HTTPS upstreams get the proper Host by default.
//
// Note: this is a provision-time hook; the Handler will call this during
// its Provision and cache the resulting HeaderOps. The HeaderOps are
// applied per-request (so placeholders are expanded at request time).
func (h *HTTPTransport) RequestHeaderOps() *headers.HeaderOps {
	// If TLS is not configured for this transport, don't inject Host
	// defaults. TLS being non-nil indicates HTTPS to the upstream.
	if h.TLS == nil {
		return nil
	}
	return &headers.HeaderOps{
		Set: http.Header{
			"Host": []string{"{http.reverse_proxy.upstream.hostport}"},
		},
	}
}

// RoundTrip implements http.RoundTripper.
func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	h.SetScheme(req)

	// use HTTP/3 if enabled (TODO: This is EXPERIMENTAL)
	if h.h3Transport != nil {
		return h.h3Transport.RoundTrip(req)
	}

	return h.Transport.RoundTrip(req)
}

// SetScheme ensures that the outbound request req
// has the scheme set in its URL; the underlying
// http.Transport requires a scheme to be set.
//
// This method may be used by other transport modules
// that wrap/use this one.
func (h *HTTPTransport) SetScheme(req *http.Request) {
	if req.URL.Scheme != "" {
		return
	}
	if h.shouldUseTLS(req) {
		req.URL.Scheme = "https"
	} else {
		req.URL.Scheme = "http"
	}
}

// shouldUseTLS returns true if TLS should be used for req.
func (h *HTTPTransport) shouldUseTLS(req *http.Request) bool {
	if h.TLS == nil {
		return false
	}

	// ports listed in except_ports opt out of TLS even when it is configured
	port := req.URL.Port()
	return !slices.Contains(h.TLS.ExceptPorts, port)
}

// TLSEnabled returns true if TLS is enabled.
func (h HTTPTransport) TLSEnabled() bool {
	return h.TLS != nil
}

// EnableTLS enables TLS on the transport.
func (h *HTTPTransport) EnableTLS(base *TLSConfig) error {
	h.TLS = base
	return nil
}

// EnableH2C enables H2C (HTTP/2 over Cleartext) on the transport.
func (h *HTTPTransport) EnableH2C() error { h.Versions = []string{"h2c", "2"} return nil } // OverrideHealthCheckScheme overrides the scheme of the given URL // used for health checks. func (h HTTPTransport) OverrideHealthCheckScheme(base *url.URL, port string) { // if tls is enabled and the port isn't in the except list, use HTTPs if h.TLSEnabled() && !slices.Contains(h.TLS.ExceptPorts, port) { base.Scheme = "https" } } // ProxyProtocolEnabled returns true if proxy protocol is enabled. func (h HTTPTransport) ProxyProtocolEnabled() bool { return h.ProxyProtocol != "" } // Cleanup implements caddy.CleanerUpper and closes any idle connections. func (h HTTPTransport) Cleanup() error { if h.Transport == nil { return nil } h.Transport.CloseIdleConnections() return nil } // TLSConfig holds configuration related to the TLS configuration for the // transport/client. type TLSConfig struct { // Certificate authority module which provides the certificate pool of trusted certificates CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"` // Deprecated: Use the `ca` field with the `tls.ca_pool.source.inline` module instead. // Optional list of base64-encoded DER-encoded CA certificates to trust. RootCAPool []string `json:"root_ca_pool,omitempty"` // Deprecated: Use the `ca` field with the `tls.ca_pool.source.file` module instead. // List of PEM-encoded CA certificate files to add to the same trust // store as RootCAPool (or root_ca_pool in the JSON). RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"` // PEM-encoded client certificate filename to present to servers. ClientCertificateFile string `json:"client_certificate_file,omitempty"` // PEM-encoded key to use with the client certificate. ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"` // If specified, Caddy will use and automate a client certificate // with this subject name. 
ClientCertificateAutomate string `json:"client_certificate_automate,omitempty"` // If true, TLS verification of server certificates will be disabled. // This is insecure and may be removed in the future. Do not use this // option except in testing or local development environments. InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"` // The duration to allow a TLS handshake to a server. Default: No timeout. HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"` // The server name used when verifying the certificate received in the TLS // handshake. By default, this will use the upstream address' host part. // You only need to override this if your upstream address does not match the // certificate the upstream is likely to use. For example if the upstream // address is an IP address, then you would need to configure this to the // hostname being served by the upstream server. Currently, this does not // support placeholders because the TLS config is not provisioned on each // connection, so a static value must be used. ServerName string `json:"server_name,omitempty"` // TLS renegotiation level. TLS renegotiation is the act of performing // subsequent handshakes on a connection after the first. // The level can be: // - "never": (the default) disables renegotiation. // - "once": allows a remote server to request renegotiation once per connection. // - "freely": allows a remote server to repeatedly request renegotiation. Renegotiation string `json:"renegotiation,omitempty"` // Skip TLS ports specifies a list of upstream ports on which TLS should not be // attempted even if it is configured. Handy when using dynamic upstreams that // return HTTP and HTTPS endpoints too. // When specified, TLS will automatically be configured on the transport. // The value can be a list of any valid tcp port numbers, default empty. ExceptPorts []string `json:"except_ports,omitempty"` // The list of elliptic curves to support. 
Caddy's // defaults are modern and secure. Curves []string `json:"curves,omitempty"` } // MakeTLSClientConfig returns a tls.Config usable by a client to a backend. // If there is no custom TLS configuration, a nil config may be returned. func (t *TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) { cfg := new(tls.Config) // client auth if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile == "" { return nil, fmt.Errorf("client_certificate_file specified without client_certificate_key_file") } if t.ClientCertificateFile == "" && t.ClientCertificateKeyFile != "" { return nil, fmt.Errorf("client_certificate_key_file specified without client_certificate_file") } if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile != "" { cert, err := tls.LoadX509KeyPair(t.ClientCertificateFile, t.ClientCertificateKeyFile) if err != nil { return nil, fmt.Errorf("loading client certificate key pair: %v", err) } cfg.Certificates = []tls.Certificate{cert} } if t.ClientCertificateAutomate != "" { // TODO: use or enable ctx.IdentityCredentials() ... 
tlsAppIface, err := ctx.App("tls") if err != nil { return nil, fmt.Errorf("getting tls app: %v", err) } tlsApp := tlsAppIface.(*caddytls.TLS) err = tlsApp.Manage(map[string]struct{}{t.ClientCertificateAutomate: {}}) if err != nil { return nil, fmt.Errorf("managing client certificate: %v", err) } cfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { certs := caddytls.AllMatchingCertificates(t.ClientCertificateAutomate) var err error for _, cert := range certs { certCertificate := cert.Certificate // avoid taking address of iteration variable (gosec warning) err = cri.SupportsCertificate(&certCertificate) if err == nil { return &cert.Certificate, nil } } if err == nil { err = fmt.Errorf("no client certificate found for automate name: %s", t.ClientCertificateAutomate) } return nil, err } } // trusted root CAs if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 { ctx.Logger().Warn("root_ca_pool and root_ca_pem_files are deprecated. Use one of the tls.ca_pool.source modules instead") rootPool := x509.NewCertPool() for _, encodedCACert := range t.RootCAPool { caCert, err := decodeBase64DERCert(encodedCACert) if err != nil { return nil, fmt.Errorf("parsing CA certificate: %v", err) } rootPool.AddCert(caCert) } for _, pemFile := range t.RootCAPEMFiles { pemData, err := os.ReadFile(pemFile) if err != nil { return nil, fmt.Errorf("failed reading ca cert: %v", err) } rootPool.AppendCertsFromPEM(pemData) } cfg.RootCAs = rootPool } if t.CARaw != nil { if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 { return nil, fmt.Errorf("conflicting config for Root CA pool") } caRaw, err := ctx.LoadModule(t, "CARaw") if err != nil { return nil, fmt.Errorf("failed to load ca module: %v", err) } ca, ok := caRaw.(caddytls.CA) if !ok { return nil, fmt.Errorf("CA module '%s' is not a certificate pool provider", ca) } cfg.RootCAs = ca.CertPool() } // Renegotiation switch t.Renegotiation { case "never", "": cfg.Renegotiation = tls.RenegotiateNever case 
"once": cfg.Renegotiation = tls.RenegotiateOnceAsClient case "freely": cfg.Renegotiation = tls.RenegotiateFreelyAsClient default: return nil, fmt.Errorf("invalid TLS renegotiation level: %v", t.Renegotiation) } // override for the server name used verify the TLS handshake cfg.ServerName = t.ServerName // throw all security out the window cfg.InsecureSkipVerify = t.InsecureSkipVerify curvesAdded := make(map[tls.CurveID]struct{}) for _, curveName := range t.Curves { curveID := caddytls.SupportedCurves[curveName] if _, ok := curvesAdded[curveID]; !ok { curvesAdded[curveID] = struct{}{} cfg.CurvePreferences = append(cfg.CurvePreferences, curveID) } } // only return a config if it's not empty if reflect.DeepEqual(cfg, new(tls.Config)) { return nil, nil } return cfg, nil } // KeepAlive holds configuration pertaining to HTTP Keep-Alive. type KeepAlive struct { // Whether HTTP Keep-Alive is enabled. Default: `true` Enabled *bool `json:"enabled,omitempty"` // How often to probe for liveness. Default: `30s`. ProbeInterval caddy.Duration `json:"probe_interval,omitempty"` // Maximum number of idle connections. Default: `0`, which means no limit. MaxIdleConns int `json:"max_idle_conns,omitempty"` // Maximum number of idle connections per host. Default: `32`. MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"` // How long connections should be kept alive when idle. Default: `2m`. IdleConnTimeout caddy.Duration `json:"idle_timeout,omitempty"` } // tcpRWTimeoutConn enforces read/write timeouts for a TCP connection. 
// If it fails to set deadlines, the error is logged but does not abort // the read/write attempt (ignoring the error is consistent with what // the standard library does: https://github.com/golang/go/blob/c5da4fb7ac5cb7434b41fc9a1df3bee66c7f1a4d/src/net/http/server.go#L981-L986) type tcpRWTimeoutConn struct { *net.TCPConn readTimeout, writeTimeout time.Duration logger *zap.Logger } func (c *tcpRWTimeoutConn) Read(b []byte) (int, error) { if c.readTimeout > 0 { err := c.TCPConn.SetReadDeadline(time.Now().Add(c.readTimeout)) if err != nil { if ce := c.logger.Check(zapcore.ErrorLevel, "failed to set read deadline"); ce != nil { ce.Write(zap.Error(err)) } } } return c.TCPConn.Read(b) } func (c *tcpRWTimeoutConn) Write(b []byte) (int, error) { if c.writeTimeout > 0 { err := c.TCPConn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) if err != nil { if ce := c.logger.Check(zapcore.ErrorLevel, "failed to set write deadline"); ce != nil { ce.Write(zap.Error(err)) } } } return c.TCPConn.Write(b) } // decodeBase64DERCert base64-decodes, then DER-decodes, certStr. 
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
	// decode base64
	derBytes, err := base64.StdEncoding.DecodeString(certStr)
	if err != nil {
		return nil, err
	}

	// parse the DER-encoded certificate
	return x509.ParseCertificate(derBytes)
}

// Interface guards
var (
	_ caddy.Provisioner                   = (*HTTPTransport)(nil)
	_ http.RoundTripper                   = (*HTTPTransport)(nil)
	_ caddy.CleanerUpper                  = (*HTTPTransport)(nil)
	_ TLSTransport                        = (*HTTPTransport)(nil)
	_ H2CTransport                        = (*HTTPTransport)(nil)
	_ HealthCheckSchemeOverriderTransport = (*HTTPTransport)(nil)
	_ ProxyProtocolTransport              = (*HTTPTransport)(nil)
)

================================================
FILE: modules/caddyhttp/reverseproxy/httptransport_test.go
================================================
package reverseproxy

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// TestHTTPTransportUnmarshalCaddyFileWithCaPools exercises the Caddyfile
// unmarshaling of the transport's CA trust pool configuration, in particular
// the mutual exclusion of `tls_trust_pool` and the deprecated
// `tls_trusted_ca_certs` directive.
func TestHTTPTransportUnmarshalCaddyFileWithCaPools(t *testing.T) {
	// base64-encoded DER certificate fixture used by the inline trust pool
	const test_der_1 = `MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==`
	type args struct {
		d *caddyfile.Dispenser
	}
	tests := []struct {
		name              string
		args              args
		expectedTLSConfig TLSConfig
		wantErr           bool
	}{
		{
			name: "tls_trust_pool without a module argument returns an error",
			args: args{
				d: caddyfile.NewTestDispenser(
					`http {
						tls_trust_pool
					}`),
			},
			wantErr: true,
		},
		{
			name: "providing both 'tls_trust_pool' and 'tls_trusted_ca_certs' returns an error",
			args: args{
				d: caddyfile.NewTestDispenser(fmt.Sprintf(
					`http {
						tls_trust_pool inline %s
						tls_trusted_ca_certs %s
					}`, test_der_1, test_der_1)),
			},
			wantErr: true,
		},
		{
			name: "setting 'tls_trust_pool' and 'tls_trusted_ca_certs' produces an error",
			args: args{
				d: caddyfile.NewTestDispenser(fmt.Sprintf(
					`http {
						tls_trust_pool inline {
							trust_der %s
						}
						tls_trusted_ca_certs %s
					}`, test_der_1, test_der_1)),
			},
			wantErr: true,
		},
		{
			name: "using 'inline' tls_trust_pool loads the module successfully",
			args: args{
				d: caddyfile.NewTestDispenser(fmt.Sprintf(
					`http {
						tls_trust_pool inline {
							trust_der %s
						}
					}
					`, test_der_1)),
			},
			expectedTLSConfig: TLSConfig{CARaw: json.RawMessage(fmt.Sprintf(`{"provider":"inline","trusted_ca_certs":["%s"]}`, test_der_1))},
		},
		{
			name: "setting 'tls_trusted_ca_certs' and 'tls_trust_pool' produces an error",
			args: args{
				d: caddyfile.NewTestDispenser(fmt.Sprintf(
					`http {
						tls_trusted_ca_certs %s
						tls_trust_pool inline {
							trust_der %s
						}
					}`, test_der_1, test_der_1)),
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ht := &HTTPTransport{}
			if err := ht.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr {
				t.Errorf("HTTPTransport.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr && !reflect.DeepEqual(&tt.expectedTLSConfig, ht.TLS) {
				t.Errorf("HTTPTransport.UnmarshalCaddyfile() = %v, want %v", ht, tt.expectedTLSConfig)
			}
		})
	}
}

// TestHTTPTransport_RequestHeaderOps_TLS verifies that default Host header
// operations are produced only when TLS is configured on the transport.
func TestHTTPTransport_RequestHeaderOps_TLS(t *testing.T) {
	var ht HTTPTransport

	// When TLS is nil, expect no header ops
	if ops := ht.RequestHeaderOps(); ops != nil {
		t.Fatalf("expected nil HeaderOps when TLS is nil, got: %#v", ops)
	}

	// When TLS is configured, expect a HeaderOps that sets Host
	ht.TLS = &TLSConfig{}
	ops := ht.RequestHeaderOps()
	if ops == nil {
		t.Fatal("expected non-nil HeaderOps when TLS is set")
	}
	if ops.Set == nil {
		t.Fatalf("expected ops.Set to be non-nil, got nil")
	}
	if got := ops.Set.Get("Host"); got != "{http.reverse_proxy.upstream.hostport}" {
		t.Fatalf("unexpected Host value; want placeholder, got: %s", got)
	}
}

// TestHTTPTransport_DialTLSContext_ProxyProtocol verifies that when TLS and
// ProxyProtocol are both enabled, DialTLSContext is set. This is critical because
// ProxyProtocol modifies req.URL.Host to include client info with "->" separator
// (e.g., "[2001:db8::1]:12345->127.0.0.1:443"), which breaks Go's address parsing.
// Without a custom DialTLSContext, Go's HTTP library would fail with
// "too many colons in address" when trying to parse the mangled host.
func TestHTTPTransport_DialTLSContext_ProxyProtocol(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	tests := []struct {
		name                     string
		tls                      *TLSConfig
		proxyProtocol            string
		serverNameHasPlaceholder bool
		expectDialTLSContext     bool
	}{
		{
			name:                 "no TLS, no proxy protocol",
			tls:                  nil,
			proxyProtocol:        "",
			expectDialTLSContext: false,
		},
		{
			name:                 "TLS without proxy protocol",
			tls:                  &TLSConfig{},
			proxyProtocol:        "",
			expectDialTLSContext: false,
		},
		{
			name:                 "TLS with proxy protocol v1",
			tls:                  &TLSConfig{},
			proxyProtocol:        "v1",
			expectDialTLSContext: true,
		},
		{
			name:                 "TLS with proxy protocol v2",
			tls:                  &TLSConfig{},
			proxyProtocol:        "v2",
			expectDialTLSContext: true,
		},
		{
			name:                     "TLS with placeholder ServerName",
			tls:                      &TLSConfig{ServerName: "{http.request.host}"},
			proxyProtocol:            "",
			serverNameHasPlaceholder: true,
			expectDialTLSContext:     true,
		},
		{
			name:                     "TLS with placeholder ServerName and proxy protocol",
			tls:                      &TLSConfig{ServerName: "{http.request.host}"},
			proxyProtocol:            "v2",
			serverNameHasPlaceholder: true,
			expectDialTLSContext:     true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ht := &HTTPTransport{
				TLS:           tt.tls,
				ProxyProtocol: tt.proxyProtocol,
			}
			rt, err := ht.NewTransport(ctx)
			if err != nil {
				t.Fatalf("NewTransport() error = %v", err)
			}
			hasDialTLSContext := rt.DialTLSContext != nil
			if hasDialTLSContext != tt.expectDialTLSContext {
				t.Errorf("DialTLSContext set = %v, want %v", hasDialTLSContext, tt.expectDialTLSContext)
			}
		})
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/metrics.go
================================================
package reverseproxy

import (
	"errors"
	"runtime/debug"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
)

var
reverseProxyMetrics = struct {
	// once guards one-time creation of the gauge vector below
	once             sync.Once
	upstreamsHealthy *prometheus.GaugeVec
	logger           *zap.Logger
}{}

// initReverseProxyMetrics lazily creates the upstreams-healthy gauge and
// registers it with the given registry, tolerating duplicate registration.
func initReverseProxyMetrics(handler *Handler, registry *prometheus.Registry) {
	const ns, sub = "caddy", "reverse_proxy"
	upstreamsLabels := []string{"upstream"}
	reverseProxyMetrics.once.Do(func() {
		reverseProxyMetrics.upstreamsHealthy = prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: ns,
			Subsystem: sub,
			Name:      "upstreams_healthy",
			Help:      "Health status of reverse proxy upstreams.",
		}, upstreamsLabels)
	})

	// duplicate registration could happen if multiple sites with reverse proxy are configured; so ignore the error because
	// there's no good way to capture having multiple sites with reverse proxy. If this happens, the metrics will be
	// registered twice, but the second registration will be ignored.
	if err := registry.Register(reverseProxyMetrics.upstreamsHealthy); err != nil && !errors.Is(err, prometheus.AlreadyRegisteredError{
		ExistingCollector: reverseProxyMetrics.upstreamsHealthy,
		NewCollector:      reverseProxyMetrics.upstreamsHealthy,
	}) {
		panic(err)
	}

	reverseProxyMetrics.logger = handler.logger.Named("reverse_proxy.metrics")
}

// metricsUpstreamsHealthyUpdater periodically refreshes the
// upstreams-healthy gauge for a handler's upstreams.
type metricsUpstreamsHealthyUpdater struct {
	handler *Handler
}

// newMetricsUpstreamsHealthyUpdater initializes the metrics (idempotently),
// resets the gauge, and returns an updater bound to the handler.
func newMetricsUpstreamsHealthyUpdater(handler *Handler, ctx caddy.Context) *metricsUpstreamsHealthyUpdater {
	initReverseProxyMetrics(handler, ctx.GetMetricsRegistry())
	reverseProxyMetrics.upstreamsHealthy.Reset()

	return &metricsUpstreamsHealthyUpdater{handler}
}

// init starts a background goroutine that updates the gauge immediately and
// then every 10 seconds until the handler's context is canceled. Panics in
// the updater are recovered and logged rather than crashing the process.
func (m *metricsUpstreamsHealthyUpdater) init() {
	go func() {
		defer func() {
			if err := recover(); err != nil {
				if c := reverseProxyMetrics.logger.Check(zapcore.ErrorLevel, "upstreams healthy metrics updater panicked"); c != nil {
					c.Write(
						zap.Any("error", err),
						zap.ByteString("stack", debug.Stack()),
					)
				}
			}
		}()

		m.update()

		ticker := time.NewTicker(10 * time.Second)
		for {
			select {
			case <-ticker.C:
				m.update()
			case <-m.handler.ctx.Done():
				ticker.Stop()
				return
			}
		}
	}()
}

// update sets the gauge to 1 for healthy upstreams and 0 otherwise,
// labeled by the upstream's dial address.
func (m *metricsUpstreamsHealthyUpdater) update() {
	for _, upstream := range m.handler.Upstreams {
		labels := prometheus.Labels{"upstream": upstream.Dial}

		gaugeValue := 0.0
		if upstream.Healthy() {
			gaugeValue = 1.0
		}

		reverseProxyMetrics.upstreamsHealthy.With(labels).Set(gaugeValue)
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/passive_health_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package reverseproxy

import (
	"context"
	"testing"
	"time"

	"github.com/caddyserver/caddy/v2"
)

// newPassiveHandler builds a minimal Handler with passive health checks
// configured and a live caddy.Context so the fail-forgetter goroutine can
// be cancelled cleanly. The caller must call cancel() when done.
func newPassiveHandler(t *testing.T, maxFails int, failDuration time.Duration) (*Handler, context.CancelFunc) {
	t.Helper()
	caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	h := &Handler{
		ctx: caddyCtx,
		HealthChecks: &HealthChecks{
			Passive: &PassiveHealthChecks{
				MaxFails:     maxFails,
				FailDuration: caddy.Duration(failDuration),
			},
		},
	}
	return h, cancel
}

// provisionedStaticUpstream creates a static upstream, registers it in the
// UsagePool, and returns a cleanup func that removes it from the pool.
func provisionedStaticUpstream(t *testing.T, h *Handler, addr string) (*Upstream, func()) {
	t.Helper()
	u := &Upstream{Dial: addr}
	h.provisionUpstream(u, false)
	return u, func() {
		// drop the shared Host from the usage pool so tests stay isolated
		_, _ = hosts.Delete(addr)
	}
}

// provisionedDynamicUpstream creates a dynamic upstream, registers it in
// dynamicHosts, and returns a cleanup func that removes it.
func provisionedDynamicUpstream(t *testing.T, h *Handler, addr string) (*Upstream, func()) {
	t.Helper()
	u := &Upstream{Dial: addr}
	h.provisionUpstream(u, true)
	return u, func() {
		dynamicHostsMu.Lock()
		delete(dynamicHosts, addr)
		dynamicHostsMu.Unlock()
	}
}

// --- countFailure behaviour ---

// TestCountFailureNoopWhenNoHealthChecks verifies that countFailure is a no-op
// when HealthChecks is nil.
func TestCountFailureNoopWhenNoHealthChecks(t *testing.T) {
	resetDynamicHosts()
	h := &Handler{}
	u := &Upstream{Dial: "10.1.0.1:80", Host: new(Host)}

	h.countFailure(u)

	if u.Host.Fails() != 0 {
		t.Errorf("expected 0 fails with no HealthChecks config, got %d", u.Host.Fails())
	}
}

// TestCountFailureNoopWhenZeroDuration verifies that countFailure is a no-op
// when FailDuration is 0 (the zero value disables passive checks).
func TestCountFailureNoopWhenZeroDuration(t *testing.T) {
	resetDynamicHosts()
	caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	h := &Handler{
		ctx: caddyCtx,
		HealthChecks: &HealthChecks{
			Passive: &PassiveHealthChecks{MaxFails: 1, FailDuration: 0},
		},
	}
	u := &Upstream{Dial: "10.1.0.2:80", Host: new(Host)}

	h.countFailure(u)

	if u.Host.Fails() != 0 {
		t.Errorf("expected 0 fails with zero FailDuration, got %d", u.Host.Fails())
	}
}

// TestCountFailureIncrementsCount verifies that countFailure increments the
// fail count on the upstream's Host.
func TestCountFailureIncrementsCount(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()
	u := &Upstream{Dial: "10.1.0.3:80", Host: new(Host)}

	h.countFailure(u)

	if u.Host.Fails() != 1 {
		t.Errorf("expected 1 fail after countFailure, got %d", u.Host.Fails())
	}
}

// TestCountFailureDecrementsAfterDuration verifies that the fail count is
// decremented back after FailDuration elapses.
func TestCountFailureDecrementsAfterDuration(t *testing.T) {
	resetDynamicHosts()
	const failDuration = 50 * time.Millisecond
	h, cancel := newPassiveHandler(t, 2, failDuration)
	defer cancel()
	u := &Upstream{Dial: "10.1.0.4:80", Host: new(Host)}

	h.countFailure(u)
	if u.Host.Fails() != 1 {
		t.Fatalf("expected 1 fail immediately after countFailure, got %d", u.Host.Fails())
	}

	// Wait long enough for the forgetter goroutine to fire.
	time.Sleep(3 * failDuration)

	if u.Host.Fails() != 0 {
		t.Errorf("expected fail count to return to 0 after FailDuration, got %d", u.Host.Fails())
	}
}

// TestCountFailureCancelledContextForgets verifies that cancelling the handler
// context (simulating a config unload) also triggers the forgetter to run,
// decrementing the fail count.
func TestCountFailureCancelledContextForgets(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Hour) // very long duration
	u := &Upstream{Dial: "10.1.0.5:80", Host: new(Host)}

	h.countFailure(u)
	if u.Host.Fails() != 1 {
		t.Fatalf("expected 1 fail immediately after countFailure, got %d", u.Host.Fails())
	}

	// Cancelling the context should cause the forgetter goroutine to exit and
	// decrement the count.
	cancel()
	time.Sleep(50 * time.Millisecond)

	if u.Host.Fails() != 0 {
		t.Errorf("expected fail count to be decremented after context cancel, got %d", u.Host.Fails())
	}
}

// --- static upstream passive health check ---

// TestStaticUpstreamHealthyWithNoFailures verifies that a static upstream with
// no recorded failures is considered healthy.
func TestStaticUpstreamHealthyWithNoFailures(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()
	u, cleanup := provisionedStaticUpstream(t, h, "10.2.0.1:80")
	defer cleanup()

	if !u.Healthy() {
		t.Error("upstream with no failures should be healthy")
	}
}

// TestStaticUpstreamUnhealthyAtMaxFails verifies that a static upstream is
// marked unhealthy once its fail count reaches MaxFails.
func TestStaticUpstreamUnhealthyAtMaxFails(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()
	u, cleanup := provisionedStaticUpstream(t, h, "10.2.0.2:80")
	defer cleanup()

	h.countFailure(u)
	if !u.Healthy() {
		t.Error("upstream should still be healthy after 1 of 2 allowed failures")
	}

	h.countFailure(u)
	if u.Healthy() {
		t.Error("upstream should be unhealthy after reaching MaxFails=2")
	}
}

// TestStaticUpstreamRecoversAfterFailDuration verifies that a static upstream
// returns to healthy once its failures expire.
func TestStaticUpstreamRecoversAfterFailDuration(t *testing.T) {
	resetDynamicHosts()
	const failDuration = 50 * time.Millisecond
	h, cancel := newPassiveHandler(t, 1, failDuration)
	defer cancel()
	u, cleanup := provisionedStaticUpstream(t, h, "10.2.0.3:80")
	defer cleanup()

	h.countFailure(u)
	if u.Healthy() {
		t.Fatal("upstream should be unhealthy immediately after MaxFails failure")
	}

	time.Sleep(3 * failDuration)

	if !u.Healthy() {
		t.Errorf("upstream should recover to healthy after FailDuration, Fails=%d", u.Host.Fails())
	}
}

// TestStaticUpstreamHealthPersistedAcrossReprovisioning verifies that static
// upstreams share a Host via the UsagePool, so a second call to provisionUpstream
// for the same address (as happens on config reload) sees the accumulated state.
func TestStaticUpstreamHealthPersistedAcrossReprovisioning(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()

	u1, cleanup1 := provisionedStaticUpstream(t, h, "10.2.0.4:80")
	defer cleanup1()

	h.countFailure(u1)
	h.countFailure(u1)

	// Simulate a second handler instance referencing the same upstream
	// (e.g. after a config reload that keeps the same backend address).
	u2, cleanup2 := provisionedStaticUpstream(t, h, "10.2.0.4:80")
	defer cleanup2()

	if u1.Host != u2.Host {
		t.Fatal("expected both Upstream structs to share the same *Host via UsagePool")
	}
	if u2.Healthy() {
		t.Error("re-provisioned upstream should still see the prior fail count and be unhealthy")
	}
}

// --- dynamic upstream passive health check ---

// TestDynamicUpstreamHealthyWithNoFailures verifies that a freshly provisioned
// dynamic upstream is healthy.
func TestDynamicUpstreamHealthyWithNoFailures(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()
	u, cleanup := provisionedDynamicUpstream(t, h, "10.3.0.1:80")
	defer cleanup()

	if !u.Healthy() {
		t.Error("dynamic upstream with no failures should be healthy")
	}
}

// TestDynamicUpstreamUnhealthyAtMaxFails verifies that a dynamic upstream is
// marked unhealthy once its fail count reaches MaxFails.
func TestDynamicUpstreamUnhealthyAtMaxFails(t *testing.T) {
	resetDynamicHosts()
	h, cancel := newPassiveHandler(t, 2, time.Minute)
	defer cancel()
	u, cleanup := provisionedDynamicUpstream(t, h, "10.3.0.2:80")
	defer cleanup()

	h.countFailure(u)
	if !u.Healthy() {
		t.Error("dynamic upstream should still be healthy after 1 of 2 allowed failures")
	}

	h.countFailure(u)
	if u.Healthy() {
		t.Error("dynamic upstream should be unhealthy after reaching MaxFails=2")
	}
}

// TestDynamicUpstreamFailCountPersistedBetweenRequests is the core regression
// test: it simulates two sequential (non-concurrent) requests to the same
// dynamic upstream.
Before the fix, the UsagePool entry would be deleted // between requests, wiping the fail count. Now it should survive. func TestDynamicUpstreamFailCountPersistedBetweenRequests(t *testing.T) { resetDynamicHosts() h, cancel := newPassiveHandler(t, 2, time.Minute) defer cancel() // --- first request --- u1 := &Upstream{Dial: "10.3.0.3:80"} h.provisionUpstream(u1, true) h.countFailure(u1) if u1.Host.Fails() != 1 { t.Fatalf("expected 1 fail after first request, got %d", u1.Host.Fails()) } // Simulate end of first request: no delete from any pool (key difference // vs. the old behaviour where hosts.Delete was deferred). // --- second request: brand-new *Upstream struct, same dial address --- u2 := &Upstream{Dial: "10.3.0.3:80"} h.provisionUpstream(u2, true) if u1.Host != u2.Host { t.Fatal("expected both requests to share the same *Host pointer from dynamicHosts") } if u2.Host.Fails() != 1 { t.Errorf("expected fail count to persist across requests, got %d", u2.Host.Fails()) } // A second failure now tips it over MaxFails=2. h.countFailure(u2) if u2.Healthy() { t.Error("upstream should be unhealthy after accumulated failures across requests") } // Cleanup. dynamicHostsMu.Lock() delete(dynamicHosts, "10.3.0.3:80") dynamicHostsMu.Unlock() } // TestDynamicUpstreamRecoveryAfterFailDuration verifies that a dynamic // upstream's fail count expires and it returns to healthy. func TestDynamicUpstreamRecoveryAfterFailDuration(t *testing.T) { resetDynamicHosts() const failDuration = 50 * time.Millisecond h, cancel := newPassiveHandler(t, 1, failDuration) defer cancel() u, cleanup := provisionedDynamicUpstream(t, h, "10.3.0.4:80") defer cleanup() h.countFailure(u) if u.Healthy() { t.Fatal("upstream should be unhealthy immediately after MaxFails failure") } time.Sleep(3 * failDuration) // Re-provision (as a new request would) to get fresh *Upstream with policy set. 
u2 := &Upstream{Dial: "10.3.0.4:80"} h.provisionUpstream(u2, true) if !u2.Healthy() { t.Errorf("dynamic upstream should recover to healthy after FailDuration, Fails=%d", u2.Host.Fails()) } } // TestDynamicUpstreamMaxRequestsFromUnhealthyRequestCount verifies that // UnhealthyRequestCount is copied into MaxRequests so Full() works correctly. func TestDynamicUpstreamMaxRequestsFromUnhealthyRequestCount(t *testing.T) { resetDynamicHosts() caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()}) defer cancel() h := &Handler{ ctx: caddyCtx, HealthChecks: &HealthChecks{ Passive: &PassiveHealthChecks{ UnhealthyRequestCount: 3, }, }, } u, cleanup := provisionedDynamicUpstream(t, h, "10.3.0.5:80") defer cleanup() if u.MaxRequests != 3 { t.Errorf("expected MaxRequests=3 from UnhealthyRequestCount, got %d", u.MaxRequests) } // Should not be full with fewer requests than the limit. _ = u.Host.countRequest(2) if u.Full() { t.Error("upstream should not be full with 2 of 3 allowed requests") } _ = u.Host.countRequest(1) if !u.Full() { t.Error("upstream should be full at UnhealthyRequestCount concurrent requests") } } ================================================ FILE: modules/caddyhttp/reverseproxy/retries_test.go ================================================ package reverseproxy import ( "errors" "io" "net" "net/http" "net/http/httptest" "strings" "sync" "testing" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) // prepareTestRequest injects the context values that ServeHTTP and // proxyLoopIteration require (caddy.ReplacerCtxKey, VarsCtxKey, etc.) using // the same helper that the real HTTP server uses. // // A zero-value Server is passed so that caddyhttp.ServerCtxKey is set to a // non-nil pointer; reverseProxy dereferences it to check ShouldLogCredentials. 
func prepareTestRequest(req *http.Request) *http.Request { repl := caddy.NewReplacer() return caddyhttp.PrepareRequest(req, repl, nil, &caddyhttp.Server{}) } // closeOnCloseReader is an io.ReadCloser whose Close method actually makes // subsequent reads fail, mimicking the behaviour of a real HTTP request body // (as opposed to io.NopCloser, whose Close is a no-op and would mask the bug // we are testing). type closeOnCloseReader struct { mu sync.Mutex r *strings.Reader closed bool } func newCloseOnCloseReader(s string) *closeOnCloseReader { return &closeOnCloseReader{r: strings.NewReader(s)} } func (c *closeOnCloseReader) Read(p []byte) (int, error) { c.mu.Lock() defer c.mu.Unlock() if c.closed { return 0, errors.New("http: invalid Read on closed Body") } return c.r.Read(p) } func (c *closeOnCloseReader) Close() error { c.mu.Lock() defer c.mu.Unlock() c.closed = true return nil } // deadUpstreamAddr returns a TCP address that is guaranteed to refuse // connections: we bind a listener, note its address, close it immediately, // and return the address. Any dial to that address will get ECONNREFUSED. func deadUpstreamAddr(t *testing.T) string { t.Helper() ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("failed to create dead upstream listener: %v", err) } addr := ln.Addr().String() ln.Close() return addr } // testTransport wraps http.Transport to: // 1. Set the URL scheme to "http" when it is empty (matching what // HTTPTransport.SetScheme does in production; cloneRequest strips the // scheme intentionally so a plain *http.Transport would fail with // "unsupported protocol scheme"). // 2. Wrap dial errors as DialError so that tryAgain correctly identifies them // as safe-to-retry regardless of request method (as HTTPTransport does in // production via its custom dialer). 
type testTransport struct{ *http.Transport } func (t testTransport) RoundTrip(req *http.Request) (*http.Response, error) { if req.URL.Scheme == "" { req.URL.Scheme = "http" } resp, err := t.Transport.RoundTrip(req) if err != nil { // Wrap dial errors as DialError to match production behaviour. // Without this wrapping, tryAgain treats ECONNREFUSED on a POST // request as non-retryable (only GET is retried by default when // the error is not a DialError). var opErr *net.OpError if errors.As(err, &opErr) && opErr.Op == "dial" { return nil, DialError{err} } } return resp, err } // minimalHandler returns a Handler with only the fields required by ServeHTTP // set directly, bypassing Provision (which requires a full Caddy runtime). // RoundRobinSelection is used so that successive iterations of the proxy loop // advance through the upstream pool in a predictable order. func minimalHandler(retries int, upstreams ...*Upstream) *Handler { return &Handler{ logger: zap.NewNop(), Transport: testTransport{&http.Transport{}}, Upstreams: upstreams, LoadBalancing: &LoadBalancing{ Retries: retries, SelectionPolicy: &RoundRobinSelection{}, // RetryMatch intentionally nil: dial errors are always retried // regardless of RetryMatch or request method. }, // ctx, connections, connectionsMu, events: zero/nil values are safe // for the code paths exercised by these tests (TryInterval=0 so // ctx.Done() is never consulted; no WebSocket hijacking; no passive // health-check event emission). } } // TestDialErrorBodyRetry verifies that a POST request whose body has NOT been // pre-buffered via request_buffers can still be retried after a dial error. 
// // Before the fix, a dial error caused Go's transport to close the shared body // (via cloneRequest's shallow copy), so the retry attempt would read from an // already-closed io.ReadCloser and produce: // // http: invalid Read on closed Body → HTTP 502 // // After the fix the handler wraps the body in noCloseBody when retries are // configured, preventing the transport's Close() from propagating to the // shared body. Since dial errors never read any bytes, the body remains at // position 0 for the retry. func TestDialErrorBodyRetry(t *testing.T) { // Good upstream: echoes the request body with 200 OK. goodServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "read body: "+err.Error(), http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) _, _ = w.Write(body) })) t.Cleanup(goodServer.Close) const requestBody = "hello, retry" tests := []struct { name string method string body string retries int wantStatus int wantBody string }{ { // Core regression case: POST with a body, no request_buffers, // dial error on first upstream → retry to second upstream succeeds. name: "POST body retried after dial error", method: http.MethodPost, body: requestBody, retries: 1, wantStatus: http.StatusOK, wantBody: requestBody, }, { // Dial errors are always retried regardless of method, but there // is no body to re-read, so GET has always worked. Keep it as a // sanity check that we did not break the no-body path. name: "GET without body retried after dial error", method: http.MethodGet, body: "", retries: 1, wantStatus: http.StatusOK, wantBody: "", }, { // Without any retry configuration the handler must give up on the // first dial error and return a 502. Confirms no wrapping occurs // in the no-retry path. 
name: "no retries configured returns 502 on dial error", method: http.MethodPost, body: requestBody, retries: 0, wantStatus: http.StatusBadGateway, wantBody: "", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { dead := deadUpstreamAddr(t) // Build the upstream pool. RoundRobinSelection starts its // counter at 0 and increments before returning, so with a // two-element pool it picks index 1 first, then index 0. // Put the good upstream at index 0 and the dead one at // index 1 so that: // attempt 1 → pool[1] = dead → DialError (ECONNREFUSED) // attempt 2 → pool[0] = good → 200 upstreams := []*Upstream{ {Host: new(Host), Dial: goodServer.Listener.Addr().String()}, {Host: new(Host), Dial: dead}, } if tc.retries == 0 { // For the "no retries" case use only the dead upstream so // there is nowhere to retry to. upstreams = []*Upstream{ {Host: new(Host), Dial: dead}, } } h := minimalHandler(tc.retries, upstreams...) // Use closeOnCloseReader so that Close() truly prevents further // reads, matching real http.body semantics. io.NopCloser would // mask the bug because its Close is a no-op. var bodyReader io.ReadCloser if tc.body != "" { bodyReader = newCloseOnCloseReader(tc.body) } req := httptest.NewRequest(tc.method, "http://example.com/", bodyReader) if bodyReader != nil { // httptest.NewRequest wraps the reader in NopCloser; replace // it with our close-aware reader so Close() is propagated. req.Body = bodyReader req.ContentLength = int64(len(tc.body)) } req = prepareTestRequest(req) rec := httptest.NewRecorder() err := h.ServeHTTP(rec, req, caddyhttp.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { return nil })) // For error cases (e.g. 502) ServeHTTP returns a HandlerError // rather than writing the status itself. 
gotStatus := rec.Code if err != nil { if herr, ok := err.(caddyhttp.HandlerError); ok { gotStatus = herr.StatusCode } } if gotStatus != tc.wantStatus { t.Errorf("status: got %d, want %d (err=%v)", gotStatus, tc.wantStatus, err) } if tc.wantBody != "" && rec.Body.String() != tc.wantBody { t.Errorf("body: got %q, want %q", rec.Body.String(), tc.wantBody) } }) } } ================================================ FILE: modules/caddyhttp/reverseproxy/reverseproxy.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package reverseproxy import ( "bytes" "context" "crypto/rand" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/http/httptrace" "net/netip" "net/textproto" "net/url" "strconv" "strings" "sync" "sync/atomic" "time" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/net/http/httpguts" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyevents" "github.com/caddyserver/caddy/v2/modules/caddyhttp" "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite" ) // inFlightRequests uses sync.Map with atomic.Int64 for lock-free updates on the hot path var inFlightRequests sync.Map func incInFlightRequest(address string) { v, _ := inFlightRequests.LoadOrStore(address, new(atomic.Int64)) v.(*atomic.Int64).Add(1) } func decInFlightRequest(address string) { if v, ok := inFlightRequests.Load(address); ok { if v.(*atomic.Int64).Add(-1) <= 0 { inFlightRequests.Delete(address) } } } func getInFlightRequests() map[string]int64 { copyMap := make(map[string]int64) inFlightRequests.Range(func(key, value any) bool { copyMap[key.(string)] = value.(*atomic.Int64).Load() return true }) return copyMap } func init() { caddy.RegisterModule(Handler{}) } // Handler implements a highly configurable and production-ready reverse proxy. 
//
// Upon proxying, this module sets the following placeholders (which can be used
// both within and after this handler; for example, in response headers):
//
//	Placeholder | Description
//	------------|-------------
//	`{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config
//	`{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream
//	`{http.reverse_proxy.upstream.host}` | The host of the upstream
//	`{http.reverse_proxy.upstream.port}` | The port of the upstream
//	`{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream
//	`{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream
//	`{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream
//	`{http.reverse_proxy.upstream.latency}` | How long it took the proxy upstream to write the response header.
//	`{http.reverse_proxy.upstream.latency_ms}` | Same as 'latency', but in milliseconds.
//	`{http.reverse_proxy.upstream.duration}` | Time spent proxying to the upstream, including writing response body to client.
//	`{http.reverse_proxy.upstream.duration_ms}` | Same as 'upstream.duration', but in milliseconds.
//	`{http.reverse_proxy.duration}` | Total time spent proxying, including selecting an upstream, retries, and writing response.
//	`{http.reverse_proxy.duration_ms}` | Same as 'duration', but in milliseconds.
//	`{http.reverse_proxy.retries}` | The number of retries actually performed to communicate with an upstream.
type Handler struct {
	// Configures the method of transport for the proxy. A transport
	// is what performs the actual "round trip" to the backend.
	// The default transport is plaintext HTTP.
	TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`

	// A circuit breaker may be used to relieve pressure on a backend
	// that is beginning to exhibit symptoms of stress or latency.
	// By default, there is no circuit breaker.
	CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`

	// Load balancing distributes load/requests between backends.
	LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`

	// Health checks update the status of backends, whether they are
	// up or down. Down backends will not be proxied to.
	HealthChecks *HealthChecks `json:"health_checks,omitempty"`

	// Upstreams is the static list of backends to proxy to.
	Upstreams UpstreamPool `json:"upstreams,omitempty"`

	// A module for retrieving the list of upstreams dynamically. Dynamic
	// upstreams are retrieved at every iteration of the proxy loop for
	// each request (i.e. before every proxy attempt within every request).
	// Active health checks do not work on dynamic upstreams, and passive
	// health checks are only effective on dynamic upstreams if the proxy
	// server is busy enough that concurrent requests to the same backends
	// are continuous. Instead of health checks for dynamic upstreams, it
	// is recommended that the dynamic upstream module only return available
	// backends in the first place.
	DynamicUpstreamsRaw json.RawMessage `json:"dynamic_upstreams,omitempty" caddy:"namespace=http.reverse_proxy.upstreams inline_key=source"`

	// Adjusts how often to flush the response buffer. By default,
	// no periodic flushing is done. A negative value disables
	// response buffering, and flushes immediately after each
	// write to the client. This option is ignored when the upstream's
	// response is recognized as a streaming response, or if its
	// content length is -1; for such responses, writes are flushed
	// to the client immediately.
	FlushInterval caddy.Duration `json:"flush_interval,omitempty"`

	// A list of IP ranges (supports CIDR notation) from which
	// X-Forwarded-* header values should be trusted. By default,
	// no proxies are trusted, so existing values will be ignored
	// when setting these headers. If the proxy is trusted, then
	// existing values will be used when constructing the final
	// header values.
	TrustedProxies []string `json:"trusted_proxies,omitempty"`

	// Headers manipulates headers between Caddy and the backend.
	// By default, all headers are passed-thru without changes,
	// with the exceptions of special hop-by-hop headers.
	//
	// X-Forwarded-For, X-Forwarded-Proto and X-Forwarded-Host
	// are also set implicitly.
	Headers *headers.Handler `json:"headers,omitempty"`

	// If nonzero, the entire request body up to this size will be read
	// and buffered in memory before being proxied to the backend. This
	// should be avoided if at all possible for performance reasons, but
	// could be useful if the backend is intolerant of read latency or
	// chunked encodings.
	RequestBuffers int64 `json:"request_buffers,omitempty"`

	// If nonzero, the entire response body up to this size will be read
	// and buffered in memory before being proxied to the client. This
	// should be avoided if at all possible for performance reasons, but
	// could be useful if the backend has tighter memory constraints.
	ResponseBuffers int64 `json:"response_buffers,omitempty"`

	// If nonzero, streaming requests such as WebSockets will be
	// forcibly closed at the end of the timeout. Default: no timeout.
	StreamTimeout caddy.Duration `json:"stream_timeout,omitempty"`

	// If nonzero, streaming requests such as WebSockets will not be
	// closed when the proxy config is unloaded, and instead the stream
	// will remain open until the delay is complete. In other words,
	// enabling this prevents streams from closing when Caddy's config
	// is reloaded. Enabling this may be a good idea to avoid a thundering
	// herd of reconnecting clients which had their connections closed
	// by the previous config closing. Default: no delay.
	StreamCloseDelay caddy.Duration `json:"stream_close_delay,omitempty"`

	// If configured, rewrites the copy of the upstream request.
	// Allows changing the request method and URI (path and query).
	// Since the rewrite is applied to the copy, it does not persist
	// past the reverse proxy handler.
	// If the method is changed to `GET` or `HEAD`, the request body
	// will not be copied to the backend. This allows a later request
	// handler -- either in a `handle_response` route, or after -- to
	// read the body.
	// By default, no rewrite is performed, and the method and URI
	// from the incoming request is used as-is for proxying.
	Rewrite *rewrite.Rewrite `json:"rewrite,omitempty"`

	// List of handlers and their associated matchers to evaluate
	// after successful roundtrips. The first handler that matches
	// the response from a backend will be invoked. The response
	// body from the backend will not be written to the client;
	// it is up to the handler to finish handling the response.
	// If passive health checks are enabled, any errors from the
	// handler chain will not affect the health status of the
	// backend.
	//
	// Three new placeholders are available in this handler chain:
	// - `{http.reverse_proxy.status_code}` The status code from the response
	// - `{http.reverse_proxy.status_text}` The status text from the response
	// - `{http.reverse_proxy.header.*}` The headers from the response
	HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`

	// If set, the proxy will write very detailed logs about its
	// inner workings. Enable this only when debugging, as it
	// will produce a lot of output.
	//
	// EXPERIMENTAL: This feature is subject to change or removal.
	VerboseLogs bool `json:"verbose_logs,omitempty"`

	// Decoded module instances; populated by Provision from the
	// corresponding *Raw fields above (or set directly in tests).
	Transport        http.RoundTripper `json:"-"`
	CB               CircuitBreaker    `json:"-"`
	DynamicUpstreams UpstreamSource    `json:"-"`

	// transportHeaderOps is a set of header operations provided
	// by the transport at provision time, if the transport
	// implements TransportHeaderOpsProvider. These ops are
	// applied before any user-configured header ops so the
	// user can override transport defaults.
	transportHeaderOps *headers.HeaderOps

	// Holds the parsed CIDR ranges from TrustedProxies
	trustedProxies []netip.Prefix

	// Holds the named response matchers from the Caddyfile while adapting
	responseMatchers map[string]caddyhttp.ResponseMatcher

	// Holds the handle_response Caddyfile tokens while adapting
	handleResponseSegments []*caddyfile.Dispenser

	// Stores upgraded requests (hijacked connections) for proper cleanup
	connections           map[io.ReadWriteCloser]openConnection
	connectionsCloseTimer *time.Timer
	connectionsMu         *sync.Mutex

	// Runtime plumbing assigned during Provision.
	ctx    caddy.Context
	logger *zap.Logger
	events *caddyevents.App
}

// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.reverse_proxy",
		New: func() caddy.Module { return new(Handler) },
	}
}

// Provision ensures that h is set up properly before use.
func (h *Handler) Provision(ctx caddy.Context) error {
	eventAppIface, err := ctx.App("events")
	if err != nil {
		return fmt.Errorf("getting events app: %v", err)
	}
	h.events = eventAppIface.(*caddyevents.App)
	h.ctx = ctx
	h.logger = ctx.Logger()
	h.connections = make(map[io.ReadWriteCloser]openConnection)
	h.connectionsMu = new(sync.Mutex)

	// warn about unsafe buffering config
	if h.RequestBuffers == -1 || h.ResponseBuffers == -1 {
		h.logger.Warn("UNLIMITED BUFFERING: buffering is enabled without any cap on buffer size, which can result in OOM crashes")
	}

	// start by loading modules
	if h.TransportRaw != nil {
		mod, err := ctx.LoadModule(h, "TransportRaw")
		if err != nil {
			return fmt.Errorf("loading transport: %v", err)
		}
		h.Transport = mod.(http.RoundTripper)
		// set default buffer sizes if applicable
		if bt, ok := h.Transport.(BufferedTransport); ok {
			reqBuffers, respBuffers := bt.DefaultBufferSizes()
			if h.RequestBuffers == 0 {
				h.RequestBuffers = reqBuffers
			}
			if h.ResponseBuffers == 0 {
				h.ResponseBuffers = respBuffers
			}
		}
	}
	if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
		mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw")
		if err != nil {
			return fmt.Errorf("loading load balancing selection policy: %s", err)
		}
		h.LoadBalancing.SelectionPolicy = mod.(Selector)
	}
	if h.CBRaw != nil {
		mod, err := ctx.LoadModule(h, "CBRaw")
		if err != nil {
			return fmt.Errorf("loading circuit breaker: %s", err)
		}
		h.CB = mod.(CircuitBreaker)
	}
	if h.DynamicUpstreamsRaw != nil {
		mod, err := ctx.LoadModule(h, "DynamicUpstreamsRaw")
		if err != nil {
			return fmt.Errorf("loading upstream source module: %v", err)
		}
		h.DynamicUpstreams = mod.(UpstreamSource)
	}

	// parse trusted proxy CIDRs ahead of time; a bare IP (no "/") is
	// treated as a single-address prefix
	for _, str := range h.TrustedProxies {
		if strings.Contains(str, "/") {
			ipNet, err := netip.ParsePrefix(str)
			if err != nil {
				return fmt.Errorf("parsing CIDR expression: '%s': %v", str, err)
			}
			h.trustedProxies = append(h.trustedProxies, ipNet)
		} else {
			ipAddr, err := netip.ParseAddr(str)
			if err != nil {
				return fmt.Errorf("invalid IP address: '%s': %v", str, err)
			}
			ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
			h.trustedProxies = append(h.trustedProxies, ipNew)
		}
	}

	// ensure any embedded headers handler module gets provisioned
	// (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt
	// for what happens if we forget to provision it)
	if h.Headers != nil {
		err := h.Headers.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning embedded headers handler: %v", err)
		}
	}

	if h.Rewrite != nil {
		err := h.Rewrite.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning rewrite: %v", err)
		}
	}

	// set up transport; default is plaintext HTTP when none was configured
	if h.Transport == nil {
		t := &HTTPTransport{}
		err := t.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning default transport: %v", err)
		}
		h.Transport = t
	}

	// If the transport can provide header ops, cache them now so we don't
	// have to compute them per-request. Provision the HeaderOps if present
	// so any runtime artifacts (like precompiled regex) are prepared.
	if tph, ok := h.Transport.(RequestHeaderOpsTransport); ok {
		h.transportHeaderOps = tph.RequestHeaderOps()
		if h.transportHeaderOps != nil {
			if err := h.transportHeaderOps.Provision(ctx); err != nil {
				return fmt.Errorf("provisioning transport header ops: %v", err)
			}
		}
	}

	// set up load balancing
	if h.LoadBalancing == nil {
		h.LoadBalancing = new(LoadBalancing)
	}
	if h.LoadBalancing.SelectionPolicy == nil {
		h.LoadBalancing.SelectionPolicy = RandomSelection{}
	}
	if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 {
		// a non-zero try_duration with a zero try_interval
		// will always spin the CPU for try_duration if the
		// upstream is local or low-latency; avoid that by
		// defaulting to a sane wait period between attempts
		h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond)
	}
	lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw")
	if err != nil {
		return err
	}
	err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets)
	if err != nil {
		return err
	}

	// set up upstreams (static pool; dynamic upstreams are provisioned
	// per-request in the proxy loop)
	for _, u := range h.Upstreams {
		h.provisionUpstream(u, false)
	}

	if h.HealthChecks != nil {
		// set defaults on passive health checks, if necessary
		if h.HealthChecks.Passive != nil {
			h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive")
			if h.HealthChecks.Passive.MaxFails == 0 {
				h.HealthChecks.Passive.MaxFails = 1
			}
		}

		// if active health checks are enabled, configure them and start a worker
		if h.HealthChecks.Active != nil {
			err := h.HealthChecks.Active.Provision(ctx, h)
			if err != nil {
				return err
			}

			if h.HealthChecks.Active.IsEnabled() {
				go h.activeHealthChecker()
			}
		}
	}

	// set up any response routes
	for i, rh := range h.HandleResponse {
		err := rh.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning response handler %d: %v", i, err)
		}
	}

	upstreamHealthyUpdater := newMetricsUpstreamsHealthyUpdater(h, ctx)
	upstreamHealthyUpdater.init()

	return nil
}

// Cleanup cleans up the resources made by h.
func (h *Handler) Cleanup() error {
	err := h.cleanupConnections()

	// remove hosts from our config from the pool
	for _, upstream := range h.Upstreams {
		_, _ = hosts.Delete(upstream.String())
	}

	return err
}

// ServeHTTP proxies the request to a selected upstream. It clones and
// prepares the request once, then runs the proxy loop, retrying per the
// load balancing configuration until an attempt succeeds or tryAgain
// declines further retries.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// prepare the request for proxying; this is needed only once
	clonedReq, err := h.prepareRequest(r, repl)
	if err != nil {
		return caddyhttp.Error(http.StatusInternalServerError,
			fmt.Errorf("preparing request for upstream round-trip: %v", err))
	}

	// websocket over http2 or http3 if extended connect is enabled, assuming backend doesn't support this, the request will be modified to http1.1 upgrade
	// Both use the same upgrade mechanism: server advertizes extended connect support, and client sends the pseudo header :protocol in a CONNECT request
	// The quic-go http3 implementation also puts :protocol in r.Proto for CONNECT requests (quic-go/http3/headers.go@70-72,185,203)
	// TODO: once we can reliably detect backend support this, it can be removed for those backends
	if (r.ProtoMajor == 2 && r.Method == http.MethodConnect && r.Header.Get(":protocol") == "websocket") ||
		(r.ProtoMajor == 3 && r.Method == http.MethodConnect && r.Proto == "websocket") {
		clonedReq.Header.Del(":protocol")
		// keep the body for later use. http1.1 upgrade uses http.NoBody
		caddyhttp.SetVar(clonedReq.Context(), "extended_connect_websocket_body", clonedReq.Body)
		clonedReq.Body = http.NoBody
		clonedReq.Method = http.MethodGet
		clonedReq.Header.Set("Upgrade", "websocket")
		clonedReq.Header.Set("Connection", "Upgrade")
		// generate a random 16-byte key for the http1.1-style handshake
		key := make([]byte, 16)
		_, randErr := rand.Read(key)
		if randErr != nil {
			return randErr
		}
		clonedReq.Header["Sec-WebSocket-Key"] = []string{base64.StdEncoding.EncodeToString(key)}
	}

	// we will need the original headers and Host value if
	// header operations are configured; this is so that each
	// retry can apply the modifications, because placeholders
	// may be used which depend on the selected upstream for
	// their values
	reqHost := clonedReq.Host
	reqHeader := clonedReq.Header

	// When retries are configured and there is a body, wrap it in
	// io.NopCloser to prevent Go's transport from closing it on dial
	// errors. cloneRequest does a shallow copy, so clonedReq.Body and
	// r.Body share the same io.ReadCloser — a dial-failure Close()
	// would kill the original body for all subsequent retry attempts.
	// The real body is closed by the HTTP server when the handler
	// returns.
	//
	// If the body was already fully buffered (via request_buffers),
	// we also extract the buffer so the retry loop can replay it
	// from the beginning on each attempt. (see #6259, #7546)
	var bufferedReqBody *bytes.Buffer
	if clonedReq.Body != nil && h.LoadBalancing != nil && (h.LoadBalancing.Retries > 0 || h.LoadBalancing.TryDuration > 0) {
		if reqBodyBuf, ok := clonedReq.Body.(bodyReadCloser); ok && reqBodyBuf.body == nil && reqBodyBuf.buf != nil {
			// take ownership of the buffer for the duration of this request,
			// and return it to the pool when proxying is fully done
			bufferedReqBody = reqBodyBuf.buf
			reqBodyBuf.buf = nil
			clonedReq.Body = io.NopCloser(bytes.NewReader(bufferedReqBody.Bytes()))
			defer func() {
				bufferedReqBody.Reset()
				bufPool.Put(bufferedReqBody)
			}()
		} else {
			clonedReq.Body = io.NopCloser(clonedReq.Body)
		}
	}

	start := time.Now()
	defer func() {
		// total proxying duration, including time spent on LB and retries
		repl.Set("http.reverse_proxy.duration", time.Since(start))
		repl.Set("http.reverse_proxy.duration_ms", time.Since(start).Seconds()*1e3) // multiply seconds to preserve decimal (see #4666)
	}()

	// in the proxy loop, each iteration is an attempt to proxy the request,
	// and because we may retry some number of times, carry over the error
	// from previous tries because of the nuances of load balancing & retries
	var proxyErr error
	var retries int
	for {
		// if the request body was buffered (and only the entire body, hence no body
		// set to read from after the buffer), make reading from the body idempotent
		// and reusable, so if a backend partially or fully reads the body but then
		// produces an error, the request can be repeated to the next backend with
		// the full body (retries should only happen for idempotent requests) (see #6259)
		if bufferedReqBody != nil {
			clonedReq.Body = io.NopCloser(bytes.NewReader(bufferedReqBody.Bytes()))
		}
		var done bool
		done, proxyErr = h.proxyLoopIteration(clonedReq, r, w, proxyErr, start, retries, repl, reqHeader, reqHost, next)
		if done {
			break
		}
		if h.VerboseLogs {
			var lbWait time.Duration
			if h.LoadBalancing != nil {
				lbWait = time.Duration(h.LoadBalancing.TryInterval)
			}
			if c := h.logger.Check(zapcore.DebugLevel, "retrying"); c != nil {
				c.Write(zap.Error(proxyErr), zap.Duration("after", lbWait))
			}
		}
		retries++
	}

	// number of retries actually performed
	repl.Set("http.reverse_proxy.retries", retries)

	if proxyErr != nil {
		return statusError(proxyErr)
	}

	return nil
}

// proxyLoopIteration implements an iteration of the proxy loop. Despite the enormous amount of local state
// that has to be passed in, we brought this into its own method so that we could run defer more easily.
// It returns true when the loop is done and should break; false otherwise. The error value returned should
// be assigned to the proxyErr value for the next iteration of the loop (or the error handled after break).
func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w http.ResponseWriter, proxyErr error, start time.Time, retries int,
	repl *caddy.Replacer, reqHeader http.Header, reqHost string, next caddyhttp.Handler,
) (bool, error) {
	// get the updated list of upstreams
	upstreams := h.Upstreams
	if h.DynamicUpstreams != nil {
		dUpstreams, err := h.DynamicUpstreams.GetUpstreams(r)
		if err != nil {
			if c := h.logger.Check(zapcore.ErrorLevel, "failed getting dynamic upstreams; falling back to static upstreams"); c != nil {
				c.Write(zap.Error(err))
			}
		} else {
			upstreams = dUpstreams
			for _, dUp := range dUpstreams {
				h.provisionUpstream(dUp, true)
			}
			if c := h.logger.Check(zapcore.DebugLevel, "provisioned dynamic upstreams"); c != nil {
				c.Write(zap.Int("count", len(dUpstreams)))
			}
		}
	}

	// choose an available upstream
	upstream := h.LoadBalancing.SelectionPolicy.Select(upstreams, r, w)
	if upstream == nil {
		if proxyErr == nil {
			proxyErr = caddyhttp.Error(http.StatusServiceUnavailable, errNoUpstream)
		}
		if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r, h.logger) {
			return true, proxyErr
		}
		return false, proxyErr
	}

	// the dial address may vary per-request if placeholders are
	// used, so perform those replacements here; the resulting
	// DialInfo struct should have valid network address syntax
	dialInfo, err := upstream.fillDialInfo(repl)
	if err != nil {
		return true, fmt.Errorf("making dial info: %v", err)
	}

	if c := h.logger.Check(zapcore.DebugLevel, "selected upstream"); c != nil {
		c.Write(
			zap.String("dial", dialInfo.Address),
			zap.Int("total_upstreams", len(upstreams)),
		)
	}

	// attach to the request information about how to dial the upstream;
	// this is necessary because the information cannot be sufficiently
	// or satisfactorily represented in a URL
	caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo)

	// set placeholders with information about this upstream
	repl.Set("http.reverse_proxy.upstream.address", dialInfo.String())
	repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address)
	repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host)
	repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port)
	repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests())
	repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests)
	repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails())

	// mutate request headers according to this upstream;
	// because we're in a retry loop, we have to copy headers
	// (and the r.Host value) from the original so that each
	// retry is identical to the first. If either transport or
	// user ops exist, apply them in order (transport first,
	// then user, so user's config wins).
	var userOps *headers.HeaderOps
	if h.Headers != nil {
		userOps = h.Headers.Request
	}
	transportOps := h.transportHeaderOps
	if transportOps != nil || userOps != nil {
		r.Header = make(http.Header)
		copyHeader(r.Header, reqHeader)
		r.Host = reqHost
		if transportOps != nil {
			transportOps.ApplyToRequest(r)
		}
		if userOps != nil {
			userOps.ApplyToRequest(r)
		}
	}

	// proxy the request to that upstream
	proxyErr = h.reverseProxy(w, r, origReq, repl, dialInfo, next)
	if proxyErr == nil || errors.Is(proxyErr, context.Canceled) {
		// context.Canceled happens when the downstream client
		// cancels the request, which is not our failure
		return true, nil
	}

	// if the roundtrip was successful, don't retry the request or
	// ding the health status of the upstream (an error can still
	// occur after the roundtrip if, for example, a response handler
	// after the roundtrip returns an error)
	if succ, ok := proxyErr.(roundtripSucceededError); ok {
		return true, succ.error
	}

	// remember this failure (if enabled)
	h.countFailure(upstream)

	// if we've tried long enough, break
	if !h.LoadBalancing.tryAgain(h.ctx, start, retries, proxyErr, r, h.logger) {
		return true, proxyErr
	}

	return false, proxyErr
}

// Mapping of the canonical form of the headers, to the RFC 6455 form,
// i.e. `WebSocket` with uppercase 'S'.
var websocketHeaderMapping = map[string]string{
	"Sec-Websocket-Accept":     "Sec-WebSocket-Accept",
	"Sec-Websocket-Extensions": "Sec-WebSocket-Extensions",
	"Sec-Websocket-Key":        "Sec-WebSocket-Key",
	"Sec-Websocket-Protocol":   "Sec-WebSocket-Protocol",
	"Sec-Websocket-Version":    "Sec-WebSocket-Version",
}

// normalizeWebsocketHeaders ensures we use the standard casing as per
// RFC 6455, i.e. `WebSocket` with uppercase 'S'. Most servers don't
// care about this difference (read headers case insensitively), but
// some do, so this maximizes compatibility with upstreams.
// See https://github.com/caddyserver/caddy/pull/6621
func normalizeWebsocketHeaders(header http.Header) {
	for k, rk := range websocketHeaderMapping {
		if v, ok := header[k]; ok {
			delete(header, k)
			header[rk] = v
		}
	}
}

// prepareRequest clones req so that it can be safely modified without
// changing the original request or introducing data races. It then
// modifies it so that it is ready to be proxied, except for directing
// to a specific upstream. This method adjusts headers and other relevant
// properties of the cloned request and should be done just once (before
// proxying) regardless of proxy retries. This assumes that no mutations
// of the cloned request are performed by h during or after proxying.
func (h Handler) prepareRequest(req *http.Request, repl *caddy.Replacer) (*http.Request, error) {
	req = cloneRequest(req)

	// if enabled, perform rewrites on the cloned request; if
	// the method is GET or HEAD, prevent the request body
	// from being copied to the upstream
	if h.Rewrite != nil {
		changed := h.Rewrite.Rewrite(req, repl)
		if changed && (h.Rewrite.Method == "GET" || h.Rewrite.Method == "HEAD") {
			req.ContentLength = 0
			req.Body = nil
		}
	}

	// if enabled, buffer client request; this should only be
	// enabled if the upstream requires it and does not work
	// with "slow clients" (gunicorn, etc.) - this obviously
	// has a perf overhead and makes the proxy at risk of
	// exhausting memory and more susceptible to slowloris
	// attacks, so it is strongly recommended to only use this
	// feature if absolutely required, if read timeouts are
	// set, and if body size is limited
	if h.RequestBuffers != 0 && req.Body != nil {
		var readBytes int64
		req.Body, readBytes = h.bufferedBody(req.Body, h.RequestBuffers)
		// set Content-Length when body is fully buffered
		if b, ok := req.Body.(bodyReadCloser); ok && b.body == nil {
			req.ContentLength = readBytes
			req.Header.Set("Content-Length", strconv.FormatInt(req.ContentLength, 10))
		}
	}

	if req.ContentLength == 0 {
		req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries
	}

	req.Close = false

	// if User-Agent is not set by client, then explicitly
	// disable it so it's not set to default value by std lib
	if _, ok := req.Header["User-Agent"]; !ok {
		req.Header.Set("User-Agent", "")
	}

	// Indicate if request has been conveyed in early data.
	// RFC 8470: "An intermediary that forwards a request prior to the
	// completion of the TLS handshake with its client MUST send it with
	// the Early-Data header field set to “1” (i.e., it adds it if not
	// present in the request). An intermediary MUST use the Early-Data
	// header field if the request might have been subject to a replay and
	// might already have been forwarded by it or another instance
	// (see Section 6.2)."
	if req.TLS != nil && !req.TLS.HandshakeComplete {
		req.Header.Set("Early-Data", "1")
	}

	// capture the upgrade type before the Connection-named headers are stripped
	reqUpgradeType := upgradeType(req.Header)
	removeConnectionHeaders(req.Header)

	// Remove hop-by-hop headers to the backend. Especially
	// important is "Connection" because we want a persistent
	// connection, regardless of what the client sent to us.
	// Issue golang/go#46313: don't skip if field is empty.
	for _, h := range hopHeaders {
		// Issue golang/go#21096: tell backend applications that care about trailer support
		// that we support trailers. (We do, but we don't go out of our way to
		// advertise that unless the incoming client request thought it was worth
		// mentioning.)
		if h == "Te" && httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") {
			req.Header.Set("Te", "trailers")
			continue
		}
		req.Header.Del(h)
	}

	// After stripping all the hop-by-hop connection headers above, add back any
	// necessary for protocol upgrades, such as for websockets.
	if reqUpgradeType != "" {
		req.Header.Set("Connection", "Upgrade")
		req.Header.Set("Upgrade", reqUpgradeType)
		normalizeWebsocketHeaders(req.Header)
	}

	// Set up the PROXY protocol info
	address := caddyhttp.GetVar(req.Context(), caddyhttp.ClientIPVarKey).(string)
	addrPort, err := netip.ParseAddrPort(address)
	if err != nil {
		// OK; probably didn't have a port
		addr, err := netip.ParseAddr(address)
		if err != nil {
			// Doesn't seem like a valid ip address at all; addrPort
			// stays the zero value in that case
		} else {
			// Ok, only the port was missing
			addrPort = netip.AddrPortFrom(addr, 0)
		}
	}
	proxyProtocolInfo := ProxyProtocolInfo{AddrPort: addrPort}
	caddyhttp.SetVar(req.Context(), proxyProtocolInfoVarKey, proxyProtocolInfo)

	// some of the outbound requests require h1 (e.g. websocket)
	// https://github.com/golang/go/blob/4837fbe4145cd47b43eed66fee9eed9c2b988316/src/net/http/request.go#L1579
	if isWebsocket(req) {
		caddyhttp.SetVar(req.Context(), tlsH1OnlyVarKey, true)
	}

	// Add the supported X-Forwarded-* headers
	err = h.addForwardedHeaders(req)
	if err != nil {
		return nil, err
	}

	// Via header(s)
	req.Header.Add("Via", fmt.Sprintf("%d.%d Caddy", req.ProtoMajor, req.ProtoMinor))

	return req, nil
}

// addForwardedHeaders adds the de-facto standard X-Forwarded-*
// headers to the request before it is sent upstream.
//
// These headers are security sensitive, so care is taken to only
// use existing values for these headers from the incoming request
// if the client IP is trusted (i.e. coming from a trusted proxy
// sitting in front of this server).
// If the request didn't have
// the headers at all, then they will be added with the values
// that we can glean from the request.
func (h Handler) addForwardedHeaders(req *http.Request) error {
	// Check if the client is a trusted proxy
	trusted := caddyhttp.GetVar(req.Context(), caddyhttp.TrustedProxyVarKey).(bool)

	var clientIP string
	if req.RemoteAddr == "@" {
		// For Unix socket connections, RemoteAddr is "@" which cannot
		// be parsed as host:port. If untrusted, strip forwarded headers
		// for security. If trusted, there is no peer IP to append to
		// X-Forwarded-For, so clientIP stays empty.
		if !trusted {
			req.Header.Del("X-Forwarded-For")
			req.Header.Del("X-Forwarded-Proto")
			req.Header.Del("X-Forwarded-Host")
			return nil
		}
	} else {
		// Parse the remote IP, ignore the error as non-fatal,
		// but the remote IP is required to continue, so we
		// just return early. This should probably never happen
		// though, unless some other module manipulated the request's
		// remote address and used an invalid value.
		var err error
		clientIP, _, err = net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			// Remove the `X-Forwarded-*` headers to avoid upstreams
			// potentially trusting a header that came from the client
			req.Header.Del("X-Forwarded-For")
			req.Header.Del("X-Forwarded-Proto")
			req.Header.Del("X-Forwarded-Host")
			return nil
		}

		// Client IP may contain a zone if IPv6, so we need
		// to pull that out before parsing the IP
		clientIP, _, _ = strings.Cut(clientIP, "%")
		ipAddr, err := netip.ParseAddr(clientIP)
		// If ParseAddr fails (e.g. non-IP network like SCION), we cannot check
		// if it is a trusted proxy by IP range. In this case, we ignore the
		// error and treat the connection as untrusted (or retain existing status).
		if err == nil {
			// the peer IP may upgrade trust here even if the
			// context var said untrusted, when it falls inside
			// one of this handler's trusted_proxies ranges
			for _, ipRange := range h.trustedProxies {
				if ipRange.Contains(ipAddr) {
					trusted = true
					break
				}
			}
		}
	}

	// If we aren't the first proxy, and the proxy is trusted,
	// retain prior X-Forwarded-For information as a comma+space
	// separated list and fold multiple headers into one.
	prior, ok, omit := allHeaderValues(req.Header, "X-Forwarded-For")
	if !omit {
		if trusted && ok && prior != "" {
			if clientIP != "" {
				req.Header.Set("X-Forwarded-For", prior+", "+clientIP)
			} else {
				req.Header.Set("X-Forwarded-For", prior)
			}
		} else if clientIP != "" {
			req.Header.Set("X-Forwarded-For", clientIP)
		}
	}

	// Set X-Forwarded-Proto; many backend apps expect this,
	// so that they can properly craft URLs with the right
	// scheme to match the original request
	proto := "https"
	if req.TLS == nil {
		proto = "http"
	}
	prior, ok, omit = lastHeaderValue(req.Header, "X-Forwarded-Proto")
	if trusted && ok && prior != "" {
		proto = prior
	}
	if !omit {
		req.Header.Set("X-Forwarded-Proto", proto)
	}

	// Set X-Forwarded-Host; often this is redundant because
	// we pass through the request Host as-is, but in situations
	// where we proxy over HTTPS, the user may need to override
	// Host themselves, so it's helpful to send the original too.
	host := req.Host
	prior, ok, omit = lastHeaderValue(req.Header, "X-Forwarded-Host")
	if trusted && ok && prior != "" {
		host = prior
	}
	if !omit {
		req.Header.Set("X-Forwarded-Host", host)
	}

	return nil
}

// reverseProxy performs a round-trip to the given backend and processes the response with the client.
// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the
// Go standard library which was used as the foundation.)
func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, origReq *http.Request, repl *caddy.Replacer, di DialInfo, next caddyhttp.Handler) error {
	_ = di.Upstream.Host.countRequest(1)
	// Increment the in-flight request count
	incInFlightRequest(di.Address)
	//nolint:errcheck
	defer func() {
		di.Upstream.Host.countRequest(-1)
		// Decrement the in-flight request count
		decInFlightRequest(di.Address)
	}()

	// point the request to this upstream
	h.directRequest(req, di)

	server := req.Context().Value(caddyhttp.ServerCtxKey).(*caddyhttp.Server)
	shouldLogCredentials := server.Logs != nil && server.Logs.ShouldLogCredentials

	// Forward 1xx status codes, backported from https://github.com/golang/go/pull/53164
	var (
		roundTripMutex sync.Mutex
		roundTripDone  bool
	)
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			roundTripMutex.Lock()
			defer roundTripMutex.Unlock()
			if roundTripDone {
				// If RoundTrip has returned, don't try to further modify
				// the ResponseWriter's header map.
				return nil
			}
			// NOTE: this h deliberately shadows the handler receiver;
			// it is the downstream response header map
			h := rw.Header()
			copyHeader(h, http.Header(header))
			rw.WriteHeader(code)

			// Clear headers coming from the backend
			// (it's not automatically done by ResponseWriter.WriteHeader() for 1xx responses)
			clear(h)

			return nil
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	// do the round-trip
	start := time.Now()
	res, err := h.Transport.RoundTrip(req)
	duration := time.Since(start)

	// record that the round trip is done for the 1xx response handler
	roundTripMutex.Lock()
	roundTripDone = true
	roundTripMutex.Unlock()

	// emit debug log with values we know are safe,
	// or if there is no error, emit fuller log entry
	logger := h.logger.With(
		zap.String("upstream", di.Upstream.String()),
		zap.Duration("duration", duration),
		zap.Object("request", caddyhttp.LoggableHTTPRequest{
			Request:              req,
			ShouldLogCredentials: shouldLogCredentials,
		}),
	)
	const logMessage = "upstream roundtrip"

	if err != nil {
		if c := logger.Check(zapcore.DebugLevel, logMessage); c != nil {
			c.Write(zap.Error(err))
		}
		return err
	}
	if c := logger.Check(zapcore.DebugLevel, logMessage); c != nil {
		c.Write(
			zap.Object("headers", caddyhttp.LoggableHTTPHeader{
				Header:               res.Header,
				ShouldLogCredentials: shouldLogCredentials,
			}),
			zap.Int("status", res.StatusCode),
		)
	}

	// duration until upstream wrote response headers (roundtrip duration)
	repl.Set("http.reverse_proxy.upstream.latency", duration)
	repl.Set("http.reverse_proxy.upstream.latency_ms", duration.Seconds()*1e3) // multiply seconds to preserve decimal (see #4666)

	// update circuit breaker on current conditions
	if di.Upstream.cb != nil {
		di.Upstream.cb.RecordMetric(res.StatusCode, duration)
	}

	// perform passive health checks (if enabled)
	if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
		// strike if the status code matches one that is "bad"
		for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus {
			if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) {
				h.countFailure(di.Upstream)
			}
		}

		// strike if the roundtrip took too long
		if h.HealthChecks.Passive.UnhealthyLatency > 0 && duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) {
			h.countFailure(di.Upstream)
		}
	}

	// if enabled, buffer the response body
	if h.ResponseBuffers != 0 {
		res.Body, _ = h.bufferedBody(res.Body, h.ResponseBuffers)
	}

	// see if any response handler is configured for this response from the backend
	for i, rh := range h.HandleResponse {
		if rh.Match != nil && !rh.Match.Match(res.StatusCode, res.Header) {
			continue
		}

		// if configured to only change the status code,
		// do that then continue regular proxy response
		if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
			statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
			if err != nil {
				return caddyhttp.Error(http.StatusInternalServerError, err)
			}
			if statusCode != 0 {
				res.StatusCode = statusCode
			}
			break
		}

		// set up the replacer so that parts of the original response can be
		// used for routing decisions
		for field, value := range res.Header {
			repl.Set("http.reverse_proxy.header."+field, strings.Join(value, ","))
		}
		repl.Set("http.reverse_proxy.status_code", res.StatusCode)
		repl.Set("http.reverse_proxy.status_text", res.Status)

		if c := logger.Check(zapcore.DebugLevel, "handling response"); c != nil {
			c.Write(zap.Int("handler", i))
		}

		// we make some data available via request context to child routes
		// so that they may inherit some options and functions from the
		// handler, and be able to copy the response.
		// we use the original request here, so that any routes from 'next'
		// see the original request rather than the proxy cloned request.
		hrc := &handleResponseContext{
			handler:  h,
			response: res,
			start:    start,
			logger:   logger,
		}
		ctx := origReq.Context()
		ctx = context.WithValue(ctx, proxyHandleResponseContextCtxKey, hrc)

		// pass the request through the response handler routes
		routeErr := rh.Routes.Compile(next).ServeHTTP(rw, origReq.WithContext(ctx))

		// close the response body afterwards, since we don't need it anymore;
		// either a route had 'copy_response' which already consumed the body,
		// or some other terminal handler ran which doesn't need the response
		// body after that point (e.g. 'file_server' for X-Accel-Redirect flow),
		// or we fell through to subsequent handlers past this proxy
		// (e.g. forward auth's 2xx response flow).
		if !hrc.isFinalized {
			res.Body.Close()
		}

		// wrap any route error in roundtripSucceededError so caller knows that
		// the roundtrip was successful and to not retry
		if routeErr != nil {
			return roundtripSucceededError{routeErr}
		}

		// we're done handling the response, and we don't want to
		// fall through to the default finalize/copy behaviour
		return nil
	}

	// copy the response body and headers back to the upstream client
	return h.finalizeResponse(rw, req, res, repl, start, logger)
}

// finalizeResponse prepares and copies the response.
func (h *Handler) finalizeResponse(
	rw http.ResponseWriter,
	req *http.Request,
	res *http.Response,
	repl *caddy.Replacer,
	start time.Time,
	logger *zap.Logger,
) error {
	// deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
	if res.StatusCode == http.StatusSwitchingProtocols {
		var wg sync.WaitGroup
		h.handleUpgradeResponse(logger, &wg, rw, req, res)
		wg.Wait()
		return nil
	}

	removeConnectionHeaders(res.Header)

	for _, h := range hopHeaders {
		res.Header.Del(h)
	}

	// delete our Server header and use Via instead (see #6275)
	rw.Header().Del("Server")
	// for non-HTTP protocols, prefix the Via entry with the
	// protocol name up to and including the "/"
	var protoPrefix string
	if !strings.HasPrefix(strings.ToUpper(res.Proto), "HTTP/") {
		protoPrefix = res.Proto[:strings.Index(res.Proto, "/")+1]
	}
	rw.Header().Add("Via", fmt.Sprintf("%s%d.%d Caddy", protoPrefix, res.ProtoMajor, res.ProtoMinor))

	// apply any response header operations
	if h.Headers != nil && h.Headers.Response != nil {
		if h.Headers.Response.Require == nil ||
			h.Headers.Response.Require.Match(res.StatusCode, res.Header) {
			h.Headers.Response.ApplyTo(res.Header, repl)
		}
	}

	copyHeader(rw.Header(), res.Header)

	// The "Trailer" header isn't included in the Transport's response,
	// at least for *http.Transport. Build it up from Trailer.
	announcedTrailers := len(res.Trailer)
	if announcedTrailers > 0 {
		trailerKeys := make([]string, 0, len(res.Trailer))
		for k := range res.Trailer {
			trailerKeys = append(trailerKeys, k)
		}
		rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
	}

	rw.WriteHeader(res.StatusCode)
	if h.VerboseLogs {
		logger.Debug("wrote header")
	}

	err := h.copyResponse(rw, res.Body, h.flushInterval(req, res), logger)
	errClose := res.Body.Close() // close now, instead of defer, to populate res.Trailer
	if h.VerboseLogs || errClose != nil {
		if c := logger.Check(zapcore.DebugLevel, "closed response body from upstream"); c != nil {
			c.Write(zap.Error(errClose))
		}
	}
	if err != nil {
		// we're streaming the response and we've already written headers, so
		// there's nothing an error handler can do to recover at this point;
		// we'll just log the error and abort the stream here and panic just as
		// the standard lib's proxy to propagate the stream error.
		// see issue https://github.com/caddyserver/caddy/issues/5951
		if c := logger.Check(zapcore.WarnLevel, "aborting with incomplete response"); c != nil {
			c.Write(zap.Error(err))
		}
		// no extra logging from stdlib
		panic(http.ErrAbortHandler)
	}

	if len(res.Trailer) > 0 {
		// Force chunking if we saw a response trailer.
		// This prevents net/http from calculating the length for short
		// bodies and adding a Content-Length.
		//nolint:bodyclose
		http.NewResponseController(rw).Flush()
	}

	// total duration spent proxying, including writing response body
	repl.Set("http.reverse_proxy.upstream.duration", time.Since(start))
	repl.Set("http.reverse_proxy.upstream.duration_ms", time.Since(start).Seconds()*1e3)

	// fast path: all trailers were announced up front, so they can be
	// copied into the normal header map before the handler returns
	if len(res.Trailer) == announcedTrailers {
		copyHeader(rw.Header(), res.Trailer)
		return nil
	}

	// otherwise, use the http.TrailerPrefix mechanism to add
	// trailers that were not announced in the Trailer header
	for k, vv := range res.Trailer {
		k = http.TrailerPrefix + k
		for _, v := range vv {
			rw.Header().Add(k, v)
		}
	}

	if h.VerboseLogs {
		logger.Debug("response finalized")
	}

	return nil
}

// tryAgain takes the time that the handler was initially invoked,
// the amount of retries already performed, as well as any error
// currently obtained, and the request being tried, and returns
// true if another attempt should be made at proxying the request.
// If true is returned, it has already blocked long enough before
// the next retry (i.e. no more sleeping is needed). If false is
// returned, the handler should stop trying to proxy the request.
func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int, proxyErr error, req *http.Request, logger *zap.Logger) bool {
	// no retries are configured
	if lb.TryDuration == 0 && lb.Retries == 0 {
		return false
	}

	// if we've tried long enough, break
	if lb.TryDuration > 0 && time.Since(start) >= time.Duration(lb.TryDuration) {
		return false
	}

	// if we've reached the retry limit, break
	if lb.Retries > 0 && retries >= lb.Retries {
		return false
	}

	// if the error occurred while dialing (i.e. a connection
	// could not even be established to the upstream), then it
	// should be safe to retry, since without a connection, no
	// HTTP request can be transmitted; but if the error is not
	// specifically a dialer error, we need to be careful
	if proxyErr != nil {
		_, isDialError := proxyErr.(DialError)
		herr, isHandlerError := proxyErr.(caddyhttp.HandlerError)

		// if the error occurred after a connection was established,
		// we have to assume the upstream received the request, and
		// retries need to be carefully decided, because some requests
		// are not idempotent
		if !isDialError && (!isHandlerError || !errors.Is(herr, errNoUpstream)) {
			if lb.RetryMatch == nil && req.Method != "GET" {
				// by default, don't retry requests if they aren't GET
				return false
			}

			match, err := lb.RetryMatch.AnyMatchWithError(req)
			if err != nil {
				logger.Error("error matching request for retry", zap.Error(err))
				return false
			}
			if !match {
				return false
			}
		}
	}

	// fast path; if the interval is zero, we don't need to wait
	if lb.TryInterval == 0 {
		return true
	}

	// otherwise, wait and try the next available host
	timer := time.NewTimer(time.Duration(lb.TryInterval))
	select {
	case <-timer.C:
		return true
	case <-ctx.Done():
		if !timer.Stop() {
			// if the timer has been stopped then read from the channel
			<-timer.C
		}
		return false
	}
}

// directRequest modifies only req.URL so that it points to the upstream
// in the given DialInfo. It must modify ONLY the request URL.
func (h *Handler) directRequest(req *http.Request, di DialInfo) {
	// we need a host, so set the upstream's host address
	reqHost := di.Address

	// if the port equates to the scheme, strip the port because
	// it's weird to make a request like http://example.com:80/.
	if (req.URL.Scheme == "http" && di.Port == "80") ||
		(req.URL.Scheme == "https" && di.Port == "443") {
		reqHost = di.Host
	}

	// add client address to the host to let transport differentiate requests from different clients
	if ppt, ok := h.Transport.(ProxyProtocolTransport); ok && ppt.ProxyProtocolEnabled() {
		if proxyProtocolInfo, ok := caddyhttp.GetVar(req.Context(), proxyProtocolInfoVarKey).(ProxyProtocolInfo); ok {
			// encode the request so it plays well with h2 transport, it's unnecessary for h1 but anyway
			// The issue is that h2 transport will use the address to determine if new connections are needed
			// to roundtrip requests but the without escaping, new connections are constantly created and closed until
			// file descriptors are exhausted.
			// see: https://github.com/caddyserver/caddy/issues/7529
			reqHost = url.QueryEscape(proxyProtocolInfo.AddrPort.String() + "->" + reqHost)
		}
	}

	req.URL.Host = reqHost
}

// provisionUpstream prepares a single upstream for use: it registers
// (or reuses) its host state, attaches the handler's circuit breaker,
// and propagates passive health check settings onto the upstream.
// dynamic indicates the upstream came from a dynamic upstreams source
// rather than the static config.
func (h Handler) provisionUpstream(upstream *Upstream, dynamic bool) {
	// create or get the host representation for this upstream;
	// dynamic upstreams are tracked in a separate map with last-seen
	// timestamps so their health state persists across requests without
	// being reference-counted (and thus discarded between requests).
	if dynamic {
		upstream.fillDynamicHost()
	} else {
		upstream.fillHost()
	}

	// give it the circuit breaker, if any
	upstream.cb = h.CB

	// if the passive health checker has a non-zero UnhealthyRequestCount
	// but the upstream has no MaxRequests set (they are the same thing,
	// but the passive health checker is a default value for upstreams
	// without MaxRequests), copy the value into this upstream, since the
	// value in the upstream (MaxRequests) is what is used during
	// availability checks
	if h.HealthChecks != nil && h.HealthChecks.Passive != nil &&
		h.HealthChecks.Passive.UnhealthyRequestCount > 0 &&
		upstream.MaxRequests == 0 {
		upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount
	}

	// upstreams need independent access to the passive
	// health check policy because passive health checks
	// run without access to h.
	if h.HealthChecks != nil {
		upstream.healthCheckPolicy = h.HealthChecks.Passive
	}
}

// bufferedBody reads originalBody into a buffer with maximum size of limit (-1 for unlimited),
// then returns a reader for the buffer along with how many bytes were buffered. Always close
// the return value when done with it, just like if it was the original body! If limit is 0
// (which it shouldn't be), this function returns its input; i.e. is a no-op, for safety.
// Otherwise, it returns bodyReadCloser, the original body will be closed and body will be nil
// if it's explicitly configured to buffer all or EOF is reached when reading.
// TODO: the error during reading is discarded if the limit is negative, should the error be propagated
// to upstream/downstream?
func (h Handler) bufferedBody(originalBody io.ReadCloser, limit int64) (io.ReadCloser, int64) { if limit == 0 { return originalBody, 0 } var written int64 buf := bufPool.Get().(*bytes.Buffer) buf.Reset() if limit > 0 { var err error written, err = io.CopyN(buf, originalBody, limit) if (err != nil && err != io.EOF) || written == limit { return bodyReadCloser{ Reader: io.MultiReader(buf, originalBody), buf: buf, body: originalBody, }, written } } else { written, _ = io.Copy(buf, originalBody) } originalBody.Close() // no point in keeping it open return bodyReadCloser{ Reader: buf, buf: buf, }, written } // cloneRequest makes a semi-deep clone of origReq. // // Most of this code is borrowed from the Go stdlib reverse proxy, // but we make a shallow-ish clone the request (deep clone only // the headers and URL) so we can avoid manipulating the original // request when using it to proxy upstream. This prevents request // corruption and data races. func cloneRequest(origReq *http.Request) *http.Request { req := new(http.Request) *req = *origReq if origReq.URL != nil { newURL := new(url.URL) *newURL = *origReq.URL if origReq.URL.User != nil { newURL.User = new(url.Userinfo) *newURL.User = *origReq.URL.User } // sanitize the request URL; we expect it to not contain the // scheme and host since those should be determined by r.TLS // and r.Host respectively, but some clients may include it // in the request-line, which is technically valid in HTTP, // but breaks reverseproxy behaviour, overriding how the // dialer will behave. See #4237 for context. newURL.Scheme = "" newURL.Host = "" req.URL = newURL } if origReq.Header != nil { req.Header = origReq.Header.Clone() } if origReq.Trailer != nil { req.Trailer = origReq.Trailer.Clone() } return req } func copyHeader(dst, src http.Header) { for k, vv := range src { for _, v := range vv { dst.Add(k, v) } } } // allHeaderValues gets all values for a given header field, // joined by a comma and space if more than one is set. 
If the // header field is nil, then the omit is true, meaning some // earlier logic in the server wanted to prevent this header from // getting written at all. If the header is empty, then ok is // false. Callers should still check that the value is not empty // (the header field may be set but have an empty value). func allHeaderValues(h http.Header, field string) (value string, ok bool, omit bool) { values, ok := h[http.CanonicalHeaderKey(field)] if ok && values == nil { return "", true, true } if len(values) == 0 { return "", false, false } return strings.Join(values, ", "), true, false } // lastHeaderValue gets the last value for a given header field // if more than one is set. If the header field is nil, then // the omit is true, meaning some earlier logic in the server // wanted to prevent this header from getting written at all. // If the header is empty, then ok is false. Callers should // still check that the value is not empty (the header field // may be set but have an empty value). func lastHeaderValue(h http.Header, field string) (value string, ok bool, omit bool) { values, ok := h[http.CanonicalHeaderKey(field)] if ok && values == nil { return "", true, true } if len(values) == 0 { return "", false, false } return values[len(values)-1], true, false } func upgradeType(h http.Header) string { if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") { return "" } return strings.ToLower(h.Get("Upgrade")) } // removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h. // See RFC 7230, section 6.1 func removeConnectionHeaders(h http.Header) { for _, f := range h["Connection"] { for sf := range strings.SplitSeq(f, ",") { if sf = textproto.TrimString(sf); sf != "" { h.Del(sf) } } } } // statusError returns an error value that has a status code. 
// statusError maps a proxying error to a caddyhttp error carrying an
// appropriate HTTP status code: 502 by default, 504 for timeouts, and
// 499 (non-standard, nginx-style) for client-canceled requests.
func statusError(err error) error {
	// errors proxying usually mean there is a problem with the upstream(s)
	statusCode := http.StatusBadGateway

	// timeout errors have a standard status code (see issue #4823)
	if err, ok := err.(net.Error); ok && err.Timeout() {
		statusCode = http.StatusGatewayTimeout
	}

	// if the client canceled the request (usually this means they closed
	// the connection, so they won't see any response), we can report it
	// as a client error (4xx) and not a server error (5xx); unfortunately
	// the Go standard library, at least at time of writing in late 2020,
	// obnoxiously wraps the exported, standard context.Canceled error with
	// an unexported garbage value that we have to do a substring check for:
	// https://github.com/golang/go/blob/6965b01ea248cabb70c3749fd218b36089a21efb/src/net/net.go#L416-L430
	if errors.Is(err, context.Canceled) || strings.Contains(err.Error(), "operation was canceled") {
		// regrettably, there is no standard error code for "client closed connection", but
		// for historical reasons we can use a code that a lot of people are already using;
		// using 5xx is problematic for users; see #3748
		statusCode = 499
	}
	return caddyhttp.Error(statusCode, err)
}

// LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
	// A selection policy is how to choose an available backend.
	// The default policy is random selection.
	SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`

	// How many times to retry selecting available backends for each
	// request if the next available host is down. If try_duration is
	// also configured, then retries may stop early if the duration
	// is reached. By default, retries are disabled (zero).
	Retries int `json:"retries,omitempty"`

	// How long to try selecting available backends for each request
	// if the next available host is down. Clients will wait for up
	// to this long while the load balancer tries to find an available
	// upstream host. If retries is also configured, tries may stop
	// early if the maximum retries is reached. By default, retries
	// are disabled (zero duration).
	TryDuration caddy.Duration `json:"try_duration,omitempty"`

	// How long to wait between selecting the next host from the pool.
	// Default is 250ms if try_duration is enabled, otherwise zero. Only
	// relevant when a request to an upstream host fails. Be aware that
	// setting this to 0 with a non-zero try_duration can cause the CPU
	// to spin if all backends are down and latency is very low.
	TryInterval caddy.Duration `json:"try_interval,omitempty"`

	// A list of matcher sets that restricts with which requests retries are
	// allowed. A request must match any of the given matcher sets in order
	// to be retried if the connection to the upstream succeeded but the
	// subsequent round-trip failed. If the connection to the upstream failed,
	// a retry is always allowed. If unspecified, only GET requests will be
	// allowed to be retried. Note that a retry is done with the next available
	// host according to the load balancing policy.
	RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`

	// Decoded forms of the raw fields above; populated at provision time.
	SelectionPolicy Selector              `json:"-"`
	RetryMatch      caddyhttp.MatcherSets `json:"-"`
}

// Selector selects an available upstream from the pool.
type Selector interface {
	Select(UpstreamPool, *http.Request, http.ResponseWriter) *Upstream
}

// UpstreamSource gets the list of upstreams that can be used when
// proxying a request. Returned upstreams will be load balanced and
// health-checked. This should be a very fast function -- instant
// if possible -- and the return value must be as stable as possible.
// In other words, the list of upstreams should ideally not change much
// across successive calls. If the list of upstreams changes or the
// ordering is not stable, load balancing will suffer.
// This function may be called during each retry, multiple times per
// request, and as such, needs to be instantaneous. The returned
// slice will not be modified.
type UpstreamSource interface {
	GetUpstreams(*http.Request) ([]*Upstream, error)
}

// Hop-by-hop headers. These are removed when sent to the backend.
// As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
var hopHeaders = []string{
	"Alt-Svc",
	"Connection",
	"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
	"Keep-Alive",
	"Proxy-Authenticate",
	"Proxy-Authorization",
	"Te",      // canonicalized version of "TE"
	"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
	"Transfer-Encoding",
	"Upgrade",
}

// DialError is an error that specifically occurs
// in a call to Dial or DialContext.
type DialError struct{ error }

// TLSTransport is implemented by transports
// that are capable of using TLS.
type TLSTransport interface {
	// TLSEnabled returns true if the transport
	// has TLS enabled, false otherwise.
	TLSEnabled() bool

	// EnableTLS enables TLS within the transport
	// if it is not already, using the provided
	// value as a basis for the TLS config.
	EnableTLS(base *TLSConfig) error
}

// H2CTransport is implemented by transports
// that are capable of using h2c.
type H2CTransport interface {
	EnableH2C() error
}

// ProxyProtocolTransport is implemented by transports
// that are capable of using proxy protocol.
type ProxyProtocolTransport interface {
	ProxyProtocolEnabled() bool
}

// HealthCheckSchemeOverriderTransport is implemented by transports
// that can override the scheme used for health checks.
type HealthCheckSchemeOverriderTransport interface {
	OverrideHealthCheckScheme(base *url.URL, port string)
}

// BufferedTransport is implemented by transports
// that needs to buffer requests and/or responses.
type BufferedTransport interface {
	// DefaultBufferSizes returns the default buffer sizes
	// for requests and responses, respectively if buffering isn't enabled.
	DefaultBufferSizes() (int64, int64)
}

// RequestHeaderOpsTransport may be implemented by a transport to provide
// header operations to apply to requests immediately before the RoundTrip.
// For example, overriding the default Host when TLS is enabled.
type RequestHeaderOpsTransport interface {
	// RequestHeaderOps allows a transport to provide header operations
	// to apply to the request. The transport is asked at provision time
	// to return a HeaderOps (or nil) that will be applied before
	// user-configured header ops.
	RequestHeaderOps() *headers.HeaderOps
}

// roundtripSucceededError is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
type roundtripSucceededError struct{ error }

// bodyReadCloser is a reader that, upon closing, will return
// its buffer to the pool and close the underlying body reader.
type bodyReadCloser struct {
	io.Reader
	buf  *bytes.Buffer // scratch buffer from bufPool; returned on Close
	body io.ReadCloser // the original body; nil for fully-buffered bodies
}

// Close returns the scratch buffer to the pool (if still owned) and
// closes the underlying body reader (if any).
func (brc bodyReadCloser) Close() error {
	// Inside this package this will be set to nil for fully-buffered
	// requests due to the possibility of retrial.
	if brc.buf != nil {
		bufPool.Put(brc.buf)
	}
	// For fully-buffered bodies, body is nil, so Close is a no-op.
	if brc.body != nil {
		return brc.body.Close()
	}
	return nil
}

// bufPool is used for buffering requests and responses.
var bufPool = sync.Pool{
	New: func() any {
		return new(bytes.Buffer)
	},
}

// handleResponseContext carries some contextual information about the
// current proxy handling.
type handleResponseContext struct { // handler is the active proxy handler instance, so that // routes like copy_response may inherit some config // options and have access to handler methods. handler *Handler // response is the actual response received from the proxy // roundtrip, to potentially be copied if a copy_response // handler is in the handle_response routes. response *http.Response // start is the time just before the proxy roundtrip was // performed, used for logging. start time.Time // logger is the prepared logger which is used to write logs // with the request, duration, and selected upstream attached. logger *zap.Logger // isFinalized is whether the response has been finalized, // i.e. copied and closed, to make sure that it doesn't // happen twice. isFinalized bool } // proxyHandleResponseContextCtxKey is the context key for the active proxy handler // so that handle_response routes can inherit some config options // from the proxy handler. const proxyHandleResponseContextCtxKey caddy.CtxKey = "reverse_proxy_handle_response_context" // errNoUpstream occurs when there are no upstream available. var errNoUpstream = fmt.Errorf("no upstreams available") // Interface guards var ( _ caddy.Provisioner = (*Handler)(nil) _ caddy.CleanerUpper = (*Handler)(nil) _ caddyhttp.MiddlewareHandler = (*Handler)(nil) ) ================================================ FILE: modules/caddyhttp/reverseproxy/selectionpolicies.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package reverseproxy import ( "crypto/hmac" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" weakrand "math/rand/v2" "net" "net/http" "strconv" "strings" "sync/atomic" "time" "github.com/cespare/xxhash/v2" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { caddy.RegisterModule(RandomSelection{}) caddy.RegisterModule(RandomChoiceSelection{}) caddy.RegisterModule(LeastConnSelection{}) caddy.RegisterModule(RoundRobinSelection{}) caddy.RegisterModule(WeightedRoundRobinSelection{}) caddy.RegisterModule(FirstSelection{}) caddy.RegisterModule(IPHashSelection{}) caddy.RegisterModule(ClientIPHashSelection{}) caddy.RegisterModule(URIHashSelection{}) caddy.RegisterModule(QueryHashSelection{}) caddy.RegisterModule(HeaderHashSelection{}) caddy.RegisterModule(CookieHashSelection{}) } // RandomSelection is a policy that selects // an available host at random. type RandomSelection struct{} // CaddyModule returns the Caddy module information. func (RandomSelection) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.reverse_proxy.selection_policies.random", New: func() caddy.Module { return new(RandomSelection) }, } } // Select returns an available host, if any. func (r RandomSelection) Select(pool UpstreamPool, request *http.Request, _ http.ResponseWriter) *Upstream { return selectRandomHost(pool) } // UnmarshalCaddyfile sets up the module from Caddyfile tokens. func (r *RandomSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume policy name if d.NextArg() { return d.ArgErr() } return nil } // WeightedRoundRobinSelection is a policy that selects // a host based on weighted round-robin ordering. 
type WeightedRoundRobinSelection struct {
	// The weight of each upstream in order,
	// corresponding with the list of upstreams configured.
	Weights []int `json:"weights,omitempty"`
	// index is a monotonically increasing counter used to step
	// through the weighted schedule; accessed atomically.
	index uint32
	// totalWeight is the sum of all configured weights (set in Provision).
	totalWeight int
}

// CaddyModule returns the Caddy module information.
func (WeightedRoundRobinSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID: "http.reverse_proxy.selection_policies.weighted_round_robin",
		New: func() caddy.Module {
			return new(WeightedRoundRobinSelection)
		},
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Expects one non-negative integer weight per configured upstream.
func (r *WeightedRoundRobinSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	args := d.RemainingArgs()
	if len(args) == 0 {
		return d.ArgErr()
	}

	for _, weight := range args {
		weightInt, err := strconv.Atoi(weight)
		if err != nil {
			return d.Errf("invalid weight value '%s': %v", weight, err)
		}
		if weightInt < 0 {
			return d.Errf("invalid weight value '%s': weight should be non-negative", weight)
		}
		r.Weights = append(r.Weights, weightInt)
	}
	return nil
}

// Provision sets up r by computing the total of all weights.
func (r *WeightedRoundRobinSelection) Provision(ctx caddy.Context) error {
	for _, weight := range r.Weights {
		r.totalWeight += weight
	}
	return nil
}

// Select returns an available host, if any.
func (r *WeightedRoundRobinSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
	if len(pool) == 0 {
		return nil
	}
	if len(r.Weights) < 2 {
		return pool[0]
	}
	var index, totalWeight int
	// keep only positive weights; zero-weight upstreams are also
	// skipped when building the candidate list below
	var weights []int

	for _, w := range r.Weights {
		if w > 0 {
			weights = append(weights, w)
		}
	}
	// NOTE(review): if every configured weight is zero, r.totalWeight is 0
	// and this modulo panics — presumably config validation guarantees at
	// least one positive weight; verify.
	currentWeight := int(atomic.AddUint32(&r.index, 1)) % r.totalWeight
	for i, weight := range weights {
		totalWeight += weight
		if currentWeight < totalWeight {
			index = i
			break
		}
	}
	// collect available upstreams with non-zero weight, capped at the
	// number of positive weights so index maps onto the same schedule
	upstreams := make([]*Upstream, 0, len(weights))
	for i, upstream := range pool {
		if !upstream.Available() || r.Weights[i] == 0 {
			continue
		}
		upstreams = append(upstreams, upstream)
		if len(upstreams) == cap(upstreams) {
			break
		}
	}
	if len(upstreams) == 0 {
		return nil
	}
	return upstreams[index%len(upstreams)]
}

// RandomChoiceSelection is a policy that selects
// two or more available hosts at random, then
// chooses the one with the least load.
type RandomChoiceSelection struct {
	// The size of the sub-pool created from the larger upstream pool. The default value
	// is 2 and the maximum at selection time is the size of the upstream pool.
	Choose int `json:"choose,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (RandomChoiceSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.random_choose",
		New: func() caddy.Module { return new(RandomChoiceSelection) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Expects a single integer argument: the sub-pool size.
func (r *RandomChoiceSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if !d.NextArg() {
		return d.ArgErr()
	}
	chooseStr := d.Val()
	choose, err := strconv.Atoi(chooseStr)
	if err != nil {
		return d.Errf("invalid choice value '%s': %v", chooseStr, err)
	}
	r.Choose = choose
	return nil
}

// Provision sets up r.
func (r *RandomChoiceSelection) Provision(ctx caddy.Context) error {
	if r.Choose == 0 {
		r.Choose = 2 // default sub-pool size
	}
	return nil
}

// Validate ensures that r's configuration is valid.
func (r RandomChoiceSelection) Validate() error {
	if r.Choose < 2 {
		return fmt.Errorf("choose must be at least 2")
	}
	return nil
}

// Select returns an available host, if any.
func (r RandomChoiceSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
	k := min(r.Choose, len(pool))
	choices := make([]*Upstream, k)
	// reservoir-style sampling of available hosts into the first k slots.
	// NOTE(review): j is drawn from the pool index i rather than from the
	// count of available hosts considered so far, and slots can remain nil
	// when fewer than k hosts land in them — presumably leastRequests
	// tolerates nil entries; verify.
	for i, upstream := range pool {
		if !upstream.Available() {
			continue
		}
		j := weakrand.IntN(i + 1) //nolint:gosec
		if j < k {
			choices[j] = upstream
		}
	}
	return leastRequests(choices)
}

// LeastConnSelection is a policy that selects the
// host with the least active requests. If multiple
// hosts have the same fewest number, one is chosen
// randomly. The term "conn" or "connection" is used
// in this policy name due to its similar meaning in
// other software, but our load balancer actually
// counts active requests rather than connections,
// since these days requests are multiplexed onto
// shared connections.
type LeastConnSelection struct{}

// CaddyModule returns the Caddy module information.
func (LeastConnSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.least_conn",
		New: func() caddy.Module { return new(LeastConnSelection) },
	}
}

// Select selects the up host with the least number of connections in the
// pool. If more than one host has the same least number of connections,
// one of the hosts is chosen at random.
func (LeastConnSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
	var bestHost *Upstream
	var count int
	leastReqs := -1

	for _, host := range pool {
		if !host.Available() {
			continue
		}
		numReqs := host.NumRequests()
		if leastReqs == -1 || numReqs < leastReqs {
			leastReqs = numReqs
			count = 0
		}

		// among hosts with same least connections, perform a reservoir
		// sample: https://en.wikipedia.org/wiki/Reservoir_sampling
		if numReqs == leastReqs {
			count++
			if count == 1 || weakrand.IntN(count) == 0 { //nolint:gosec
				bestHost = host
			}
		}
	}

	return bestHost
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *LeastConnSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// RoundRobinSelection is a policy that selects
// a host based on round-robin ordering.
type RoundRobinSelection struct {
	// robin is a monotonically increasing counter; accessed atomically.
	robin uint32
}

// CaddyModule returns the Caddy module information.
func (RoundRobinSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.round_robin",
		New: func() caddy.Module { return new(RoundRobinSelection) },
	}
}

// Select returns an available host, if any.
func (r *RoundRobinSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
	n := uint32(len(pool))
	if n == 0 {
		return nil
	}
	// advance the counter at most n times, returning the first
	// available host encountered in rotation order
	for range n {
		robin := atomic.AddUint32(&r.robin, 1)
		host := pool[robin%n]
		if host.Available() {
			return host
		}
	}
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *RoundRobinSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// FirstSelection is a policy that selects
// the first available host.
type FirstSelection struct{}

// CaddyModule returns the Caddy module information.
func (FirstSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.first",
		New: func() caddy.Module { return new(FirstSelection) },
	}
}

// Select returns an available host, if any.
func (FirstSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
	for _, host := range pool {
		if host.Available() {
			return host
		}
	}
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *FirstSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// IPHashSelection is a policy that selects a host
// based on hashing the remote IP of the request.
type IPHashSelection struct{}

// CaddyModule returns the Caddy module information.
func (IPHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.ip_hash",
		New: func() caddy.Module { return new(IPHashSelection) },
	}
}

// Select returns an available host, if any.
func (IPHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
	clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		clientIP = req.RemoteAddr // no port present
	}
	return hostByHashing(pool, clientIP)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *IPHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// ClientIPHashSelection is a policy that selects a host
// based on hashing the client IP of the request, as determined
// by the HTTP app's trusted proxies settings.
type ClientIPHashSelection struct{}

// CaddyModule returns the Caddy module information.
func (ClientIPHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.client_ip_hash",
		New: func() caddy.Module { return new(ClientIPHashSelection) },
	}
}

// Select returns an available host, if any.
func (ClientIPHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
	address := caddyhttp.GetVar(req.Context(), caddyhttp.ClientIPVarKey).(string)
	clientIP, _, err := net.SplitHostPort(address)
	if err != nil {
		clientIP = address // no port
	}
	return hostByHashing(pool, clientIP)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *ClientIPHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// URIHashSelection is a policy that selects a
// host by hashing the request URI.
type URIHashSelection struct{}

// CaddyModule returns the Caddy module information.
func (URIHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.uri_hash",
		New: func() caddy.Module { return new(URIHashSelection) },
	}
}

// Select returns an available host, if any.
func (URIHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
	return hostByHashing(pool, req.RequestURI)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// This policy takes no arguments.
func (r *URIHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if d.NextArg() {
		return d.ArgErr()
	}
	return nil
}

// QueryHashSelection is a policy that selects
// a host based on a given request query parameter.
type QueryHashSelection struct {
	// The query key whose value is to be hashed and used for upstream selection.
	Key string `json:"key,omitempty"`

	// The fallback policy to use if the query key is not present. Defaults to `random`.
	FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
	// fallback is the decoded form of FallbackRaw; set at provision time.
	fallback Selector
}

// CaddyModule returns the Caddy module information.
func (QueryHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.query",
		New: func() caddy.Module { return new(QueryHashSelection) },
	}
}

// Provision sets up the module: validates the key and loads the
// fallback policy (defaulting to random selection).
func (s *QueryHashSelection) Provision(ctx caddy.Context) error {
	if s.Key == "" {
		return fmt.Errorf("query key is required")
	}
	if s.FallbackRaw == nil {
		s.FallbackRaw = caddyconfig.JSONModuleObject(RandomSelection{}, "policy", "random", nil)
	}
	mod, err := ctx.LoadModule(s, "FallbackRaw")
	if err != nil {
		return fmt.Errorf("loading fallback selection policy: %s", err)
	}
	s.fallback = mod.(Selector)
	return nil
}

// Select returns an available host, if any.
func (s QueryHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
	// Since the query may have multiple values for the same key,
	// we'll join them to avoid a problem where the user can control
	// the upstream that the request goes to by sending multiple values
	// for the same key, when the upstream only considers the first value.
	// Keep in mind that a client changing the order of the values may
	// affect which upstream is selected, but this is a semantically
	// different request, because the order of the values is significant.
	vals := strings.Join(req.URL.Query()[s.Key], ",")
	if vals == "" {
		return s.fallback.Select(pool, req, nil)
	}
	return hostByHashing(pool, vals)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (s *QueryHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if !d.NextArg() {
		return d.ArgErr()
	}
	s.Key = d.Val()

	for d.NextBlock(0) {
		switch d.Val() {
		case "fallback":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if s.FallbackRaw != nil {
				return d.Err("fallback selection policy already specified")
			}
			mod, err := loadFallbackPolicy(d)
			if err != nil {
				return err
			}
			s.FallbackRaw = mod
		default:
			return d.Errf("unrecognized option '%s'", d.Val())
		}
	}
	return nil
}

// HeaderHashSelection is a policy that selects
// a host based on a given request header.
type HeaderHashSelection struct {
	// The HTTP header field whose value is to be hashed and used for upstream selection.
	Field string `json:"field,omitempty"`

	// The fallback policy to use if the header is not present. Defaults to `random`.
	FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
	// fallback is the decoded form of FallbackRaw; set at provision time.
	fallback Selector
}

// CaddyModule returns the Caddy module information.
func (HeaderHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.header",
		New: func() caddy.Module { return new(HeaderHashSelection) },
	}
}

// Provision sets up the module: validates the field and loads the
// fallback policy (defaulting to random selection).
func (s *HeaderHashSelection) Provision(ctx caddy.Context) error {
	if s.Field == "" {
		return fmt.Errorf("header field is required")
	}
	if s.FallbackRaw == nil {
		s.FallbackRaw = caddyconfig.JSONModuleObject(RandomSelection{}, "policy", "random", nil)
	}
	mod, err := ctx.LoadModule(s, "FallbackRaw")
	if err != nil {
		return fmt.Errorf("loading fallback selection policy: %s", err)
	}
	s.fallback = mod.(Selector)
	return nil
}

// Select returns an available host, if any.
func (s HeaderHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream {
	// The Host header should be obtained from the req.Host field
	// since net/http removes it from the header map.
	if s.Field == "Host" && req.Host != "" {
		return hostByHashing(pool, req.Host)
	}

	val := req.Header.Get(s.Field)
	if val == "" {
		return s.fallback.Select(pool, req, nil)
	}
	return hostByHashing(pool, val)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (s *HeaderHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume policy name

	if !d.NextArg() {
		return d.ArgErr()
	}
	s.Field = d.Val()

	for d.NextBlock(0) {
		switch d.Val() {
		case "fallback":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if s.FallbackRaw != nil {
				return d.Err("fallback selection policy already specified")
			}
			mod, err := loadFallbackPolicy(d)
			if err != nil {
				return err
			}
			s.FallbackRaw = mod
		default:
			return d.Errf("unrecognized option '%s'", d.Val())
		}
	}
	return nil
}

// CookieHashSelection is a policy that selects
// a host based on a given cookie name.
type CookieHashSelection struct {
	// The HTTP cookie name whose value is to be hashed and used for upstream selection.
	Name string `json:"name,omitempty"`
	// Secret to hash (Hmac256) chosen upstream in cookie
	Secret string `json:"secret,omitempty"` //nolint:gosec // yes it's exported because it needs to encode to JSON
	// The cookie's Max-Age before it expires. Default is no expiry.
	MaxAge caddy.Duration `json:"max_age,omitempty"`

	// The fallback policy to use if the cookie is not present. Defaults to `random`.
	FallbackRaw json.RawMessage `json:"fallback,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
	// fallback is the decoded form of FallbackRaw; set at provision time.
	fallback Selector
}

// CaddyModule returns the Caddy module information.
func (CookieHashSelection) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.selection_policies.cookie",
		New: func() caddy.Module { return new(CookieHashSelection) },
	}
}

// Provision sets up the module: applies the default cookie name ("lb")
// and loads the fallback policy (defaulting to random selection).
func (s *CookieHashSelection) Provision(ctx caddy.Context) error {
	if s.Name == "" {
		s.Name = "lb"
	}
	if s.FallbackRaw == nil {
		s.FallbackRaw = caddyconfig.JSONModuleObject(RandomSelection{}, "policy", "random", nil)
	}
	mod, err := ctx.LoadModule(s, "FallbackRaw")
	if err != nil {
		return fmt.Errorf("loading fallback selection policy: %s", err)
	}
	s.fallback = mod.(Selector)
	return nil
}

// Select returns an available host, if any.
func (s CookieHashSelection) Select(pool UpstreamPool, req *http.Request, w http.ResponseWriter) *Upstream {
	// selects a new Host using the fallback policy (typically random)
	// and write a sticky session cookie to the response.
	selectNewHost := func() *Upstream {
		upstream := s.fallback.Select(pool, req, w)
		if upstream == nil {
			return nil
		}
		sha, err := hashCookie(s.Secret, upstream.Dial)
		if err != nil {
			// can't produce a sticky cookie; still return the selection
			return upstream
		}
		cookie := &http.Cookie{
			Name:   s.Name,
			Value:  sha,
			Path:   "/",
			Secure: false,
		}
		isProxyHttps := false
		if trusted, ok := caddyhttp.GetVar(req.Context(), caddyhttp.TrustedProxyVarKey).(bool); ok && trusted {
			// see if a trusted upstream proxy terminated TLS for us
			xfp, xfpOk, _ := lastHeaderValue(req.Header, "X-Forwarded-Proto")
			isProxyHttps = xfpOk && xfp == "https"
		}
		if req.TLS != nil || isProxyHttps {
			cookie.Secure = true
			cookie.SameSite = http.SameSiteNoneMode
		}
		if s.MaxAge > 0 {
			cookie.MaxAge = int(time.Duration(s.MaxAge).Seconds())
		}
		http.SetCookie(w, cookie)
		return upstream
	}

	cookie, err := req.Cookie(s.Name)
	// If there's no cookie, select a host using the fallback policy
	if err != nil || cookie == nil {
		return selectNewHost()
	}
	// If the cookie is present, loop over the available upstreams until we find a match
	cookieValue := cookie.Value
	for _, upstream := range pool {
		if !upstream.Available() {
			continue
		}
		sha, err := hashCookie(s.Secret, upstream.Dial)
		if err == nil && sha == cookieValue {
			return upstream
		}
	}
	// If there is no matching host, select a host using the fallback policy
	return selectNewHost()
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	lb_policy cookie [<name> [<secret>]] {
//		fallback <policy>
//		max_age <duration>
//	}
//
// By default name is `lb`
func (s *CookieHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	args := d.RemainingArgs()
	switch len(args) {
	case 1:
		// just the policy name; defaults apply
	case 2:
		s.Name = args[1]
	case 3:
		s.Name = args[1]
		s.Secret = args[2]
	default:
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "fallback":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if s.FallbackRaw != nil {
				return d.Err("fallback selection policy already specified")
			}
			mod, err := loadFallbackPolicy(d)
			if err != nil {
				return err
			}
			s.FallbackRaw = mod
		case "max_age":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if s.MaxAge != 0 {
				return d.Err("cookie max_age already specified")
			}
			maxAge, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("invalid duration: %s", d.Val())
			}
			if maxAge <= 0 {
				return d.Errf("invalid duration: %s, max_age should be non-zero and positive", d.Val())
			}
			if d.NextArg() {
				return d.ArgErr()
			}
			s.MaxAge = caddy.Duration(maxAge)
		default:
			return d.Errf("unrecognized option '%s'", d.Val())
		}
	}
	return nil
}

// hashCookie hashes (HMAC 256) some data with the secret
func hashCookie(secret string, data string) (string, error) {
	h := hmac.New(sha256.New, []byte(secret))
	_, err := h.Write([]byte(data))
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// selectRandomHost returns a random available host
func selectRandomHost(pool []*Upstream) *Upstream {
	// use reservoir sampling because the number of available
	// hosts isn't known: https://en.wikipedia.org/wiki/Reservoir_sampling
	var randomHost *Upstream
	var count int
	for _, upstream := range pool {
		if !upstream.Available() {
			continue
		}
		// (n % 1 == 0) holds for all n, therefore a
		// upstream will always be chosen if there is at
		// least one available
		count++
		if weakrand.IntN(count) == 0 { //nolint:gosec
			randomHost = upstream
		}
	}
	return randomHost
}

// leastRequests returns the host with the
// least number of active requests to it.
// If more than one host has the same // least number of active requests, then // one of those is chosen at random. func leastRequests(upstreams []*Upstream) *Upstream { if len(upstreams) == 0 { return nil } var best []*Upstream bestReqs := -1 for _, upstream := range upstreams { if upstream == nil { continue } reqs := upstream.NumRequests() if reqs == 0 { return upstream } // If bestReqs was just initialized to -1 // we need to append upstream also if reqs <= bestReqs || bestReqs == -1 { bestReqs = reqs best = append(best, upstream) } } if len(best) == 0 { return nil } if len(best) == 1 { return best[0] } return best[weakrand.IntN(len(best))] //nolint:gosec } // hostByHashing returns an available host from pool based on a hashable string s. func hostByHashing(pool []*Upstream, s string) *Upstream { // Highest Random Weight (HRW, or "Rendezvous") hashing, // guarantees stability when the list of upstreams changes; // see https://medium.com/i0exception/rendezvous-hashing-8c00e2fb58b0, // https://randorithms.com/2020/12/26/rendezvous-hashing.html, // and https://en.wikipedia.org/wiki/Rendezvous_hashing. var highestHash uint64 var upstream *Upstream for _, up := range pool { if !up.Available() { continue } h := hash(up.String() + s) // important to hash key and server together if h > highestHash { highestHash = h upstream = up } } return upstream } // hash calculates a fast hash based on s. func hash(s string) uint64 { h := xxhash.New() _, _ = h.Write([]byte(s)) return h.Sum64() } func loadFallbackPolicy(d *caddyfile.Dispenser) (json.RawMessage, error) { name := d.Val() modID := "http.reverse_proxy.selection_policies." 
+ name unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return nil, err } sel, ok := unm.(Selector) if !ok { return nil, d.Errf("module %s (%T) is not a reverseproxy.Selector", modID, unm) } return caddyconfig.JSONModuleObject(sel, "policy", name, nil), nil } // Interface guards var ( _ Selector = (*RandomSelection)(nil) _ Selector = (*RandomChoiceSelection)(nil) _ Selector = (*LeastConnSelection)(nil) _ Selector = (*RoundRobinSelection)(nil) _ Selector = (*WeightedRoundRobinSelection)(nil) _ Selector = (*FirstSelection)(nil) _ Selector = (*IPHashSelection)(nil) _ Selector = (*ClientIPHashSelection)(nil) _ Selector = (*URIHashSelection)(nil) _ Selector = (*QueryHashSelection)(nil) _ Selector = (*HeaderHashSelection)(nil) _ Selector = (*CookieHashSelection)(nil) _ caddy.Validator = (*RandomChoiceSelection)(nil) _ caddy.Provisioner = (*RandomChoiceSelection)(nil) _ caddy.Provisioner = (*WeightedRoundRobinSelection)(nil) _ caddyfile.Unmarshaler = (*RandomChoiceSelection)(nil) _ caddyfile.Unmarshaler = (*WeightedRoundRobinSelection)(nil) ) ================================================ FILE: modules/caddyhttp/reverseproxy/selectionpolicies_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package reverseproxy

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// testPool returns a small three-host pool used by most tests below.
func testPool() UpstreamPool {
	return UpstreamPool{
		{Host: new(Host), Dial: "0.0.0.1"},
		{Host: new(Host), Dial: "0.0.0.2"},
		{Host: new(Host), Dial: "0.0.0.3"},
	}
}

func TestRoundRobinPolicy(t *testing.T) {
	pool := testPool()
	rrPolicy := RoundRobinSelection{}
	req, _ := http.NewRequest("GET", "/", nil)

	h := rrPolicy.Select(pool, req, nil)
	// First selected host is 1, because counter starts at 0
	// and increments before host is selected
	if h != pool[1] {
		t.Error("Expected first round robin host to be second host in the pool.")
	}
	h = rrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected second round robin host to be third host in the pool.")
	}
	h = rrPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected third round robin host to be first host in the pool.")
	}

	// mark host as down
	pool[1].setHealthy(false)
	h = rrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected to skip down host.")
	}
	// mark host as up
	pool[1].setHealthy(true)

	h = rrPolicy.Select(pool, req, nil)
	if h == pool[2] {
		t.Error("Expected to balance evenly among healthy hosts")
	}

	// mark host as full
	pool[1].countRequest(1)
	pool[1].MaxRequests = 1
	h = rrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected to skip full host.")
	}
}

func TestWeightedRoundRobinPolicy(t *testing.T) {
	pool := testPool()
	wrrPolicy := WeightedRoundRobinSelection{
		Weights:     []int{3, 2, 1},
		totalWeight: 6,
	}
	req, _ := http.NewRequest("GET", "/", nil)

	h := wrrPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected first weighted round robin host to be first host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected second weighted round robin host to be first host in the pool.")
	}
	// Third selected host is 1, because counter
	// starts at 0 and increments before host is selected
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected third weighted round robin host to be second host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected fourth weighted round robin host to be second host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected fifth weighted round robin host to be third host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected sixth weighted round robin host to be first host in the pool.")
	}

	// mark host as down
	pool[0].setHealthy(false)
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected to skip down host.")
	}
	// mark host as up
	pool[0].setHealthy(true)
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected to select first host on availability.")
	}

	// mark host as full
	pool[1].countRequest(1)
	pool[1].MaxRequests = 1
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected to skip full host.")
	}
}

func TestWeightedRoundRobinPolicyWithZeroWeight(t *testing.T) {
	pool := testPool()
	wrrPolicy := WeightedRoundRobinSelection{
		Weights:     []int{0, 2, 1},
		totalWeight: 3,
	}
	req, _ := http.NewRequest("GET", "/", nil)

	h := wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected first weighted round robin host to be second host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected second weighted round robin host to be third host in the pool.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected third weighted round robin host to be second host in the pool.")
	}

	// mark second host as down
	pool[1].setHealthy(false)
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expect select next available host.")
	}
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expect select only available host.")
	}
	// mark second host as up
	pool[1].setHealthy(true)
	h = wrrPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expect select first host on availability.")
	}

	// test next select in full cycle
	expected := []*Upstream{pool[1], pool[2], pool[1], pool[1], pool[2], pool[1]}
	for i, want := range expected {
		got := wrrPolicy.Select(pool, req, nil)
		if want != got {
			t.Errorf("Selection %d: got host[%s], want host[%s]", i+1, got, want)
		}
	}
}

func TestLeastConnPolicy(t *testing.T) {
	pool := testPool()
	lcPolicy := LeastConnSelection{}
	req, _ := http.NewRequest("GET", "/", nil)

	pool[0].countRequest(10)
	pool[1].countRequest(10)
	h := lcPolicy.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected least connection host to be third host.")
	}
	pool[2].countRequest(100)
	h = lcPolicy.Select(pool, req, nil)
	if h != pool[0] && h != pool[1] {
		t.Error("Expected least connection host to be first or second host.")
	}
}

func TestIPHashPolicy(t *testing.T) {
	pool := testPool()
	ipHash := IPHashSelection{}
	req, _ := http.NewRequest("GET", "/", nil)

	// We should be able to predict where every request is routed.
	req.RemoteAddr = "172.0.0.1:80"
	h := ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.2:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}
	req.RemoteAddr = "172.0.0.3:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.4:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// we should get the same results without a port
	req.RemoteAddr = "172.0.0.1"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.2"
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}
	req.RemoteAddr = "172.0.0.3"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.4"
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// we should get a healthy host if the original host is unhealthy and a
	// healthy host is available
	req.RemoteAddr = "172.0.0.4"
	pool[1].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected ip hash policy host to be the third host.")
	}

	req.RemoteAddr = "172.0.0.2"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	pool[1].setHealthy(true)

	req.RemoteAddr = "172.0.0.3"
	pool[2].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.4"
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// We should be able to resize the host pool and still be able to predict
	// where a req will be routed with the same IP's used above
	pool = UpstreamPool{
		{Host: new(Host), Dial: "0.0.0.2"},
		{Host: new(Host), Dial: "0.0.0.3"},
	}
	req.RemoteAddr = "172.0.0.1:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.2:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.3:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	req.RemoteAddr = "172.0.0.4:80"
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}

	// We should get nil when there are no healthy hosts
	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != nil {
		t.Error("Expected ip hash policy host to be nil.")
	}

	// Reproduce #4135
	pool = UpstreamPool{
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
	}
	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	pool[2].setHealthy(false)
	pool[3].setHealthy(false)
	pool[4].setHealthy(false)
	pool[5].setHealthy(false)
	pool[6].setHealthy(false)
	pool[7].setHealthy(false)
	pool[8].setHealthy(true)

	// We should get a result back when there is one healthy host left.
	h = ipHash.Select(pool, req, nil)
	if h == nil {
		// If it is nil, it means we missed a host even though one is available
		t.Error("Expected ip hash policy host to not be nil, but it is nil.")
	}
}

func TestClientIPHashPolicy(t *testing.T) {
	pool := testPool()
	ipHash := ClientIPHashSelection{}
	req, _ := http.NewRequest("GET", "/", nil)
	req = req.WithContext(context.WithValue(req.Context(), caddyhttp.VarsCtxKey, make(map[string]any)))

	// We should be able to predict where every request is routed.
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.1:80")
	h := ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.2:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.3:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.4:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// we should get the same results without a port
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.1")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.2")
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.3")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.4")
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// we should get a healthy host if the original host is unhealthy and a
	// healthy host is available
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.4")
	pool[1].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != pool[2] {
		t.Error("Expected ip hash policy host to be the third host.")
	}

	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.2")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	pool[1].setHealthy(true)

	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.3")
	pool[2].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.4")
	h = ipHash.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected ip hash policy host to be the second host.")
	}

	// We should be able to resize the host pool and still be able to predict
	// where a req will be routed with the same IP's used above
	pool = UpstreamPool{
		{Host: new(Host), Dial: "0.0.0.2"},
		{Host: new(Host), Dial: "0.0.0.3"},
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.1:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.2:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.3:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}
	caddyhttp.SetVar(req.Context(), caddyhttp.ClientIPVarKey, "172.0.0.4:80")
	h = ipHash.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected ip hash policy host to be the first host.")
	}

	// We should get nil when there are no healthy hosts
	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	h = ipHash.Select(pool, req, nil)
	if h != nil {
		t.Error("Expected ip hash policy host to be nil.")
	}

	// Reproduce #4135
	pool = UpstreamPool{
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
		{Host: new(Host)},
	}
	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	pool[2].setHealthy(false)
	pool[3].setHealthy(false)
	pool[4].setHealthy(false)
	pool[5].setHealthy(false)
	pool[6].setHealthy(false)
	pool[7].setHealthy(false)
	pool[8].setHealthy(true)

	// We should get a result back when there is one healthy host left.
	h = ipHash.Select(pool, req, nil)
	if h == nil {
		// If it is nil, it means we missed a host even though one is available
		t.Error("Expected ip hash policy host to not be nil, but it is nil.")
	}
}

func TestFirstPolicy(t *testing.T) {
	pool := testPool()
	firstPolicy := FirstSelection{}
	req := httptest.NewRequest(http.MethodGet, "/", nil)

	h := firstPolicy.Select(pool, req, nil)
	if h != pool[0] {
		t.Error("Expected first policy host to be the first host.")
	}

	pool[0].setHealthy(false)
	h = firstPolicy.Select(pool, req, nil)
	if h != pool[1] {
		t.Error("Expected first policy host to be the second host.")
	}
}

func TestQueryHashPolicy(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	queryPolicy := QueryHashSelection{Key: "foo"}
	if err := queryPolicy.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	pool := testPool()

	request := httptest.NewRequest(http.MethodGet, "/?foo=1", nil)
	h := queryPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected query policy host to be the first host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=100000", nil)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected query policy host to be the second host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=1", nil)
	pool[0].setHealthy(false)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[2] {
		t.Error("Expected query policy host to be the third host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=100000", nil)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected query policy host to be the second host.")
	}

	// We should be able to resize the host pool and still be able to predict
	// where a request will be routed with the same query used above
	pool = UpstreamPool{
		{Host: new(Host)},
		{Host: new(Host)},
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=1", nil)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected query policy host to be the first host.")
	}

	pool[0].setHealthy(false)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected query policy host to be the second host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=4", nil)
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected query policy host to be the second host.")
	}

	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	h = queryPolicy.Select(pool, request, nil)
	if h != nil {
		t.Error("Expected query policy policy host to be nil.")
	}

	request = httptest.NewRequest(http.MethodGet, "/?foo=aa11&foo=bb22", nil)
	pool = testPool()
	h = queryPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected query policy host to be the first host.")
	}
}

func TestURIHashPolicy(t *testing.T) {
	pool := testPool()
	uriPolicy := URIHashSelection{}

	request := httptest.NewRequest(http.MethodGet, "/test", nil)
	h := uriPolicy.Select(pool, request, nil)
	if h != pool[2] {
		t.Error("Expected uri policy host to be the third host.")
	}

	pool[2].setHealthy(false)
	h = uriPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected uri policy host to be the first host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/test_2", nil)
	h = uriPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected uri policy host to be the first host.")
	}

	// We should be able to resize the host pool and still be able to predict
	// where a request will be routed with the same URI's used above
	pool = UpstreamPool{
		{Host: new(Host)},
		{Host: new(Host)},
	}

	request = httptest.NewRequest(http.MethodGet, "/test", nil)
	h = uriPolicy.Select(pool, request, nil)
	if h != pool[0] {
		t.Error("Expected uri policy host to be the first host.")
	}

	pool[0].setHealthy(false)
	h = uriPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected uri policy host to be the first host.")
	}

	request = httptest.NewRequest(http.MethodGet, "/test_2", nil)
	h = uriPolicy.Select(pool, request, nil)
	if h != pool[1] {
		t.Error("Expected uri policy host to be the second host.")
	}

	pool[0].setHealthy(false)
	pool[1].setHealthy(false)
	h = uriPolicy.Select(pool, request, nil)
	if h != nil {
		t.Error("Expected uri policy policy host to be nil.")
	}
}

func TestLeastRequests(t *testing.T) {
	pool := testPool()
	pool[0].Dial = "localhost:8080"
	pool[1].Dial = "localhost:8081"
	pool[2].Dial = "localhost:8082"
	pool[0].setHealthy(true)
	pool[1].setHealthy(true)
	pool[2].setHealthy(true)
	pool[0].countRequest(10)
	pool[1].countRequest(20)
	pool[2].countRequest(30)

	result := leastRequests(pool)

	if result == nil {
		t.Error("Least request should not return nil")
	}

	if result != pool[0] {
		t.Error("Least request should return pool[0]")
	}
}

func TestRandomChoicePolicy(t *testing.T) {
	pool := testPool()
	pool[0].Dial = "localhost:8080"
	pool[1].Dial = "localhost:8081"
	pool[2].Dial = "localhost:8082"
	pool[0].setHealthy(false)
	pool[1].setHealthy(true)
	pool[2].setHealthy(true)
	pool[0].countRequest(10)
	pool[1].countRequest(20)
	pool[2].countRequest(30)

	request := httptest.NewRequest(http.MethodGet, "/test", nil)
	randomChoicePolicy := RandomChoiceSelection{Choose: 2}

	h := randomChoicePolicy.Select(pool, request, nil)

	if h == nil {
		t.Error("RandomChoicePolicy should not return nil")
	}

	if h == pool[0] {
		t.Error("RandomChoicePolicy should not choose pool[0]")
	}
}

func TestCookieHashPolicy(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	cookieHashPolicy := CookieHashSelection{}
	if err := cookieHashPolicy.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	pool := testPool()
	pool[0].Dial = "localhost:8080"
	pool[1].Dial = "localhost:8081"
	pool[2].Dial = "localhost:8082"
	pool[0].setHealthy(true)
	pool[1].setHealthy(false)
	pool[2].setHealthy(false)

	request := httptest.NewRequest(http.MethodGet, "/test", nil)
	w := httptest.NewRecorder()

	h := cookieHashPolicy.Select(pool, request, w)
	cookieServer1 := w.Result().Cookies()[0]
	if cookieServer1 == nil {
		t.Fatal("cookieHashPolicy should set a cookie")
	}
	if cookieServer1.Name != "lb" {
		t.Error("cookieHashPolicy should set a cookie with name lb")
	}
	if cookieServer1.Secure {
		t.Error("cookieHashPolicy should set cookie Secure attribute to false when request is not secure")
	}
	if h != pool[0] {
		t.Error("Expected cookieHashPolicy host to be the first only available host.")
	}

	pool[1].setHealthy(true)
	pool[2].setHealthy(true)
	request = httptest.NewRequest(http.MethodGet, "/test", nil)
	w = httptest.NewRecorder()
	request.AddCookie(cookieServer1)
	h = cookieHashPolicy.Select(pool, request, w)
	if h != pool[0] {
		t.Error("Expected cookieHashPolicy host to stick to the first host (matching cookie).")
	}
	s := w.Result().Cookies()
	if len(s) != 0 {
		t.Error("Expected cookieHashPolicy to not set a new cookie.")
	}

	pool[0].setHealthy(false)
	request = httptest.NewRequest(http.MethodGet, "/test", nil)
	w = httptest.NewRecorder()
	request.AddCookie(cookieServer1)
	h = cookieHashPolicy.Select(pool, request, w)
	if h == pool[0] {
		t.Error("Expected cookieHashPolicy to select a new host.")
	}
	if w.Result().Cookies() == nil {
		t.Error("Expected cookieHashPolicy to set a new cookie.")
	}
}

func TestCookieHashPolicyWithSecureRequest(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	cookieHashPolicy := CookieHashSelection{}
	if err := cookieHashPolicy.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	pool := testPool()
	pool[0].Dial = "localhost:8080"
	pool[1].Dial = "localhost:8081"
	pool[2].Dial = "localhost:8082"
	pool[0].setHealthy(true)
	pool[1].setHealthy(false)
	pool[2].setHealthy(false)

	// Create a test server that serves HTTPS requests
	ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := cookieHashPolicy.Select(pool, r, w)
		if h != pool[0] {
			t.Error("Expected cookieHashPolicy host to be the first only available host.")
		}
	}))
	defer ts.Close()

	// Make a new HTTPS request to the test server
	client := ts.Client()
	request, err := http.NewRequest(http.MethodGet, ts.URL+"/test", nil)
	if err != nil {
		t.Fatal(err)
	}
	response, err := client.Do(request)
	if err != nil {
		t.Fatal(err)
	}

	// Check if the cookie set is Secure and has SameSiteNone mode
	cookies := response.Cookies()
	if len(cookies) == 0 {
		t.Fatal("Expected a cookie to be set")
	}
	cookie := cookies[0]
	if !cookie.Secure {
		t.Error("Expected cookie Secure attribute to be true when request is secure")
	}
	if cookie.SameSite != http.SameSiteNoneMode {
		t.Error("Expected cookie SameSite attribute to be None when request is secure")
	}
}

func TestCookieHashPolicyWithFirstFallback(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()
	cookieHashPolicy := CookieHashSelection{
		FallbackRaw: caddyconfig.JSONModuleObject(FirstSelection{}, "policy", "first", nil),
	}
	if err := cookieHashPolicy.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	pool := testPool()
	pool[0].Dial = "localhost:8080"
	pool[1].Dial = "localhost:8081"
	pool[2].Dial = "localhost:8082"
	pool[0].setHealthy(true)
	pool[1].setHealthy(true)
	pool[2].setHealthy(true)

	request := httptest.NewRequest(http.MethodGet, "/test", nil)
	w := httptest.NewRecorder()

	h := cookieHashPolicy.Select(pool, request, w)
	cookieServer1 := w.Result().Cookies()[0]
	if cookieServer1 == nil {
		t.Fatal("cookieHashPolicy should set a cookie")
	}
	if cookieServer1.Name != "lb" {
		t.Error("cookieHashPolicy should set a cookie with name lb")
	}
	if h != pool[0] {
		t.Errorf("Expected cookieHashPolicy host to be the first only available host, got %s", h)
	}

	request = httptest.NewRequest(http.MethodGet, "/test", nil)
	w = httptest.NewRecorder()
	request.AddCookie(cookieServer1)
	h = cookieHashPolicy.Select(pool, request, w)
	if h != pool[0] {
		t.Errorf("Expected cookieHashPolicy host to stick to the first host (matching cookie), got %s", h)
	}
	s := w.Result().Cookies()
	if len(s) != 0 {
		t.Error("Expected cookieHashPolicy to not set a new cookie.")
	}

	pool[0].setHealthy(false)
	request = httptest.NewRequest(http.MethodGet, "/test", nil)
	w = httptest.NewRecorder()
	request.AddCookie(cookieServer1)
	h = cookieHashPolicy.Select(pool, request, w)
	if h != pool[1] {
		t.Errorf("Expected cookieHashPolicy to select the next first available host, got %s", h)
	}
	if w.Result().Cookies() == nil {
		t.Error("Expected cookieHashPolicy to set a new cookie.")
	}
}

================================================
FILE: modules/caddyhttp/reverseproxy/streaming.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Most of the code in this file was initially borrowed from the Go
// standard library and modified; It had this copyright notice:
// Copyright 2011 The Go Authors

package reverseproxy

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	weakrand "math/rand/v2"
	"mime"
	"net/http"
	"sync"
	"time"
	"unsafe"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/net/http/httpguts"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// h2ReadWriteCloser glues an HTTP/2 (or HTTP/3) request body and response
// writer into a single io.ReadWriteCloser so upgraded (extended CONNECT)
// streams can be treated like a hijacked connection: reads come from the
// request body; writes go to the ResponseWriter and are flushed immediately.
type h2ReadWriteCloser struct {
	io.ReadCloser
	http.ResponseWriter
}

// Write writes p to the ResponseWriter and flushes right away so streamed
// data is not held back in server buffers. On any error it reports 0 bytes
// written.
func (rwc h2ReadWriteCloser) Write(p []byte) (n int, err error) {
	n, err = rwc.ResponseWriter.Write(p)
	if err != nil {
		return 0, err
	}
	//nolint:bodyclose
	err = http.NewResponseController(rwc.ResponseWriter).Flush()
	if err != nil {
		return 0, err
	}
	return n, nil
}

// handleUpgradeResponse completes a protocol switch (e.g. WebSocket): it
// validates the backend's Upgrade handshake against the client's request,
// takes over the client connection (via hijack, or via the h2/h3 extended
// CONNECT body adapter), registers both connections for shutdown cleanup,
// and then copies bytes in both directions until one side fails or the
// optional stream timeout fires.
func (h *Handler) handleUpgradeResponse(logger *zap.Logger, wg *sync.WaitGroup, rw http.ResponseWriter, req *http.Request, res *http.Response) {
	reqUpType := upgradeType(req.Header)
	resUpType := upgradeType(res.Header)

	// Taken from https://github.com/golang/go/commit/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a
	// We know reqUpType is ASCII, it's checked by the caller.
	if !asciiIsPrint(resUpType) {
		if c := logger.Check(zapcore.DebugLevel, "backend tried to switch to invalid protocol"); c != nil {
			c.Write(zap.String("backend_upgrade", resUpType))
		}
		return
	}
	if !asciiEqualFold(reqUpType, resUpType) {
		if c := logger.Check(zapcore.DebugLevel, "backend tried to switch to unexpected protocol via Upgrade header"); c != nil {
			c.Write(
				zap.String("backend_upgrade", resUpType),
				zap.String("requested_upgrade", reqUpType),
			)
		}
		return
	}

	// for a 101 response the transport hands us the raw backend
	// connection as the response body
	backConn, ok := res.Body.(io.ReadWriteCloser)
	if !ok {
		logger.Error("internal error: 101 switching protocols response with non-writable body")
		return
	}

	// write header first, response headers should not be counted in size
	// like the rest of handler chain.
	copyHeader(rw.Header(), res.Header)
	normalizeWebsocketHeaders(rw.Header())

	var (
		conn io.ReadWriteCloser
		brw  *bufio.ReadWriter
	)
	// websocket over http2 or http3 if extended connect is enabled, assuming backend doesn't support this, the request will be modified to http1.1 upgrade
	// TODO: once we can reliably detect backend support this, it can be removed for those backends
	if body, ok := caddyhttp.GetVar(req.Context(), "extended_connect_websocket_body").(io.ReadCloser); ok {
		req.Body = body
		// strip the HTTP/1.1 upgrade artifacts; extended CONNECT uses 200 OK
		rw.Header().Del("Upgrade")
		rw.Header().Del("Connection")
		delete(rw.Header(), "Sec-WebSocket-Accept")
		rw.WriteHeader(http.StatusOK)

		if c := logger.Check(zap.DebugLevel, "upgrading connection"); c != nil {
			c.Write(zap.Int("http_version", 2))
		}

		//nolint:bodyclose
		flushErr := http.NewResponseController(rw).Flush()
		if flushErr != nil {
			if c := h.logger.Check(zap.ErrorLevel, "failed to flush http2 websocket response"); c != nil {
				c.Write(zap.Error(flushErr))
			}
			return
		}
		conn = h2ReadWriteCloser{req.Body, rw}
		// bufio is not needed, use minimal buffer
		brw = bufio.NewReadWriter(bufio.NewReaderSize(conn, 1), bufio.NewWriterSize(conn, 1))
	} else {
		rw.WriteHeader(res.StatusCode)

		if c := logger.Check(zap.DebugLevel, "upgrading connection"); c != nil {
			c.Write(zap.Int("http_version", req.ProtoMajor))
		}

		var hijackErr error
		//nolint:bodyclose
		conn, brw, hijackErr = http.NewResponseController(rw).Hijack()
		if errors.Is(hijackErr, http.ErrNotSupported) {
			if c := h.logger.Check(zap.ErrorLevel, "can't switch protocols using non-Hijacker ResponseWriter"); c != nil {
				c.Write(zap.String("type", fmt.Sprintf("%T", rw)))
			}
			return
		}

		if hijackErr != nil {
			if c := h.logger.Check(zap.ErrorLevel, "hijack failed on protocol switch"); c != nil {
				c.Write(zap.Error(hijackErr))
			}
			return
		}
	}

	// adopted from https://github.com/golang/go/commit/8bcf2834afdf6a1f7937390903a41518715ef6f5
	backConnCloseCh := make(chan struct{})
	go func() {
		// Ensure that the cancellation of a request closes the backend.
		// See issue https://golang.org/issue/35559.
		select {
		case <-req.Context().Done():
		case <-backConnCloseCh:
		}
		backConn.Close()
	}()
	defer close(backConnCloseCh)

	start := time.Now()
	defer func() {
		conn.Close()
		if c := logger.Check(zapcore.DebugLevel, "connection closed"); c != nil {
			c.Write(zap.Duration("duration", time.Since(start)))
		}
	}()

	if err := brw.Flush(); err != nil {
		if c := logger.Check(zapcore.DebugLevel, "response flush"); c != nil {
			c.Write(zap.Error(err))
		}
		return
	}

	// There may be buffered data in the *bufio.Reader
	// see: https://github.com/caddyserver/caddy/issues/6273
	if buffered := brw.Reader.Buffered(); buffered > 0 {
		data, _ := brw.Peek(buffered)
		_, err := backConn.Write(data)
		if err != nil {
			if c := logger.Check(zapcore.DebugLevel, "backConn write failed"); c != nil {
				c.Write(zap.Error(err))
			}
			return
		}
	}

	// Ensure the hijacked client connection, and the new connection established
	// with the backend, are both closed in the event of a server shutdown. This
	// is done by registering them. We also try to gracefully close connections
	// we recognize as websockets.
	// We need to make sure the client connection messages (i.e. to upstream)
	// are masked, so we need to know whether the connection is considered the
	// server or the client side of the proxy.
	gracefulClose := func(conn io.ReadWriteCloser, isClient bool) func() error {
		if isWebsocket(req) {
			return func() error {
				return writeCloseControl(conn, isClient)
			}
		}
		return nil
	}
	deleteFrontConn := h.registerConnection(conn, gracefulClose(conn, false))
	deleteBackConn := h.registerConnection(backConn, gracefulClose(backConn, true))
	defer deleteFrontConn()
	defer deleteBackConn()

	spc := switchProtocolCopier{user: conn, backend: backConn, wg: wg}

	// setup the timeout if requested
	var timeoutc <-chan time.Time
	if h.StreamTimeout > 0 {
		timer := time.NewTimer(time.Duration(h.StreamTimeout))
		defer timer.Stop()
		timeoutc = timer.C
	}

	// when a stream timeout is encountered, no error will be read from errc
	// a buffer size of 2 will allow both the read and write goroutines to send the error and exit
	// see: https://github.com/caddyserver/caddy/issues/7418
	errc := make(chan error, 2)
	wg.Add(2)
	go spc.copyToBackend(errc)
	go spc.copyFromBackend(errc)
	select {
	case err := <-errc:
		if c := logger.Check(zapcore.DebugLevel, "streaming error"); c != nil {
			c.Write(zap.Error(err))
		}
	case time := <-timeoutc: // NOTE: shadows the time package inside this case
		if c := logger.Check(zapcore.DebugLevel, "stream timed out"); c != nil {
			c.Write(zap.Time("timeout", time))
		}
	}
}

// flushInterval returns the p.FlushInterval value, conditionally
// overriding its value for a specific request/response.
func (h Handler) flushInterval(req *http.Request, res *http.Response) time.Duration {
	resCTHeader := res.Header.Get("Content-Type")
	resCT, _, err := mime.ParseMediaType(resCTHeader)

	// For Server-Sent Events responses, flush immediately.
	// The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
	if err == nil && resCT == "text/event-stream" {
		return -1 // negative means immediately
	}

	// We might have the case of streaming for which Content-Length might be unset.
	if res.ContentLength == -1 {
		return -1
	}

	// for h2 and h2c upstream streaming data to client (issues #3556 and #3606)
	if h.isBidirectionalStream(req, res) {
		return -1
	}

	return time.Duration(h.FlushInterval)
}

// isBidirectionalStream returns whether we should work in bi-directional stream mode.
//
// See https://github.com/caddyserver/caddy/pull/3620 for discussion of nuances.
func (h Handler) isBidirectionalStream(req *http.Request, res *http.Response) bool {
	// We have to check the encoding here; only flush headers with identity encoding.
	// Non-identity encoding might combine with "encode" directive, and in that case,
	// if body size larger than enc.MinLength, upper level encode handle might have
	// Content-Encoding header to write.
	// (see https://github.com/caddyserver/caddy/issues/3606 for use case)
	ae := req.Header.Get("Accept-Encoding")

	return req.ProtoMajor == 2 &&
		res.ProtoMajor == 2 &&
		res.ContentLength == -1 &&
		(ae == "identity" || ae == "")
}

// copyResponse copies the backend response body src to the client dst,
// optionally wrapping dst in a maxLatencyWriter when a non-zero
// flushInterval is given (negative = flush after every write). Buffers
// are recycled via streamingBufPool to avoid per-request allocations.
func (h Handler) copyResponse(dst http.ResponseWriter, src io.Reader, flushInterval time.Duration, logger *zap.Logger) error {
	var w io.Writer = dst

	if flushInterval != 0 {
		var mlwLogger *zap.Logger
		if h.VerboseLogs {
			mlwLogger = logger.Named("max_latency_writer")
		} else {
			// discard logs unless verbose logging was enabled
			mlwLogger = zap.NewNop()
		}
		mlw := &maxLatencyWriter{
			dst: dst,
			//nolint:bodyclose
			flush:   http.NewResponseController(dst).Flush,
			latency: flushInterval,
			logger:  mlwLogger,
		}
		defer mlw.stop()

		// set up initial timer so headers get flushed even if body writes are delayed
		mlw.flushPending = true
		mlw.t = time.AfterFunc(flushInterval, mlw.delayedFlush)

		w = mlw
	}

	buf := streamingBufPool.Get().(*[]byte)
	defer streamingBufPool.Put(buf)

	var copyLogger *zap.Logger
	if h.VerboseLogs {
		copyLogger = logger
	} else {
		copyLogger = zap.NewNop()
	}

	_, err := h.copyBuffer(w, src, *buf, copyLogger)
	return err
}

// copyBuffer returns any write errors or non-EOF read errors, and the amount
// of bytes written.
func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte, logger *zap.Logger) (int64, error) {
	if len(buf) == 0 {
		buf = make([]byte, defaultBufferSize)
	}
	var written int64
	for {
		logger.Debug("waiting to read from upstream")
		nr, rerr := src.Read(buf)
		// shadow logger with one carrying the read size for this iteration
		logger := logger.With(zap.Int("read", nr))
		if c := logger.Check(zapcore.DebugLevel, "read from upstream"); c != nil {
			c.Write(zap.Error(rerr))
		}
		if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
			// TODO: this could be useful to know (indeed, it revealed an error in our
			// fastcgi PoC earlier; but it's this single error report here that necessitates
			// a function separate from io.CopyBuffer, since io.CopyBuffer does not distinguish
			// between read or write errors; in a reverse proxy situation, write errors are not
			// something we need to report to the client, but read errors are a problem on our
			// end for sure. so we need to decide what we want.)
			// p.logf("copyBuffer: ReverseProxy read error during body copy: %v", rerr)
			if c := logger.Check(zapcore.ErrorLevel, "reading from backend"); c != nil {
				c.Write(zap.Error(rerr))
			}
		}
		if nr > 0 {
			logger.Debug("writing to downstream")
			nw, werr := dst.Write(buf[:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if c := logger.Check(zapcore.DebugLevel, "wrote to downstream"); c != nil {
				c.Write(
					zap.Int("written", nw),
					zap.Int64("written_total", written),
					zap.Error(werr),
				)
			}
			if werr != nil {
				return written, fmt.Errorf("writing: %w", werr)
			}
			if nr != nw {
				return written, io.ErrShortWrite
			}
		}
		if rerr != nil {
			if rerr == io.EOF {
				// normal end of stream; not an error
				return written, nil
			}
			return written, fmt.Errorf("reading: %w", rerr)
		}
	}
}

// registerConnection holds onto conn so it can be closed in the event
// of a server shutdown. This is useful because hijacked connections or
// connections dialed to backends don't close when server is shut down.
// The caller should call the returned delete() function when the
// connection is done to remove it from memory.
func (h *Handler) registerConnection(conn io.ReadWriteCloser, gracefulClose func() error) (del func()) { h.connectionsMu.Lock() h.connections[conn] = openConnection{conn, gracefulClose} h.connectionsMu.Unlock() return func() { h.connectionsMu.Lock() delete(h.connections, conn) // if there is no connection left before the connections close timer fires if len(h.connections) == 0 && h.connectionsCloseTimer != nil { // we release the timer that holds the reference to Handler if (*h.connectionsCloseTimer).Stop() { h.logger.Debug("stopped streaming connections close timer - all connections are already closed") } h.connectionsCloseTimer = nil } h.connectionsMu.Unlock() } } // closeConnections immediately closes all hijacked connections (both to client and backend). func (h *Handler) closeConnections() error { var err error h.connectionsMu.Lock() defer h.connectionsMu.Unlock() for _, oc := range h.connections { if oc.gracefulClose != nil { // this is potentially blocking while we have the lock on the connections // map, but that should be OK since the server has in theory shut down // and we are no longer using the connections map gracefulErr := oc.gracefulClose() if gracefulErr != nil && err == nil { err = gracefulErr } } closeErr := oc.conn.Close() if closeErr != nil && err == nil { err = closeErr } } return err } // cleanupConnections closes hijacked connections. // Depending on the value of StreamCloseDelay it does that either immediately // or sets up a timer that will do that later. 
func (h *Handler) cleanupConnections() error { if h.StreamCloseDelay == 0 { return h.closeConnections() } h.connectionsMu.Lock() defer h.connectionsMu.Unlock() // the handler is shut down, no new connection can appear, // so we can skip setting up the timer when there are no connections if len(h.connections) > 0 { delay := time.Duration(h.StreamCloseDelay) h.connectionsCloseTimer = time.AfterFunc(delay, func() { if c := h.logger.Check(zapcore.DebugLevel, "closing streaming connections after delay"); c != nil { c.Write(zap.Duration("delay", delay)) } err := h.closeConnections() if err != nil { if c := h.logger.Check(zapcore.ErrorLevel, "failed to closed connections after delay"); c != nil { c.Write( zap.Error(err), zap.Duration("delay", delay), ) } } }) } return nil } // writeCloseControl sends a best-effort Close control message to the given // WebSocket connection. Thanks to @pascaldekloe who provided inspiration // from his simple implementation of this I was able to learn from at: // github.com/pascaldekloe/websocket. Further work for handling masking // taken from github.com/gorilla/websocket. func writeCloseControl(conn io.Writer, isClient bool) error { // Sources: // https://github.com/pascaldekloe/websocket/blob/32050af67a5d/websocket.go#L119 // https://github.com/gorilla/websocket/blob/v1.5.0/conn.go#L413 // For now, we're not using a reason. We might later, though. 
	// The code handling the reason is left in
	var reason string // max 123 bytes (control frame payload limit is 125; status code takes 2)

	const closeMessage = 8
	const finalBit = 1 << 7 // Frame header byte 0 bits from Section 5.2 of RFC 6455
	const maskBit = 1 << 7  // Frame header byte 1 bits from Section 5.2 of RFC 6455
	const goingAwayUpper uint8 = 1001 >> 8  // status 1001 (going away), high byte
	const goingAwayLower uint8 = 1001 & 0xff // status 1001 (going away), low byte

	b0 := byte(closeMessage) | finalBit
	b1 := byte(len(reason) + 2) // payload length: 2-byte status code + reason
	if isClient {
		b1 |= maskBit
	}

	buf := make([]byte, 0, 127)
	buf = append(buf, b0, b1)

	msgLength := 4 + len(reason) // 2 header bytes + 2 status bytes + reason

	// Both branches below append the "going away" code and reason
	appendMessage := func(buf []byte) []byte {
		buf = append(buf, goingAwayUpper, goingAwayLower)
		buf = append(buf, []byte(reason)...)
		return buf
	}

	// When we're the client, we need to mask the message as per
	// https://www.rfc-editor.org/rfc/rfc6455#section-5.3
	if isClient {
		key := newMaskKey()
		buf = append(buf, key[:]...)
		msgLength += len(key)
		buf = appendMessage(buf)
		// mask only the payload (everything after the 2-byte header and key)
		maskBytes(key, 0, buf[2+len(key):])
	} else {
		buf = appendMessage(buf)
	}

	// simply best-effort, but return error for logging purposes
	// TODO: we might need to ensure we are the exclusive writer by this point (io.Copy is stopped)?
	_, err := conn.Write(buf[:msgLength])
	return err
}

// Copied from https://github.com/gorilla/websocket/blob/v1.5.0/mask.go
// maskBytes XORs b with the repeating 4-byte key starting at offset pos,
// word-at-a-time where alignment allows, and returns the next key offset.
func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3] // nolint:gosec // false positive, impossible to be out of bounds; see: https://github.com/securego/gosec/issues/1525
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Add(unsafe.Pointer(&b[0]), i)) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}

// Copied from https://github.com/gorilla/websocket/blob/v1.5.0/conn.go#L184
// newMaskKey returns a random 4-byte WebSocket masking key; weak randomness
// is acceptable here since the key is not a secret.
func newMaskKey() [4]byte {
	n := weakrand.Uint32()
	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
}

// isWebsocket returns true if r looks to be an upgrade request for WebSockets.
// It is a fairly naive check.
func isWebsocket(r *http.Request) bool {
	return httpguts.HeaderValuesContainsToken(r.Header["Connection"], "upgrade") &&
		httpguts.HeaderValuesContainsToken(r.Header["Upgrade"], "websocket")
}

// openConnection maps an open connection to
// an optional function for graceful close.
type openConnection struct { conn io.ReadWriteCloser gracefulClose func() error } type maxLatencyWriter struct { dst io.Writer flush func() error latency time.Duration // non-zero; negative means to flush immediately mu sync.Mutex // protects t, flushPending, and dst.Flush t *time.Timer flushPending bool logger *zap.Logger } func (m *maxLatencyWriter) Write(p []byte) (n int, err error) { m.mu.Lock() defer m.mu.Unlock() n, err = m.dst.Write(p) if c := m.logger.Check(zapcore.DebugLevel, "wrote bytes"); c != nil { c.Write(zap.Int("n", n), zap.Error(err)) } if m.latency < 0 { m.logger.Debug("flushing immediately") //nolint:errcheck m.flush() return n, err } if m.flushPending { m.logger.Debug("delayed flush already pending") return n, err } if m.t == nil { m.t = time.AfterFunc(m.latency, m.delayedFlush) } else { m.t.Reset(m.latency) } if c := m.logger.Check(zapcore.DebugLevel, "timer set for delayed flush"); c != nil { c.Write(zap.Duration("duration", m.latency)) } m.flushPending = true return n, err } func (m *maxLatencyWriter) delayedFlush() { m.mu.Lock() defer m.mu.Unlock() if !m.flushPending { // if stop was called but AfterFunc already started this goroutine m.logger.Debug("delayed flush is not pending") return } m.logger.Debug("delayed flush") //nolint:errcheck m.flush() m.flushPending = false } func (m *maxLatencyWriter) stop() { m.mu.Lock() defer m.mu.Unlock() m.flushPending = false if m.t != nil { m.t.Stop() } } // switchProtocolCopier exists so goroutines proxying data back and // forth have nice names in stacks. 
type switchProtocolCopier struct { user, backend io.ReadWriteCloser wg *sync.WaitGroup } func (c switchProtocolCopier) copyFromBackend(errc chan<- error) { _, err := io.Copy(c.user, c.backend) errc <- err c.wg.Done() } func (c switchProtocolCopier) copyToBackend(errc chan<- error) { _, err := io.Copy(c.backend, c.user) errc <- err c.wg.Done() } var streamingBufPool = sync.Pool{ New: func() any { // The Pool's New function should generally only return pointer // types, since a pointer can be put into the return interface // value without an allocation // - (from the package docs) b := make([]byte, defaultBufferSize) return &b }, } const ( defaultBufferSize = 32 * 1024 wordSize = int(unsafe.Sizeof(uintptr(0))) ) ================================================ FILE: modules/caddyhttp/reverseproxy/streaming_test.go ================================================ package reverseproxy import ( "bytes" "net/http/httptest" "strings" "testing" "github.com/caddyserver/caddy/v2" ) func TestHandlerCopyResponse(t *testing.T) { h := Handler{} testdata := []string{ "", strings.Repeat("a", defaultBufferSize), strings.Repeat("123456789 123456789 123456789 12", 3000), } dst := bytes.NewBuffer(nil) recorder := httptest.NewRecorder() recorder.Body = dst for _, d := range testdata { src := bytes.NewBuffer([]byte(d)) dst.Reset() err := h.copyResponse(recorder, src, 0, caddy.Log()) if err != nil { t.Errorf("failed with error: %v", err) } out := dst.String() if out != d { t.Errorf("bad read: got %q", out) } } } ================================================ FILE: modules/caddyhttp/reverseproxy/upstreams.go ================================================ package reverseproxy import ( "context" "encoding/json" "fmt" weakrand "math/rand/v2" "net" "net/http" "strconv" "sync" "time" "go.uber.org/zap" "go.uber.org/zap/zapcore" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(SRVUpstreams{}) caddy.RegisterModule(AUpstreams{}) caddy.RegisterModule(MultiUpstreams{}) } // 
// SRVUpstreams provides upstreams from SRV lookups.
// The lookup DNS name can be configured either by
// its individual parts (that is, specifying the
// service, protocol, and name separately) to form
// the standard "_service._proto.name" domain, or
// the domain can be specified directly in name by
// leaving service and proto empty. See RFC 2782.
//
// Lookups are cached and refreshed at the configured
// refresh interval.
//
// Returned upstreams are sorted by priority and weight.
type SRVUpstreams struct {
	// The service label.
	Service string `json:"service,omitempty"`

	// The protocol label; either tcp or udp.
	Proto string `json:"proto,omitempty"`

	// The name label; or, if service and proto are
	// empty, the entire domain name to look up.
	Name string `json:"name,omitempty"`

	// The interval at which to refresh the SRV lookup.
	// Results are cached between lookups. Default: 1m
	Refresh caddy.Duration `json:"refresh,omitempty"`

	// If > 0 and there is an error with the lookup,
	// continue to use the cached results for up to
	// this long before trying again, (even though they
	// are stale) instead of returning an error to the
	// client. Default: 0s.
	GracePeriod caddy.Duration `json:"grace_period,omitempty"`

	// Configures the DNS resolver used to resolve the
	// SRV address to SRV records.
	Resolver *UpstreamResolver `json:"resolver,omitempty"`

	// If Resolver is configured, how long to wait before
	// timing out trying to connect to the DNS server.
	DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`

	// If Resolver is configured, how long to wait before
	// spawning an RFC 6555 Fast Fallback connection.
	// A negative value disables this.
	FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`

	// Specific network to dial when connecting to the upstream(s)
	// provided by SRV records upstream. See Go's net package for
	// accepted values. For example, to restrict to IPv4, use "tcp4".
	DialNetwork string `json:"dial_network,omitempty"`

	// resolver performs the SRV lookups; either a custom resolver
	// built in Provision or net.DefaultResolver
	resolver *net.Resolver

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (SRVUpstreams) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.upstreams.srv",
		New: func() caddy.Module { return new(SRVUpstreams) },
	}
}

// Provision sets up defaults and, if a custom Resolver is configured,
// builds a net.Resolver that dials one of the configured DNS servers
// at random; otherwise net.DefaultResolver is used.
func (su *SRVUpstreams) Provision(ctx caddy.Context) error {
	su.logger = ctx.Logger()
	if su.Refresh == 0 {
		su.Refresh = caddy.Duration(time.Minute)
	}

	if su.Resolver != nil {
		err := su.Resolver.ParseAddresses()
		if err != nil {
			return err
		}
		d := &net.Dialer{
			Timeout:       time.Duration(su.DialTimeout),
			FallbackDelay: time.Duration(su.FallbackDelay),
		}
		su.resolver = &net.Resolver{
			PreferGo: true,
			Dial: func(ctx context.Context, _, _ string) (net.Conn, error) {
				//nolint:gosec
				addr := su.Resolver.netAddrs[weakrand.IntN(len(su.Resolver.netAddrs))]
				return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0))
			},
		}
	}
	if su.resolver == nil {
		su.resolver = net.DefaultResolver
	}

	return nil
}

// GetUpstreams returns the upstreams for the request's SRV domain,
// serving from the package-level cache when fresh and refreshing it
// (under a write lock) when stale.
func (su SRVUpstreams) GetUpstreams(r *http.Request) ([]*Upstream, error) {
	suAddr, service, proto, name := su.expandedAddr(r)

	// first, use a cheap read-lock to return a cached result quickly
	srvsMu.RLock()
	cached := srvs[suAddr]
	srvsMu.RUnlock()
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	// otherwise, obtain a write-lock to update the cached value
	srvsMu.Lock()
	defer srvsMu.Unlock()

	// check to see if it's still stale, since we're now in a different
	// lock from when we first checked freshness; another goroutine might
	// have refreshed it in the meantime before we re-obtained our lock
	cached = srvs[suAddr]
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	if c := su.logger.Check(zapcore.DebugLevel, "refreshing SRV upstreams"); c != nil {
		c.Write(
			zap.String("service", service),
			zap.String("proto", proto),
			zap.String("name", name),
		)
	}

	_, records, err := su.resolver.LookupSRV(r.Context(), service, proto, name)
	if err != nil {
		// From LookupSRV docs: "If the response contains invalid names, those records are filtered
		// out and an error will be returned alongside the remaining results, if any." Thus, we
		// only return an error if no records were also returned.
		if len(records) == 0 {
			if su.GracePeriod > 0 {
				if c := su.logger.Check(zapcore.ErrorLevel, "SRV lookup failed; using previously cached"); c != nil {
					c.Write(zap.Error(err))
				}
				// backdate freshness so the stale entry survives only for the grace period
				cached.freshness = time.Now().Add(time.Duration(su.GracePeriod) - time.Duration(su.Refresh))
				srvs[suAddr] = cached
				return allNew(cached.upstreams), nil
			}
			return nil, err
		}
		if c := su.logger.Check(zapcore.WarnLevel, "SRV records filtered"); c != nil {
			c.Write(zap.Error(err))
		}
	}

	upstreams := make([]Upstream, len(records))
	for i, rec := range records {
		if c := su.logger.Check(zapcore.DebugLevel, "discovered SRV record"); c != nil {
			c.Write(
				zap.String("target", rec.Target),
				zap.Uint16("port", rec.Port),
				zap.Uint16("priority", rec.Priority),
				zap.Uint16("weight", rec.Weight),
			)
		}
		addr := net.JoinHostPort(rec.Target, strconv.Itoa(int(rec.Port)))
		if su.DialNetwork != "" {
			addr = su.DialNetwork + "/" + addr
		}
		upstreams[i] = Upstream{Dial: addr}
	}

	// before adding a new one to the cache (as opposed to replacing stale one), make room if cache is full
	if cached.freshness.IsZero() && len(srvs) >= 100 {
		for randomKey := range srvs {
			delete(srvs, randomKey)
			break
		}
	}

	srvs[suAddr] = srvLookup{
		srvUpstreams: su,
		freshness:    time.Now(),
		upstreams:    upstreams,
	}

	return allNew(upstreams), nil
}

// String returns the configured SRV domain (or plain name if no
// service/proto labels were set).
func (su SRVUpstreams) String() string {
	if su.Service == "" && su.Proto == "" {
		return su.Name
	}
	return su.formattedAddr(su.Service, su.Proto, su.Name)
}

// expandedAddr expands placeholders in the configured SRV domain labels.
// The return values are: addr, the RFC 2782 representation of the SRV domain;
// service, the service; proto, the protocol; and name, the name.
// If su.Service and su.Proto are empty, name will be returned as addr instead.
func (su SRVUpstreams) expandedAddr(r *http.Request) (addr, service, proto, name string) { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) name = repl.ReplaceAll(su.Name, "") if su.Service == "" && su.Proto == "" { addr = name return addr, service, proto, name } service = repl.ReplaceAll(su.Service, "") proto = repl.ReplaceAll(su.Proto, "") addr = su.formattedAddr(service, proto, name) return addr, service, proto, name } // formattedAddr the RFC 2782 representation of the SRV domain, in // the form "_service._proto.name". func (SRVUpstreams) formattedAddr(service, proto, name string) string { return fmt.Sprintf("_%s._%s.%s", service, proto, name) } type srvLookup struct { srvUpstreams SRVUpstreams freshness time.Time upstreams []Upstream } func (sl srvLookup) isFresh() bool { return time.Since(sl.freshness) < time.Duration(sl.srvUpstreams.Refresh) } type IPVersions struct { IPv4 *bool `json:"ipv4,omitempty"` IPv6 *bool `json:"ipv6,omitempty"` } func resolveIpVersion(versions *IPVersions) string { resolveIpv4 := versions == nil || (versions.IPv4 == nil && versions.IPv6 == nil) || (versions.IPv4 != nil && *versions.IPv4) resolveIpv6 := versions == nil || (versions.IPv6 == nil && versions.IPv4 == nil) || (versions.IPv6 != nil && *versions.IPv6) switch { case resolveIpv4 && !resolveIpv6: return "ip4" case !resolveIpv4 && resolveIpv6: return "ip6" default: return "ip" } } // AUpstreams provides upstreams from A/AAAA lookups. // Results are cached and refreshed at the configured // refresh interval. type AUpstreams struct { // The domain name to look up. Name string `json:"name,omitempty"` // The port to use with the upstreams. Default: 80 Port string `json:"port,omitempty"` // The interval at which to refresh the A lookup. // Results are cached between lookups. Default: 1m Refresh caddy.Duration `json:"refresh,omitempty"` // Configures the DNS resolver used to resolve the // domain name to A records. 
	Resolver *UpstreamResolver `json:"resolver,omitempty"`

	// If Resolver is configured, how long to wait before
	// timing out trying to connect to the DNS server.
	DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`

	// If Resolver is configured, how long to wait before
	// spawning an RFC 6555 Fast Fallback connection.
	// A negative value disables this.
	FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"`

	// The IP versions to resolve for. By default, both
	// "ipv4" and "ipv6" will be enabled, which
	// correspond to A and AAAA records respectively.
	Versions *IPVersions `json:"versions,omitempty"`

	// resolver performs the A/AAAA lookups; either a custom resolver
	// built in Provision or net.DefaultResolver
	resolver *net.Resolver

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (AUpstreams) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.upstreams.a",
		New: func() caddy.Module { return new(AUpstreams) },
	}
}

// Provision sets up defaults (1m refresh, port 80) and, if a custom
// Resolver is configured, builds a net.Resolver that dials one of the
// configured DNS servers at random; otherwise net.DefaultResolver is used.
func (au *AUpstreams) Provision(ctx caddy.Context) error {
	au.logger = ctx.Logger()
	if au.Refresh == 0 {
		au.Refresh = caddy.Duration(time.Minute)
	}
	if au.Port == "" {
		au.Port = "80"
	}

	if au.Resolver != nil {
		err := au.Resolver.ParseAddresses()
		if err != nil {
			return err
		}
		d := &net.Dialer{
			Timeout:       time.Duration(au.DialTimeout),
			FallbackDelay: time.Duration(au.FallbackDelay),
		}
		au.resolver = &net.Resolver{
			PreferGo: true,
			Dial: func(ctx context.Context, _, _ string) (net.Conn, error) {
				//nolint:gosec
				addr := au.Resolver.netAddrs[weakrand.IntN(len(au.Resolver.netAddrs))]
				return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0))
			},
		}
	}
	if au.resolver == nil {
		au.resolver = net.DefaultResolver
	}

	return nil
}

// GetUpstreams returns the upstreams for the request's domain name,
// serving from the package-level cache when fresh and refreshing it
// (under a write lock) when stale.
func (au AUpstreams) GetUpstreams(r *http.Request) ([]*Upstream, error) {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// Map ipVersion early, so we can use it as part of the cache-key.
	// This should be fairly inexpensive and comes with the upside of
	// allowing the same dynamic upstream (name + port combination)
	// to be used multiple times with different ip versions.
	//
	// It also forces a cache-miss if a previously cached dynamic
	// upstream changes its ip version, e.g. after a config reload,
	// while keeping the cache-invalidation as simple as it currently is.
	ipVersion := resolveIpVersion(au.Versions)

	auStr := repl.ReplaceAll(au.String()+ipVersion, "")

	// first, use a cheap read-lock to return a cached result quickly
	aAaaaMu.RLock()
	cached := aAaaa[auStr]
	aAaaaMu.RUnlock()
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	// otherwise, obtain a write-lock to update the cached value
	aAaaaMu.Lock()
	defer aAaaaMu.Unlock()

	// check to see if it's still stale, since we're now in a different
	// lock from when we first checked freshness; another goroutine might
	// have refreshed it in the meantime before we re-obtained our lock
	cached = aAaaa[auStr]
	if cached.isFresh() {
		return allNew(cached.upstreams), nil
	}

	name := repl.ReplaceAll(au.Name, "")
	port := repl.ReplaceAll(au.Port, "")

	if c := au.logger.Check(zapcore.DebugLevel, "refreshing A upstreams"); c != nil {
		c.Write(
			zap.String("version", ipVersion),
			zap.String("name", name),
			zap.String("port", port),
		)
	}

	ips, err := au.resolver.LookupIP(r.Context(), ipVersion, name)
	if err != nil {
		return nil, err
	}

	upstreams := make([]Upstream, len(ips))
	for i, ip := range ips {
		if c := au.logger.Check(zapcore.DebugLevel, "discovered A record"); c != nil {
			c.Write(zap.String("ip", ip.String()))
		}
		upstreams[i] = Upstream{
			Dial: net.JoinHostPort(ip.String(), port),
		}
	}

	// before adding a new one to the cache (as opposed to replacing stale one), make room if cache is full
	if cached.freshness.IsZero() && len(aAaaa) >= 100 {
		for randomKey := range aAaaa {
			delete(aAaaa, randomKey)
			break
		}
	}

	aAaaa[auStr] = aLookup{
		aUpstreams: au,
		freshness:  time.Now(),
		upstreams:  upstreams,
	}

	return allNew(upstreams), nil
}

// String returns the configured "name:port" pair.
func (au AUpstreams) String() string { return net.JoinHostPort(au.Name, au.Port) }

// aLookup is a cached A/AAAA resolution along with the config that
// produced it (needed to know its refresh interval).
type aLookup struct {
	aUpstreams AUpstreams
	freshness  time.Time
	upstreams  []Upstream
}

// isFresh reports whether the cached lookup is still within its
// configured refresh interval. A zero-value aLookup is never fresh.
func (al aLookup) isFresh() bool {
	return time.Since(al.freshness) < time.Duration(al.aUpstreams.Refresh)
}

// MultiUpstreams is a single dynamic upstream source that
// aggregates the results of multiple dynamic upstream sources.
// All configured sources will be queried in order, with their
// results appended to the end of the list. Errors returned
// from individual sources will be logged and the next source
// will continue to be invoked.
//
// This module makes it easy to implement redundant cluster
// failovers, especially in conjunction with the `first` load
// balancing policy: if the first source returns an error or
// no upstreams, the second source's upstreams will be used
// naturally.
type MultiUpstreams struct {
	// The list of upstream source modules to get upstreams from.
	// They will be queried in order, with their results appended
	// in the order they are returned.
	SourcesRaw []json.RawMessage `json:"sources,omitempty" caddy:"namespace=http.reverse_proxy.upstreams inline_key=source"`
	sources    []UpstreamSource

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (MultiUpstreams) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.reverse_proxy.upstreams.multi",
		New: func() caddy.Module { return new(MultiUpstreams) },
	}
}

// Provision loads and provisions the configured upstream source modules.
func (mu *MultiUpstreams) Provision(ctx caddy.Context) error {
	mu.logger = ctx.Logger()
	if mu.SourcesRaw != nil {
		mod, err := ctx.LoadModule(mu, "SourcesRaw")
		if err != nil {
			return fmt.Errorf("loading upstream source modules: %v", err)
		}
		for _, src := range mod.([]any) {
			mu.sources = append(mu.sources, src.(UpstreamSource))
		}
	}
	return nil
}

// GetUpstreams queries each configured source in order and returns
// the concatenation of all their upstreams. A source that errors or
// returns zero upstreams is logged and skipped; iteration stops
// early if the request's context has been canceled.
func (mu MultiUpstreams) GetUpstreams(r *http.Request) ([]*Upstream, error) {
	var upstreams []*Upstream

	for i, src := range mu.sources {
		// bail out if the client went away
		select {
		case <-r.Context().Done():
			return upstreams, context.Canceled
		default:
		}

		up, err := src.GetUpstreams(r)
		if err != nil {
			if c := mu.logger.Check(zapcore.ErrorLevel, "upstream source returned error"); c != nil {
				c.Write(
					zap.Int("source_idx", i),
					zap.Error(err),
				)
			}
		} else if len(up) == 0 {
			if c := mu.logger.Check(zapcore.WarnLevel, "upstream source returned 0 upstreams"); c != nil {
				c.Write(zap.Int("source_idx", i))
			}
		} else {
			upstreams = append(upstreams, up...)
		}
	}

	return upstreams, nil
}

// UpstreamResolver holds the set of addresses of DNS resolvers of
// upstream addresses
type UpstreamResolver struct {
	// The addresses of DNS resolvers to use when looking up the addresses of proxy upstreams.
	// It accepts [network addresses](/docs/conventions#network-addresses)
	// with port range of only 1. If the host is an IP address, it will be dialed directly to resolve the upstream server.
	// If the host is not an IP address, the addresses are resolved using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution) of the Go standard library.
	// If the array contains more than 1 resolver address, one is chosen at random.
	Addresses []string `json:"addresses,omitempty"`

	// parsed form of Addresses (populated by ParseAddresses)
	netAddrs []caddy.NetworkAddress
}

// ParseAddresses parses all the configured network addresses
// and ensures they're ready to be used.
func (u *UpstreamResolver) ParseAddresses() error {
	for _, v := range u.Addresses {
		addr, err := caddy.ParseNetworkAddressWithDefaults(v, "udp", 53)
		if err != nil {
			return err
		}
		if addr.PortRangeSize() != 1 {
			return fmt.Errorf("resolver address must have exactly one address; cannot call %v", addr)
		}
		u.netAddrs = append(u.netAddrs, addr)
	}
	return nil
}

// allNew converts a slice of Upstream values into a slice of newly
// allocated *Upstream carrying only the Dial address, so each caller
// gets fresh, independent upstream state.
func allNew(upstreams []Upstream) []*Upstream {
	results := make([]*Upstream, len(upstreams))
	for i := range upstreams {
		results[i] = &Upstream{Dial: upstreams[i].Dial}
	}
	return results
}

// package-level caches of dynamic lookups, shared by all handler instances
var (
	srvs   = make(map[string]srvLookup)
	srvsMu sync.RWMutex

	aAaaa   = make(map[string]aLookup)
	aAaaaMu sync.RWMutex
)

// Interface guards
var (
	_ caddy.Provisioner = (*SRVUpstreams)(nil)
	_ UpstreamSource    = (*SRVUpstreams)(nil)
	_ caddy.Provisioner = (*AUpstreams)(nil)
	_ UpstreamSource    = (*AUpstreams)(nil)
)



================================================
FILE: modules/caddyhttp/reverseproxy/upstreams_test.go
================================================
package reverseproxy

import "testing"

// TestResolveIpVersion verifies the "ip"/"ip4"/"ip6" network string
// chosen for every combination of enabled/disabled IP versions.
func TestResolveIpVersion(t *testing.T) {
	falseBool := false
	trueBool := true
	tests := []struct {
		Versions          *IPVersions
		expectedIpVersion string
	}{
		{
			Versions:          &IPVersions{IPv4: &trueBool},
			expectedIpVersion: "ip4",
		},
		{
			Versions:          &IPVersions{IPv4: &falseBool},
			expectedIpVersion: "ip",
		},
		{
			Versions:          &IPVersions{IPv4: &trueBool, IPv6: &falseBool},
			expectedIpVersion: "ip4",
		},
		{
			Versions:          &IPVersions{IPv6: &trueBool},
			expectedIpVersion: "ip6",
		},
		{
			Versions:          &IPVersions{IPv6: &falseBool},
			expectedIpVersion: "ip",
		},
		{
			Versions:          &IPVersions{IPv6: &trueBool, IPv4: &falseBool},
			expectedIpVersion: "ip6",
		},
		{
			Versions:          &IPVersions{},
			expectedIpVersion: "ip",
		},
		{
			Versions:          &IPVersions{IPv4: &trueBool, IPv6: &trueBool},
			expectedIpVersion: "ip",
		},
		{
			Versions:          &IPVersions{IPv4: &falseBool, IPv6: &falseBool},
			expectedIpVersion: "ip",
		},
	}
	for _, test := range tests {
		ipVersion := resolveIpVersion(test.Versions)
		if ipVersion != test.expectedIpVersion {
			t.Errorf("resolveIpVersion(): Expected %s got %s", test.expectedIpVersion, ipVersion)
		}
	}
}



================================================
FILE: modules/caddyhttp/rewrite/caddyfile.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rewrite

import (
	"encoding/json"
	"strconv"
	"strings"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterDirective("rewrite", parseCaddyfileRewrite)
	httpcaddyfile.RegisterHandlerDirective("method", parseCaddyfileMethod)
	httpcaddyfile.RegisterHandlerDirective("uri", parseCaddyfileURI)
	httpcaddyfile.RegisterDirective("handle_path", parseCaddyfileHandlePath)
}

// parseCaddyfileRewrite sets up a basic rewrite handler from Caddyfile tokens. Syntax:
//
//	rewrite [<matcher>] <to>
//
// Only URI components which are given in <to> will be set in the resulting URI.
// See the docs for the rewrite handler for more information.
func parseCaddyfileRewrite(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	h.Next() // consume directive name

	// count the tokens to determine what to do
	argsCount := h.CountRemainingArgs()
	if argsCount == 0 {
		return nil, h.Errf("too few arguments; must have at least a rewrite URI")
	}
	if argsCount > 2 {
		return nil, h.Errf("too many arguments; should only be a matcher and a URI")
	}

	// with only one arg, assume it's a rewrite URI with no matcher token
	if argsCount == 1 {
		if !h.NextArg() {
			return nil, h.ArgErr()
		}
		return h.NewRoute(nil, Rewrite{URI: h.Val()}), nil
	}

	// parse the matcher token into a matcher set
	userMatcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}
	h.Next() // consume directive name again, matcher parsing does a reset
	h.Next() // advance to the rewrite URI

	return h.NewRoute(userMatcherSet, Rewrite{URI: h.Val()}), nil
}

// parseCaddyfileMethod sets up a basic method rewrite handler from Caddyfile tokens. Syntax:
//
//	method [<matcher>] <method>
func parseCaddyfileMethod(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	// exactly one argument (the method) is allowed
	if h.NextArg() {
		return nil, h.ArgErr()
	}
	return Rewrite{Method: h.Val()}, nil
}

// parseCaddyfileURI sets up a handler for manipulating (but not "rewriting") the
// URI from Caddyfile tokens. Syntax:
//
//	uri [<matcher>] strip_prefix|strip_suffix|replace|path_regexp <target> [<replacement> [<limit>]]
//
// If strip_prefix or strip_suffix are used, then <target> will be stripped
// only if it is the beginning or the end, respectively, of the URI path. If
// replace is used, then <target> will be replaced with <replacement> across
// the whole URI, up to <limit> times (or unlimited if unspecified). If
// path_regexp is used, then regular expression replacements will be performed
// on the path portion of the URI (and a limit cannot be set).
func parseCaddyfileURI(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name

	args := h.RemainingArgs()
	if len(args) < 1 {
		return nil, h.ArgErr()
	}

	var rewr Rewrite

	switch args[0] {
	case "strip_prefix":
		if len(args) != 2 {
			return nil, h.ArgErr()
		}
		rewr.StripPathPrefix = args[1]

	case "strip_suffix":
		if len(args) != 2 {
			return nil, h.ArgErr()
		}
		rewr.StripPathSuffix = args[1]

	case "replace":
		var find, replace, lim string
		switch len(args) {
		case 4:
			lim = args[3]
			fallthrough
		case 3:
			find = args[1]
			replace = args[2]
		default:
			return nil, h.ArgErr()
		}

		var limInt int
		if lim != "" {
			var err error
			limInt, err = strconv.Atoi(lim)
			if err != nil {
				return nil, h.Errf("limit must be an integer; invalid: %v", err)
			}
		}

		rewr.URISubstring = append(rewr.URISubstring, substrReplacer{
			Find:    find,
			Replace: replace,
			Limit:   limInt,
		})

	case "path_regexp":
		if len(args) != 3 {
			return nil, h.ArgErr()
		}
		find, replace := args[1], args[2]
		rewr.PathRegexp = append(rewr.PathRegexp, &regexReplacer{
			Find:    find,
			Replace: replace,
		})

	case "query":
		if len(args) > 4 {
			return nil, h.ArgErr()
		}
		rewr.Query = &queryOps{}
		var hasArgs bool
		if len(args) > 1 {
			hasArgs = true
			err := applyQueryOps(h, rewr.Query, args[1:])
			if err != nil {
				return nil, err
			}
		}

		for h.NextBlock(0) {
			if hasArgs {
				return nil, h.Err("Cannot specify uri query rewrites in both argument and block")
			}
			// nolint:prealloc
			queryArgs := []string{h.Val()}
			queryArgs = append(queryArgs, h.RemainingArgs()...)
			err := applyQueryOps(h, rewr.Query, queryArgs)
			if err != nil {
				return nil, err
			}
		}

	default:
		return nil, h.Errf("unrecognized URI manipulation '%s'", args[0])
	}
	return rewr, nil
}

// applyQueryOps parses a single query operation (given either as directive
// arguments or as one line of a block) into qo. The operation is selected
// by the shape of the first argument: a "-" prefix deletes the key, a "+"
// prefix adds a value, a ">" infix renames the key, three arguments form
// a search/replace on the key's values, and two arguments set the key.
func applyQueryOps(h httpcaddyfile.Helper, qo *queryOps, args []string) error {
	key := args[0]
	switch {
	case strings.HasPrefix(key, "-"):
		// delete the query key
		if len(args) != 1 {
			return h.ArgErr()
		}
		qo.Delete = append(qo.Delete, strings.TrimLeft(key, "-"))

	case strings.HasPrefix(key, "+"):
		// add a value for the query key (append-only)
		if len(args) != 2 {
			return h.ArgErr()
		}
		param := strings.TrimLeft(key, "+")
		qo.Add = append(qo.Add, queryOpsArguments{Key: param, Val: args[1]})

	case strings.Contains(key, ">"):
		// rename the query key (old>new)
		if len(args) != 1 {
			return h.ArgErr()
		}
		renameValKey := strings.Split(key, ">")
		qo.Rename = append(qo.Rename, queryOpsArguments{Key: renameValKey[0], Val: renameValKey[1]})

	case len(args) == 3:
		// replace within the key's value(s): key search replace
		qo.Replace = append(qo.Replace, &queryOpsReplacement{Key: key, SearchRegexp: args[1], Replace: args[2]})

	default:
		// set (overwrite) the query key
		if len(args) != 2 {
			return h.ArgErr()
		}
		qo.Set = append(qo.Set, queryOpsArguments{Key: key, Val: args[1]})
	}
	return nil
}

// parseCaddyfileHandlePath parses the handle_path directive. Syntax:
//
//	handle_path [<matcher>] {
//		<directives...>
//	}
//
// Only path matchers (with a `/` prefix) are supported as this is a shortcut
// for the handle directive with a strip_prefix rewrite.
func parseCaddyfileHandlePath(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	h.Next() // consume directive name

	// there must be a path matcher
	if !h.NextArg() {
		return nil, h.ArgErr()
	}

	// read the prefix to strip
	path := h.Val()
	if !strings.HasPrefix(path, "/") {
		return nil, h.Errf("path matcher must begin with '/', got %s", path)
	}

	// we only want to strip what comes before the '/' if
	// the user specified it (e.g. /api/* should only strip /api)
	var stripPath string
	if strings.HasSuffix(path, "/*") {
		stripPath = path[:len(path)-2]
	} else if strings.HasSuffix(path, "*") {
		stripPath = path[:len(path)-1]
	} else {
		stripPath = path
	}

	// the ParseSegmentAsSubroute function expects the cursor
	// to be at the token just before the block opening,
	// so we need to rewind because we already read past it
	h.Reset()
	h.Next()

	// parse the block contents as a subroute handler
	handler, err := httpcaddyfile.ParseSegmentAsSubroute(h)
	if err != nil {
		return nil, err
	}
	subroute, ok := handler.(*caddyhttp.Subroute)
	if !ok {
		return nil, h.Errf("segment was not parsed as a subroute")
	}

	// make a matcher on the path and everything below it
	pathMatcher := caddy.ModuleMap{
		"path": h.JSON(caddyhttp.MatchPath{path}),
	}

	// build a route with a rewrite handler to strip the path prefix
	route := caddyhttp.Route{
		HandlersRaw: []json.RawMessage{
			caddyconfig.JSONModuleObject(Rewrite{
				StripPathPrefix: stripPath,
			}, "handler", "rewrite", nil),
		},
	}

	// prepend the route to the subroute
	subroute.Routes = append([]caddyhttp.Route{route}, subroute.Routes...)

	// build and return a route from the subroute
	return h.NewRoute(pathMatcher, subroute), nil
}



================================================
FILE: modules/caddyhttp/rewrite/rewrite.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rewrite

import (
	"fmt"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Rewrite{})
}

// Rewrite is a middleware which can rewrite/mutate HTTP requests.
//
// The Method and URI properties are "setters" (the request URI
// will be overwritten with the given values). Other properties are
// "modifiers" (they modify existing values in a differentiable
// way). It is atypical to combine the use of setters and
// modifiers in a single rewrite.
//
// To ensure consistent behavior, prefix and suffix stripping is
// performed in the URL-decoded (unescaped, normalized) space by
// default except for the specific bytes where an escape sequence
// is used in the prefix or suffix pattern.
//
// For all modifiers, paths are cleaned before being modified so that
// multiple, consecutive slashes are collapsed into a single slash,
// and dot elements are resolved and removed. In the special case
// of a prefix, suffix, or substring containing "//" (repeated slashes),
// slashes will not be merged while cleaning the path so that
// the rewrite can be interpreted literally.
type Rewrite struct {
	// Changes the request's HTTP verb.
	Method string `json:"method,omitempty"`

	// Changes the request's URI, which consists of path and query string.
	// Only components of the URI that are specified will be changed.
	// For example, a value of "/foo.html" or "foo.html" will only change
	// the path and will preserve any existing query string. Similarly, a
	// value of "?a=b" will only change the query string and will not affect
	// the path. Both can also be changed: "/foo?a=b" - this sets both the
	// path and query string at the same time.
	//
	// You can also use placeholders. For example, to preserve the existing
	// query string, you might use: "?{http.request.uri.query}&a=b". Any
	// key-value pairs you add to the query string will not overwrite
	// existing values (individual pairs are append-only).
	//
	// To clear the query string, explicitly set an empty one: "?"
	URI string `json:"uri,omitempty"`

	// Strips the given prefix from the beginning of the URI path.
	// The prefix should be written in normalized (unescaped) form,
	// but if an escaping (`%xx`) is used, the path will be required
	// to have that same escape at that position in order to match.
	StripPathPrefix string `json:"strip_path_prefix,omitempty"`

	// Strips the given suffix from the end of the URI path.
	// The suffix should be written in normalized (unescaped) form,
	// but if an escaping (`%xx`) is used, the path will be required
	// to have that same escape at that position in order to match.
	StripPathSuffix string `json:"strip_path_suffix,omitempty"`

	// Performs substring replacements on the URI.
	URISubstring []substrReplacer `json:"uri_substring,omitempty"`

	// Performs regular expression replacements on the URI path.
	PathRegexp []*regexReplacer `json:"path_regexp,omitempty"`

	// Mutates the query string of the URI.
	Query *queryOps `json:"query,omitempty"`

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (Rewrite) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.rewrite",
		New: func() caddy.Module { return new(Rewrite) },
	}
}

// Provision sets up rewr.
func (rewr *Rewrite) Provision(ctx caddy.Context) error {
	rewr.logger = ctx.Logger()

	// pre-compile the path regexp patterns so ServeHTTP never compiles
	for i, rep := range rewr.PathRegexp {
		if rep.Find == "" {
			return fmt.Errorf("path_regexp find cannot be empty")
		}
		re, err := regexp.Compile(rep.Find)
		if err != nil {
			return fmt.Errorf("compiling regular expression %d: %v", i, err)
		}
		rep.re = re
	}
	if rewr.Query != nil {
		for _, replacementOp := range rewr.Query.Replace {
			err := replacementOp.Provision(ctx)
			if err != nil {
				return fmt.Errorf("compiling regular expression %s in query rewrite replace operation: %v", replacementOp.SearchRegexp, err)
			}
		}
	}
	return nil
}

// ServeHTTP applies the configured rewrites to r and then invokes the
// next handler. When debug logging is enabled and the rewrite changed
// the method or URI, the change is logged.
func (rewr Rewrite) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	const message = "rewrote request"

	// if the debug log entry would be discarded anyway, skip
	// tracking whether the rewrite changed anything
	c := rewr.logger.Check(zap.DebugLevel, message)
	if c == nil {
		rewr.Rewrite(r, repl)
		return next.ServeHTTP(w, r)
	}

	changed := rewr.Rewrite(r, repl)
	if changed {
		c.Write(
			zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: r}),
			zap.String("method", r.Method),
			zap.String("uri", r.RequestURI),
		)
	}
	return next.ServeHTTP(w, r)
}

// Rewrite performs the rewrites on r using repl, which should
// have been obtained from r, but is passed in for efficiency.
// It returns true if any changes were made to r.
func (rewr Rewrite) Rewrite(r *http.Request, repl *caddy.Replacer) bool {
	oldMethod := r.Method
	oldURI := r.RequestURI

	// method
	if rewr.Method != "" {
		r.Method = strings.ToUpper(repl.ReplaceAll(rewr.Method, ""))
	}

	// uri (path, query string and... fragment, because why not)
	if uri := rewr.URI; uri != "" {
		// find the bounds of each part of the URI that exist
		pathStart, qsStart, fragStart := -1, -1, -1
		pathEnd, qsEnd := -1, -1
	loop:
		for i, ch := range uri {
			switch {
			case ch == '?' && qsStart < 0:
				pathEnd, qsStart = i, i+1

			case ch == '#' && fragStart < 0:
				// everything after fragment is fragment (very clear in RFC 3986 section 4.2)
				if qsStart < 0 {
					pathEnd = i
				} else {
					qsEnd = i
				}
				fragStart = i + 1
				break loop

			case pathStart < 0 && qsStart < 0:
				pathStart = i
			}
		}
		if pathStart >= 0 && pathEnd < 0 {
			pathEnd = len(uri)
		}
		if qsStart >= 0 && qsEnd < 0 {
			qsEnd = len(uri)
		}

		// isolate the three main components of the URI
		var path, query, frag string
		if pathStart > -1 {
			path = uri[pathStart:pathEnd]
		}
		if qsStart > -1 {
			query = uri[qsStart:qsEnd]
		}
		if fragStart > -1 {
			frag = uri[fragStart:]
		}

		// build components which are specified, and store them
		// in a temporary variable so that they all read the
		// same version of the URI
		var newPath, newQuery, newFrag string

		if path != "" {
			// replace the `path` placeholder to escaped path
			pathPlaceholder := "{http.request.uri.path}"
			if strings.Contains(path, pathPlaceholder) {
				path = strings.ReplaceAll(path, pathPlaceholder, r.URL.EscapedPath())
			}

			newPath = repl.ReplaceAll(path, "")
		}

		// before continuing, we need to check if a query string
		// snuck into the path component during replacements
		if before, after, found := strings.Cut(newPath, "?"); found {
			// recompute; new path contains a query string
			var injectedQuery string
			newPath, injectedQuery = before, after
			// don't overwrite explicitly-configured query string
			if query == "" {
				query = injectedQuery
			}
		}

		if query != "" {
			newQuery = buildQueryString(query, repl)
		}
		if frag != "" {
			newFrag = repl.ReplaceAll(frag, "")
		}

		// update the URI with the new components
		// only after building them
		if pathStart >= 0 {
			if path, err := url.PathUnescape(newPath); err != nil {
				r.URL.Path = newPath
			} else {
				r.URL.Path = path
			}
			r.URL.RawPath = "" // force recomputing when EscapedPath() is called
		}
		if qsStart >= 0 {
			r.URL.RawQuery = newQuery
		}
		if fragStart >= 0 {
			r.URL.Fragment = newFrag
		}
	}

	// strip path prefix or suffix
	if rewr.StripPathPrefix != "" {
		prefix := repl.ReplaceAll(rewr.StripPathPrefix, "")
		if !strings.HasPrefix(prefix, "/") {
			prefix = "/" + prefix
		}
		mergeSlashes := !strings.Contains(prefix, "//")
		changePath(r, func(escapedPath string) string {
			escapedPath = caddyhttp.CleanPath(escapedPath, mergeSlashes)
			return trimPathPrefix(escapedPath, prefix)
		})
	}
	if rewr.StripPathSuffix != "" {
		suffix := repl.ReplaceAll(rewr.StripPathSuffix, "")
		mergeSlashes := !strings.Contains(suffix, "//")
		changePath(r, func(escapedPath string) string {
			escapedPath = caddyhttp.CleanPath(escapedPath, mergeSlashes)
			// suffix stripping is prefix stripping on the reversed string
			return reverse(trimPathPrefix(reverse(escapedPath), reverse(suffix)))
		})
	}

	// substring replacements in URI
	for _, rep := range rewr.URISubstring {
		rep.do(r, repl)
	}

	// regular expression replacements on the path
	for _, rep := range rewr.PathRegexp {
		rep.do(r, repl)
	}

	// apply query operations
	if rewr.Query != nil {
		rewr.Query.do(r, repl)
	}

	// update the encoded copy of the URI
	r.RequestURI = r.URL.RequestURI()

	// return true if anything changed
	return r.Method != oldMethod || r.RequestURI != oldURI
}

// buildQueryString takes an input query string and
// performs replacements on each component, returning
// the resulting query string. This function appends
// duplicate keys rather than replaces.
func buildQueryString(qs string, repl *caddy.Replacer) string {
	var sb strings.Builder

	// first component must be key, which is the same
	// as if we just wrote a value in previous iteration
	wroteVal := true

	for len(qs) > 0 {
		// determine the end of this component, which will be at
		// the next equal sign or ampersand, whichever comes first
		nextEq, nextAmp := strings.Index(qs, "="), strings.Index(qs, "&")
		ampIsNext := nextAmp >= 0 && (nextAmp < nextEq || nextEq < 0)
		end := len(qs) // assume no delimiter remains...
		if ampIsNext {
			end = nextAmp // ...unless ampersand is first...
		} else if nextEq >= 0 && (nextEq < nextAmp || nextAmp < 0) {
			end = nextEq // ...or unless equal is first.
		}

		// consume the component and write the result
		comp := qs[:end]
		comp, _ = repl.ReplaceFunc(comp, func(name string, val any) (any, error) {
			if name == "http.request.uri.query" && wroteVal {
				return val, nil // already escaped
			}
			var valStr string
			switch v := val.(type) {
			case string:
				valStr = v
			case fmt.Stringer:
				valStr = v.String()
			case int:
				valStr = strconv.Itoa(v)
			default:
				valStr = fmt.Sprintf("%+v", v)
			}
			return url.QueryEscape(valStr), nil
		})
		if end < len(qs) {
			end++ // consume delimiter
		}
		qs = qs[end:]

		// if previous iteration wrote a value,
		// that means we are writing a key
		if wroteVal {
			if sb.Len() > 0 && len(comp) > 0 {
				sb.WriteRune('&')
			}
		} else {
			sb.WriteRune('=')
		}
		sb.WriteString(comp)

		// remember for the next iteration that we just wrote a value,
		// which means the next iteration MUST write a key
		wroteVal = ampIsNext
	}

	return sb.String()
}

// trimPathPrefix is like strings.TrimPrefix, but customized for advanced URI
// path prefix matching. The string prefix will be trimmed from the beginning
// of escapedPath if escapedPath starts with prefix. Rather than a naive 1:1
// comparison of each byte to determine if escapedPath starts with prefix,
// both strings are iterated in lock-step, and if prefix has a '%' encoding
// at a particular position, escapedPath must also have the same encoding
// representation for that character. In other words, if the prefix string
// uses the escaped form for a character, escapedPath must literally use the
// same escape at that position. Otherwise, all character comparisons are
// performed in normalized/unescaped space.
func trimPathPrefix(escapedPath, prefix string) string {
	var iPath, iPrefix int
	for iPath < len(escapedPath) && iPrefix < len(prefix) {
		prefixCh := prefix[iPrefix]
		ch := string(escapedPath[iPath])

		// when the path has a %xx escape here but the prefix does not,
		// decode the escape so the comparison happens in unescaped space;
		// iPath is advanced 2 extra bytes to consume the whole sequence
		if ch == "%" && prefixCh != '%' && len(escapedPath) >= iPath+3 {
			var err error
			ch, err = url.PathUnescape(escapedPath[iPath : iPath+3])
			if err != nil {
				// should be impossible unless EscapedPath() is returning invalid values!
				return escapedPath
			}
			iPath += 2
		}

		// prefix comparisons are case-insensitive to consistency with
		// path matcher, which is case-insensitive for good reasons
		if !strings.EqualFold(ch, string(prefixCh)) {
			return escapedPath
		}

		iPath++
		iPrefix++
	}

	// if we iterated through the entire prefix, we found it, so trim it.
	// NOTE: we must check the prefix cursor (iPrefix), not iPath: when the
	// path contains %xx escapes, iPath advances faster than iPrefix, so
	// iPath >= len(prefix) can hold even though only part of the prefix
	// matched, which would wrongly trim the path
	if iPrefix == len(prefix) {
		return escapedPath[iPath:]
	}

	// otherwise we did not find the prefix
	return escapedPath
}

// reverse returns s with its runes in reverse order; used to implement
// suffix trimming in terms of prefix trimming.
func reverse(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

// substrReplacer describes a simple and fast substring replacement.
type substrReplacer struct {
	// A substring to find. Supports placeholders.
	Find string `json:"find,omitempty"`

	// The substring to replace with. Supports placeholders.
	Replace string `json:"replace,omitempty"`

	// Maximum number of replacements per string.
	// Set to <= 0 for no limit (default).
	Limit int `json:"limit,omitempty"`
}

// do performs the substring replacement on r.
func (rep substrReplacer) do(r *http.Request, repl *caddy.Replacer) { if rep.Find == "" { return } lim := rep.Limit if lim == 0 { lim = -1 } find := repl.ReplaceAll(rep.Find, "") replace := repl.ReplaceAll(rep.Replace, "") mergeSlashes := !strings.Contains(rep.Find, "//") changePath(r, func(pathOrRawPath string) string { return strings.Replace(caddyhttp.CleanPath(pathOrRawPath, mergeSlashes), find, replace, lim) }) r.URL.RawQuery = strings.Replace(r.URL.RawQuery, find, replace, lim) } // regexReplacer describes a replacement using a regular expression. type regexReplacer struct { // The regular expression to find. Find string `json:"find,omitempty"` // The substring to replace with. Supports placeholders and // regular expression capture groups. Replace string `json:"replace,omitempty"` re *regexp.Regexp } func (rep regexReplacer) do(r *http.Request, repl *caddy.Replacer) { if rep.Find == "" || rep.re == nil { return } replace := repl.ReplaceAll(rep.Replace, "") changePath(r, func(pathOrRawPath string) string { return rep.re.ReplaceAllString(pathOrRawPath, replace) }) } func changePath(req *http.Request, newVal func(pathOrRawPath string) string) { req.URL.RawPath = newVal(req.URL.EscapedPath()) if p, err := url.PathUnescape(req.URL.RawPath); err == nil && p != "" { req.URL.Path = p } else { req.URL.Path = newVal(req.URL.Path) } // RawPath is only set if it's different from the normalized Path (std lib) if req.URL.RawPath == req.URL.Path { req.URL.RawPath = "" } } // queryOps describes the operations to perform on query keys: add, set, rename and delete. type queryOps struct { // Renames a query key from Key to Val, without affecting the value. Rename []queryOpsArguments `json:"rename,omitempty"` // Sets query parameters; overwrites a query key with the given value. Set []queryOpsArguments `json:"set,omitempty"` // Adds query parameters; does not overwrite an existing query field, // and only appends an additional value for that key if any already exist. 
Add []queryOpsArguments `json:"add,omitempty"` // Replaces query parameters. Replace []*queryOpsReplacement `json:"replace,omitempty"` // Deletes a given query key by name. Delete []string `json:"delete,omitempty"` } // Provision compiles the query replace operation regex. func (replacement *queryOpsReplacement) Provision(_ caddy.Context) error { if replacement.SearchRegexp != "" { re, err := regexp.Compile(replacement.SearchRegexp) if err != nil { return fmt.Errorf("replacement for query field '%s': %v", replacement.Key, err) } replacement.re = re } return nil } func (q *queryOps) do(r *http.Request, repl *caddy.Replacer) { query := r.URL.Query() for _, renameParam := range q.Rename { key := repl.ReplaceAll(renameParam.Key, "") val := repl.ReplaceAll(renameParam.Val, "") if key == "" || val == "" { continue } query[val] = query[key] delete(query, key) } for _, setParam := range q.Set { key := repl.ReplaceAll(setParam.Key, "") if key == "" { continue } val := repl.ReplaceAll(setParam.Val, "") query[key] = []string{val} } for _, addParam := range q.Add { key := repl.ReplaceAll(addParam.Key, "") if key == "" { continue } val := repl.ReplaceAll(addParam.Val, "") query[key] = append(query[key], val) } for _, replaceParam := range q.Replace { key := repl.ReplaceAll(replaceParam.Key, "") search := repl.ReplaceKnown(replaceParam.Search, "") replace := repl.ReplaceKnown(replaceParam.Replace, "") // replace all query keys... 
if key == "*" { for fieldName, vals := range query { for i := range vals { if replaceParam.re != nil { query[fieldName][i] = replaceParam.re.ReplaceAllString(query[fieldName][i], replace) } else { query[fieldName][i] = strings.ReplaceAll(query[fieldName][i], search, replace) } } } continue } for fieldName, vals := range query { for i := range vals { if replaceParam.re != nil { query[fieldName][i] = replaceParam.re.ReplaceAllString(query[fieldName][i], replace) } else { query[fieldName][i] = strings.ReplaceAll(query[fieldName][i], search, replace) } } } } for _, deleteParam := range q.Delete { param := repl.ReplaceAll(deleteParam, "") if param == "" { continue } delete(query, param) } r.URL.RawQuery = query.Encode() } type queryOpsArguments struct { // A key in the query string. Note that query string keys may appear multiple times. Key string `json:"key,omitempty"` // The value for the given operation; for add and set, this is // simply the value of the query, and for rename this is the // query key to rename to. Val string `json:"val,omitempty"` } type queryOpsReplacement struct { // The key to replace in the query string. Key string `json:"key,omitempty"` // The substring to search for. Search string `json:"search,omitempty"` // The regular expression to search with. SearchRegexp string `json:"search_regexp,omitempty"` // The string with which to replace matches. Replace string `json:"replace,omitempty"` re *regexp.Regexp } // Interface guard var _ caddyhttp.MiddlewareHandler = (*Rewrite)(nil) ================================================ FILE: modules/caddyhttp/rewrite/rewrite_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rewrite import ( "net/http" "regexp" "testing" "github.com/caddyserver/caddy/v2" ) func TestRewrite(t *testing.T) { repl := caddy.NewReplacer() for i, tc := range []struct { input, expect *http.Request rule Rewrite }{ { input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/"), }, { rule: Rewrite{Method: "GET", URI: "/"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/"), }, { rule: Rewrite{Method: "POST"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "POST", "/"), }, { rule: Rewrite{URI: "/foo"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/foo"), }, { rule: Rewrite{URI: "/foo"}, input: newRequest(t, "GET", "/bar"), expect: newRequest(t, "GET", "/foo"), }, { rule: Rewrite{URI: "foo"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "foo"), }, { rule: Rewrite{URI: "{http.request.uri}"}, input: newRequest(t, "GET", "/bar%3Fbaz?c=d"), expect: newRequest(t, "GET", "/bar%3Fbaz?c=d"), }, { rule: Rewrite{URI: "{http.request.uri.path}"}, input: newRequest(t, "GET", "/bar%3Fbaz"), expect: newRequest(t, "GET", "/bar%3Fbaz"), }, { rule: Rewrite{URI: "/foo{http.request.uri.path}"}, input: newRequest(t, "GET", "/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{URI: "/index.php?p={http.request.uri.path}"}, input: newRequest(t, "GET", "/foo/bar"), expect: newRequest(t, "GET", "/index.php?p=%2Ffoo%2Fbar"), }, { rule: Rewrite{URI: "?a=b&{http.request.uri.query}"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/?a=b"), }, { rule: Rewrite{URI: "/?c=d"}, 
input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/?c=d"), }, { rule: Rewrite{URI: "/?c=d"}, input: newRequest(t, "GET", "/?a=b"), expect: newRequest(t, "GET", "/?c=d"), }, { rule: Rewrite{URI: "?c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/foo?c=d"), }, { rule: Rewrite{URI: "/?c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/?c=d"), }, { rule: Rewrite{URI: "/?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/?c=d"), }, { rule: Rewrite{URI: "/foo?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/foo?c=d"), }, { rule: Rewrite{URI: "?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/foo?c=d"), }, { rule: Rewrite{URI: "{http.request.uri.path}?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/foo?c=d"), }, { rule: Rewrite{URI: "{http.request.uri.path}?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/foo?c=d"), }, { rule: Rewrite{URI: "/index.php?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/index.php?c=d"), }, { rule: Rewrite{URI: "?a=b&c=d"}, input: newRequest(t, "GET", "/foo"), expect: newRequest(t, "GET", "/foo?a=b&c=d"), }, { rule: Rewrite{URI: "/index.php?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/?a=b"), expect: newRequest(t, "GET", "/index.php?a=b&c=d"), }, { rule: Rewrite{URI: "/index.php?c=d&{http.request.uri.query}"}, input: newRequest(t, "GET", "/?a=b"), expect: newRequest(t, "GET", "/index.php?c=d&a=b"), }, { rule: Rewrite{URI: "/index.php?{http.request.uri.query}&p={http.request.uri.path}"}, input: newRequest(t, "GET", "/foo/bar?a=b"), expect: newRequest(t, "GET", "/index.php?a=b&p=%2Ffoo%2Fbar"), }, { rule: Rewrite{URI: "{http.request.uri.path}?"}, input: newRequest(t, 
"GET", "/foo/bar?a=b&c=d"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{URI: "?qs={http.request.uri.query}"}, input: newRequest(t, "GET", "/foo?a=b&c=d"), expect: newRequest(t, "GET", "/foo?qs=a%3Db%26c%3Dd"), }, { rule: Rewrite{URI: "/foo?{http.request.uri.query}#frag"}, input: newRequest(t, "GET", "/foo/bar?a=b"), expect: newRequest(t, "GET", "/foo?a=b#frag"), }, { rule: Rewrite{URI: "/foo{http.request.uri}"}, input: newRequest(t, "GET", "/bar?a=b"), expect: newRequest(t, "GET", "/foo/bar?a=b"), }, { rule: Rewrite{URI: "/foo{http.request.uri}"}, input: newRequest(t, "GET", "/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{URI: "/foo{http.request.uri}?c=d"}, input: newRequest(t, "GET", "/bar?a=b"), expect: newRequest(t, "GET", "/foo/bar?c=d"), }, { rule: Rewrite{URI: "/foo{http.request.uri}?{http.request.uri.query}&c=d"}, input: newRequest(t, "GET", "/bar?a=b"), expect: newRequest(t, "GET", "/foo/bar?a=b&c=d"), }, { rule: Rewrite{URI: "{http.request.uri}"}, input: newRequest(t, "GET", "/bar?a=b"), expect: newRequest(t, "GET", "/bar?a=b"), }, { rule: Rewrite{URI: "{http.request.uri.path}bar?c=d"}, input: newRequest(t, "GET", "/foo/?a=b"), expect: newRequest(t, "GET", "/foo/bar?c=d"), }, { rule: Rewrite{URI: "/i{http.request.uri}"}, input: newRequest(t, "GET", "/%C2%B7%E2%88%B5.png"), expect: newRequest(t, "GET", "/i/%C2%B7%E2%88%B5.png"), }, { rule: Rewrite{URI: "/i{http.request.uri}"}, input: newRequest(t, "GET", "/·∵.png?a=b"), expect: newRequest(t, "GET", "/i/%C2%B7%E2%88%B5.png?a=b"), }, { rule: Rewrite{URI: "/i{http.request.uri}"}, input: newRequest(t, "GET", "/%C2%B7%E2%88%B5.png?a=b"), expect: newRequest(t, "GET", "/i/%C2%B7%E2%88%B5.png?a=b"), }, { rule: Rewrite{URI: "/bar#?"}, input: newRequest(t, "GET", "/foo#fragFirst?c=d"), // not a valid query string (is part of fragment) expect: newRequest(t, "GET", "/bar#?"), // I think this is right? 
but who knows; std lib drops fragment when parsing }, { rule: Rewrite{URI: "/bar"}, input: newRequest(t, "GET", "/foo#fragFirst?c=d"), expect: newRequest(t, "GET", "/bar#fragFirst?c=d"), }, { rule: Rewrite{URI: "/api/admin/panel"}, input: newRequest(t, "GET", "/api/admin%2Fpanel"), expect: newRequest(t, "GET", "/api/admin/panel"), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/foo/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/prefix/foo/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{StripPathPrefix: "prefix"}, input: newRequest(t, "GET", "/prefix/foo/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/prefix"), expect: newRequest(t, "GET", ""), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/"), expect: newRequest(t, "GET", "/"), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/prefix/foo%2Fbar"), expect: newRequest(t, "GET", "/foo%2Fbar"), }, { rule: Rewrite{StripPathPrefix: "/prefix"}, input: newRequest(t, "GET", "/foo/prefix/bar"), expect: newRequest(t, "GET", "/foo/prefix/bar"), }, { rule: Rewrite{StripPathPrefix: "//prefix"}, // scheme and host needed for URL parser to succeed in setting up test input: newRequest(t, "GET", "http://host//prefix/foo/bar"), expect: newRequest(t, "GET", "http://host/foo/bar"), }, { rule: Rewrite{StripPathPrefix: "//prefix"}, input: newRequest(t, "GET", "/prefix/foo/bar"), expect: newRequest(t, "GET", "/prefix/foo/bar"), }, { rule: Rewrite{StripPathPrefix: "/a%2Fb/c"}, input: newRequest(t, "GET", "/a%2Fb/c/d"), expect: newRequest(t, "GET", "/d"), }, { rule: Rewrite{StripPathPrefix: "/a%2Fb/c"}, input: newRequest(t, "GET", "/a%2fb/c/d"), expect: newRequest(t, "GET", "/d"), }, { rule: Rewrite{StripPathPrefix: "/a/b/c"}, input: newRequest(t, "GET", "/a%2Fb/c/d"), expect: 
newRequest(t, "GET", "/d"), }, { rule: Rewrite{StripPathPrefix: "/a%2Fb/c"}, input: newRequest(t, "GET", "/a/b/c/d"), expect: newRequest(t, "GET", "/a/b/c/d"), }, { rule: Rewrite{StripPathPrefix: "//a%2Fb/c"}, input: newRequest(t, "GET", "/a/b/c/d"), expect: newRequest(t, "GET", "/a/b/c/d"), }, { rule: Rewrite{StripPathSuffix: "/suffix"}, input: newRequest(t, "GET", "/foo/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{StripPathSuffix: "suffix"}, input: newRequest(t, "GET", "/foo/bar/suffix"), expect: newRequest(t, "GET", "/foo/bar/"), }, { rule: Rewrite{StripPathSuffix: "suffix"}, input: newRequest(t, "GET", "/foo%2Fbar/suffix"), expect: newRequest(t, "GET", "/foo%2Fbar/"), }, { rule: Rewrite{StripPathSuffix: "%2fsuffix"}, input: newRequest(t, "GET", "/foo%2Fbar%2fsuffix"), expect: newRequest(t, "GET", "/foo%2Fbar"), }, { rule: Rewrite{StripPathSuffix: "/suffix"}, input: newRequest(t, "GET", "/foo/suffix/bar"), expect: newRequest(t, "GET", "/foo/suffix/bar"), }, { rule: Rewrite{URISubstring: []substrReplacer{{Find: "findme", Replace: "replaced"}}}, input: newRequest(t, "GET", "/foo/bar"), expect: newRequest(t, "GET", "/foo/bar"), }, { rule: Rewrite{URISubstring: []substrReplacer{{Find: "findme", Replace: "replaced"}}}, input: newRequest(t, "GET", "/foo/findme/bar"), expect: newRequest(t, "GET", "/foo/replaced/bar"), }, { rule: Rewrite{URISubstring: []substrReplacer{{Find: "findme", Replace: "replaced"}}}, input: newRequest(t, "GET", "/foo/findme%2Fbar"), expect: newRequest(t, "GET", "/foo/replaced%2Fbar"), }, { rule: Rewrite{PathRegexp: []*regexReplacer{{Find: "/{2,}", Replace: "/"}}}, input: newRequest(t, "GET", "/foo//bar///baz?a=b//c"), expect: newRequest(t, "GET", "/foo/bar/baz?a=b//c"), }, } { // copy the original input just enough so that we can // compare it after the rewrite to see if it changed urlCopy := *tc.input.URL originalInput := &http.Request{ Method: tc.input.Method, RequestURI: tc.input.RequestURI, URL: &urlCopy, } // populate 
the replacer just enough for our tests repl.Set("http.request.uri", tc.input.RequestURI) repl.Set("http.request.uri.path", tc.input.URL.Path) repl.Set("http.request.uri.query", tc.input.URL.RawQuery) // we can't directly call Provision() without a valid caddy.Context // (TODO: fix that) so here we ad-hoc compile the regex for _, rep := range tc.rule.PathRegexp { re, err := regexp.Compile(rep.Find) if err != nil { t.Fatal(err) } rep.re = re } changed := tc.rule.Rewrite(tc.input, repl) if expected, actual := !reqEqual(originalInput, tc.input), changed; expected != actual { t.Errorf("Test %d: Expected changed=%t but was %t", i, expected, actual) } if expected, actual := tc.expect.Method, tc.input.Method; expected != actual { t.Errorf("Test %d: Expected Method='%s' but got '%s'", i, expected, actual) } if expected, actual := tc.expect.RequestURI, tc.input.RequestURI; expected != actual { t.Errorf("Test %d: Expected RequestURI='%s' but got '%s'", i, expected, actual) } if expected, actual := tc.expect.URL.String(), tc.input.URL.String(); expected != actual { t.Errorf("Test %d: Expected URL='%s' but got '%s'", i, expected, actual) } if expected, actual := tc.expect.URL.RequestURI(), tc.input.URL.RequestURI(); expected != actual { t.Errorf("Test %d: Expected URL.RequestURI()='%s' but got '%s'", i, expected, actual) } if expected, actual := tc.expect.URL.Fragment, tc.input.URL.Fragment; expected != actual { t.Errorf("Test %d: Expected URL.Fragment='%s' but got '%s'", i, expected, actual) } } } func newRequest(t *testing.T, method, uri string) *http.Request { req, err := http.NewRequest(method, uri, nil) if err != nil { t.Fatalf("error creating request: %v", err) } req.RequestURI = req.URL.RequestURI() // simulate incoming request return req } // reqEqual if r1 and r2 are equal enough for our purposes. 
func reqEqual(r1, r2 *http.Request) bool {
	if r1.Method != r2.Method {
		return false
	}
	if r1.RequestURI != r2.RequestURI {
		return false
	}
	// exactly one URL is nil -> not equal
	if (r1.URL == nil && r2.URL != nil) || (r1.URL != nil && r2.URL == nil) {
		return false
	}
	// both URLs nil -> nothing more to compare
	if r1.URL == nil && r2.URL == nil {
		return true
	}
	return r1.URL.Scheme == r2.URL.Scheme &&
		r1.URL.Host == r2.URL.Host &&
		r1.URL.Path == r2.URL.Path &&
		r1.URL.RawPath == r2.URL.RawPath &&
		r1.URL.RawQuery == r2.URL.RawQuery &&
		r1.URL.Fragment == r2.URL.Fragment
}


================================================
FILE: modules/caddyhttp/routes.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"

	"github.com/caddyserver/caddy/v2"
)

// Route consists of a set of rules for matching HTTP requests,
// a list of handlers to execute, and optional flow control
// parameters which customize the handling of HTTP requests
// in a highly flexible and performant manner.
type Route struct {
	// Group is an optional name for a group to which this
	// route belongs. Grouping a route makes it mutually
	// exclusive with others in its group; if a route belongs
	// to a group, only the first matching route in that group
	// will be executed.
	Group string `json:"group,omitempty"`

	// The matcher sets which will be used to qualify this
	// route for a request (essentially the "if" statement
	// of this route). Each matcher set is OR'ed, but matchers
	// within a set are AND'ed together.
	MatcherSetsRaw RawMatcherSets `json:"match,omitempty" caddy:"namespace=http.matchers"`

	// The list of handlers for this route. Upon matching a request, they are chained
	// together in a middleware fashion: requests flow from the first handler to the last
	// (top of the list to the bottom), with the possibility that any handler could stop
	// the chain and/or return an error. Responses flow back through the chain (bottom of
	// the list to the top) as they are written out to the client.
	//
	// Not all handlers call the next handler in the chain. For example, the reverse_proxy
	// handler always sends a request upstream or returns an error. Thus, configuring
	// handlers after reverse_proxy in the same route is illogical, since they would never
	// be executed. You will want to put handlers which originate the response at the very
	// end of your route(s). The documentation for a module should state whether it invokes
	// the next handler, but sometimes it is common sense.
	//
	// Some handlers manipulate the response. Remember that requests flow down the list, and
	// responses flow up the list.
	//
	// For example, if you wanted to use both `templates` and `encode` handlers, you would
	// need to put `templates` after `encode` in your route, because responses flow up.
	// Thus, `templates` will be able to parse and execute the plain-text response as a
	// template, and then return it up to the `encode` handler which will then compress it
	// into a binary format.
	//
	// If `templates` came before `encode`, then `encode` would write a compressed,
	// binary-encoded response to `templates` which would not be able to parse the response
	// properly.
	//
	// The correct order, then, is this:
	//
	//     [
	//         {"handler": "encode"},
	//         {"handler": "templates"},
	//         {"handler": "file_server"}
	//     ]
	//
	// The request flows ⬇️ DOWN (`encode` -> `templates` -> `file_server`).
	//
	// 1. First, `encode` will choose how to `encode` the response and wrap the response.
	// 2. Then, `templates` will wrap the response with a buffer.
	// 3. Finally, `file_server` will originate the content from a file.
	//
	// The response flows ⬆️ UP (`file_server` -> `templates` -> `encode`):
	//
	// 1. First, `file_server` will write the file to the response.
	// 2. That write will be buffered and then executed by `templates`.
	// 3. Lastly, the write from `templates` will flow into `encode` which will compress the stream.
	//
	// If you think of routes in this way, it will be easy and even fun to solve the puzzle of writing correct routes.
	HandlersRaw []json.RawMessage `json:"handle,omitempty" caddy:"namespace=http.handlers inline_key=handler"`

	// If true, no more routes will be executed after this one.
	Terminal bool `json:"terminal,omitempty"`

	// decoded values
	MatcherSets MatcherSets         `json:"-"`
	Handlers    []MiddlewareHandler `json:"-"`

	middleware  []Middleware
	metrics     *Metrics
	metricsCtx  caddy.Context
	handlerName string
}

// Empty returns true if the route has all zero/default values.
func (r Route) Empty() bool {
	return len(r.MatcherSetsRaw) == 0 &&
		len(r.MatcherSets) == 0 &&
		len(r.HandlersRaw) == 0 &&
		len(r.Handlers) == 0 &&
		!r.Terminal &&
		r.Group == ""
}

// String returns a human-readable summary of the route's raw config.
func (r Route) String() string {
	var handlersRaw strings.Builder
	handlersRaw.WriteByte('[')
	for _, hr := range r.HandlersRaw {
		handlersRaw.WriteByte(' ')
		handlersRaw.WriteString(string(hr))
	}
	handlersRaw.WriteByte(']')

	return fmt.Sprintf(`{Group:"%s" MatcherSetsRaw:%s HandlersRaw:%s Terminal:%t}`,
		r.Group, r.MatcherSetsRaw, handlersRaw.String(), r.Terminal)
}

// Provision sets up both the matchers and handlers in the route.
func (r *Route) Provision(ctx caddy.Context, metrics *Metrics) error {
	err := r.ProvisionMatchers(ctx)
	if err != nil {
		return err
	}
	return r.ProvisionHandlers(ctx, metrics)
}

// ProvisionMatchers sets up all the matchers by loading the
// matcher modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (r *Route) ProvisionMatchers(ctx caddy.Context) error {
	// matchers
	matchersIface, err := ctx.LoadModule(r, "MatcherSetsRaw")
	if err != nil {
		return fmt.Errorf("loading matcher modules: %v", err)
	}
	err = r.MatcherSets.FromInterface(matchersIface)
	if err != nil {
		return err
	}
	return nil
}

// ProvisionHandlers sets up all the handlers by loading the
// handler modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (r *Route) ProvisionHandlers(ctx caddy.Context, metrics *Metrics) error {
	handlersIface, err := ctx.LoadModule(r, "HandlersRaw")
	if err != nil {
		return fmt.Errorf("loading handler modules: %v", err)
	}
	for _, handler := range handlersIface.([]any) {
		r.Handlers = append(r.Handlers, handler.(MiddlewareHandler))
	}

	// Store metrics info for route-level instrumentation (applied once
	// per route in wrapRoute, instead of per-handler which was redundant).
	r.metrics = metrics
	r.metricsCtx = ctx
	if len(r.Handlers) > 0 {
		r.handlerName = caddy.GetModuleName(r.Handlers[0])
	}

	// Make ProvisionHandlers idempotent by clearing the middleware field
	r.middleware = []Middleware{}

	// pre-compile the middleware handler chain
	for _, midhandler := range r.Handlers {
		r.middleware = append(r.middleware, wrapMiddleware(ctx, midhandler))
	}
	return nil
}

// Compile prepares a middleware chain from the route list.
// This should only be done once during the request, just
// before the middleware chain is executed.
func (r Route) Compile(next Handler) Handler {
	return wrapRoute(r)(next)
}

// RouteList is a list of server routes that can
// create a middleware chain.
type RouteList []Route

// Provision sets up both the matchers and handlers in the routes.
func (routes RouteList) Provision(ctx caddy.Context) error { err := routes.ProvisionMatchers(ctx) if err != nil { return err } return routes.ProvisionHandlers(ctx, nil) } // ProvisionMatchers sets up all the matchers by loading the // matcher modules. Only call this method directly if you need // to set up matchers and handlers separately without having // to provision a second time; otherwise use Provision instead. func (routes RouteList) ProvisionMatchers(ctx caddy.Context) error { for i := range routes { err := routes[i].ProvisionMatchers(ctx) if err != nil { return fmt.Errorf("route %d: %v", i, err) } } return nil } // ProvisionHandlers sets up all the handlers by loading the // handler modules. Only call this method directly if you need // to set up matchers and handlers separately without having // to provision a second time; otherwise use Provision instead. func (routes RouteList) ProvisionHandlers(ctx caddy.Context, metrics *Metrics) error { for i := range routes { err := routes[i].ProvisionHandlers(ctx, metrics) if err != nil { return fmt.Errorf("route %d: %v", i, err) } } return nil } // Compile prepares a middleware chain from the route list. // This should only be done either once during provisioning // for top-level routes, or on each request just before the // middleware chain is executed for subroutes. func (routes RouteList) Compile(next Handler) Handler { mid := make([]Middleware, 0, len(routes)) for _, route := range routes { mid = append(mid, wrapRoute(route)) } stack := next for i := len(mid) - 1; i >= 0; i-- { stack = mid[i](stack) } return stack } // wrapRoute wraps route with a middleware and handler so that it can // be chained in and defer evaluation of its matchers to request-time. // Like wrapMiddleware, it is vital that this wrapping takes place in // its own stack frame so as to not overwrite the reference to the // intended route by looping and changing the reference each time. 
func wrapRoute(route Route) Middleware {
	return func(next Handler) Handler {
		return HandlerFunc(func(rw http.ResponseWriter, req *http.Request) error {
			// TODO: Update this comment, it seems we've moved the copy into the handler?
			// copy the next handler (it's an interface, so it's just
			// a very lightweight copy of a pointer); this is important
			// because this is a closure to the func below, which
			// re-assigns the value as it compiles the middleware stack;
			// if we don't make this copy, we'd affect the underlying
			// pointer for all future request (yikes); we could
			// alternatively solve this by moving the func below out of
			// this closure and into a standalone package-level func,
			// but I just thought this made more sense
			nextCopy := next

			// route must match at least one of the matcher sets
			matches, err := route.MatcherSets.AnyMatchWithError(req)
			if err != nil {
				// allow matchers the opportunity to short circuit
				// the request and trigger the error handling chain
				return err
			}
			if !matches {
				// call the next handler, and skip this one,
				// since the matcher didn't match
				return nextCopy.ServeHTTP(rw, req)
			}

			// if route is part of a group, ensure only the
			// first matching route in the group is applied
			if route.Group != "" {
				groups := req.Context().Value(routeGroupCtxKey).(map[string]struct{})

				if _, ok := groups[route.Group]; ok {
					// this group has already been
					// satisfied by a matching route
					return nextCopy.ServeHTTP(rw, req)
				}

				// this matching route satisfies the group
				groups[route.Group] = struct{}{}
			}

			// make terminal routes terminate
			if route.Terminal {
				if _, ok := req.Context().Value(ErrorCtxKey).(error); ok {
					nextCopy = errorEmptyHandler
				} else {
					nextCopy = emptyHandler
				}
			}

			// compile this route's handler stack (innermost handler is
			// wrapped first, so handlers execute in list order)
			for i := len(route.middleware) - 1; i >= 0; i-- {
				nextCopy = route.middleware[i](nextCopy)
			}

			// Apply metrics instrumentation once for the entire route,
			// rather than wrapping each individual handler. This avoids
			// redundant metrics collection that caused significant CPU
			// overhead (see issue #4644).
			if route.metrics != nil {
				nextCopy = newMetricsInstrumentedRoute(
					route.metricsCtx,
					route.handlerName,
					nextCopy,
					route.metrics,
				)
			}

			return nextCopy.ServeHTTP(rw, req)
		})
	}
}

// wrapMiddleware wraps mh such that it can be correctly
// appended to a list of middleware in preparation for
// compiling into a handler chain.
func wrapMiddleware(ctx caddy.Context, mh MiddlewareHandler) Middleware {
	return func(next Handler) Handler {
		return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
			// EXPERIMENTAL: Trace each module that gets invoked
			if server, ok := r.Context().Value(ServerCtxKey).(*Server); ok && server != nil {
				server.logTrace(mh)
			}
			return mh.ServeHTTP(w, r, next)
		})
	}
}

// MatcherSet is a set of matchers which
// must all match in order for the request
// to be matched successfully.
type MatcherSet []any

// Match returns true if the request matches all
// matchers in mset or if there are no matchers.
func (mset MatcherSet) Match(r *http.Request) bool {
	for _, m := range mset {
		if me, ok := m.(RequestMatcherWithError); ok {
			// error is intentionally discarded here; use MatchWithError
			// if the caller needs it
			match, _ := me.MatchWithError(r)
			if !match {
				return false
			}
			continue
		}
		if me, ok := m.(RequestMatcher); ok {
			if !me.Match(r) {
				return false
			}
			continue
		}
		// not a recognized matcher type: treat as non-match
		return false
	}
	return true
}

// MatchWithError returns true if r matches m.
func (mset MatcherSet) MatchWithError(r *http.Request) (bool, error) { for _, m := range mset { if me, ok := m.(RequestMatcherWithError); ok { match, err := me.MatchWithError(r) if err != nil || !match { return match, err } continue } if me, ok := m.(RequestMatcher); ok { if !me.Match(r) { // for backwards compatibility err, ok := GetVar(r.Context(), MatcherErrorVarKey).(error) if ok { // clear out the error from context since we've consumed it SetVar(r.Context(), MatcherErrorVarKey, nil) return false, err } return false, nil } continue } return false, fmt.Errorf("matcher is not a RequestMatcher or RequestMatcherWithError: %#v", m) } return true, nil } // RawMatcherSets is a group of matcher sets // in their raw, JSON form. type RawMatcherSets []caddy.ModuleMap // MatcherSets is a group of matcher sets capable // of checking whether a request matches any of // the sets. type MatcherSets []MatcherSet // AnyMatch returns true if req matches any of the // matcher sets in ms or if there are no matchers, // in which case the request always matches. // // Deprecated: Use AnyMatchWithError instead. func (ms MatcherSets) AnyMatch(req *http.Request) bool { for _, m := range ms { match, err := m.MatchWithError(req) if err != nil { SetVar(req.Context(), MatcherErrorVarKey, err) return false } if match { return match } } return len(ms) == 0 } // AnyMatchWithError returns true if req matches any of the // matcher sets in ms or if there are no matchers, in which // case the request always matches. If any matcher returns // an error, we cut short and return the error. func (ms MatcherSets) AnyMatchWithError(req *http.Request) (bool, error) { for _, m := range ms { match, err := m.MatchWithError(req) if err != nil || match { return match, err } } return len(ms) == 0, nil } // FromInterface fills ms from an 'any' value obtained from LoadModule. 
func (ms *MatcherSets) FromInterface(matcherSets any) error { for _, matcherSetIfaces := range matcherSets.([]map[string]any) { var matcherSet MatcherSet for _, matcher := range matcherSetIfaces { if m, ok := matcher.(RequestMatcherWithError); ok { matcherSet = append(matcherSet, m) continue } if m, ok := matcher.(RequestMatcher); ok { matcherSet = append(matcherSet, m) continue } return fmt.Errorf("decoded module is not a RequestMatcher or RequestMatcherWithError: %#v", matcher) } *ms = append(*ms, matcherSet) } return nil } // TODO: Is this used? func (ms MatcherSets) String() string { var result strings.Builder result.WriteByte('[') for _, matcherSet := range ms { for _, matcher := range matcherSet { fmt.Fprintf(&result, " %#v", matcher) } } result.WriteByte(']') return result.String() } var routeGroupCtxKey = caddy.CtxKey("route_group") ================================================ FILE: modules/caddyhttp/server.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyhttp

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/netip"
	"net/url"
	"runtime"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
	h3qlog "github.com/quic-go/quic-go/http3/qlog"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyevents"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

// Server describes an HTTP server.
type Server struct {
	// Socket addresses to which to bind listeners. Accepts
	// [network addresses](/docs/conventions#network-addresses)
	// that may include port ranges. Listener addresses must
	// be unique; they cannot be repeated across all defined
	// servers.
	Listen []string `json:"listen,omitempty"`

	// A list of listener wrapper modules, which can modify the behavior
	// of the base listener. They are applied in the given order.
	ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`

	// A list of packet conn wrapper modules, which can modify the behavior
	// of the base packet conn. They are applied in the given order.
	PacketConnWrappersRaw []json.RawMessage `json:"packet_conn_wrappers,omitempty" caddy:"namespace=caddy.packetconns inline_key=wrapper"`

	// How long to allow a read from a client's upload. Setting this
	// to a short, non-zero value can mitigate slowloris attacks, but
	// may also affect legitimately slow clients.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// ReadHeaderTimeout is like ReadTimeout but for request headers.
	// Default is 1 minute.
	ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`

	// WriteTimeout is how long to allow a write to a client. Note
	// that setting this to a small value when serving large files
	// may negatively affect legitimately slow clients.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// IdleTimeout is the maximum time to wait for the next request
	// when keep-alives are enabled. If zero, a default timeout of
	// 5m is applied to help avoid resource exhaustion.
	IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`

	// KeepAliveInterval is the interval at which TCP keepalive packets
	// are sent to keep the connection alive at the TCP layer when no other
	// data is being transmitted.
	// If zero, the default is 15s.
	// If negative, keepalive packets are not sent and other keepalive parameters
	// are ignored.
	KeepAliveInterval caddy.Duration `json:"keepalive_interval,omitempty"`

	// KeepAliveIdle is the time that the connection must be idle before
	// the first TCP keep-alive probe is sent when no other data is being
	// transmitted.
	// If zero, the default is 15s.
	// If negative, underlying socket value is unchanged.
	KeepAliveIdle caddy.Duration `json:"keepalive_idle,omitempty"`

	// KeepAliveCount is the maximum number of TCP keep-alive probes that
	// should be sent before dropping a connection.
	// If zero, the default is 9.
	// If negative, underlying socket value is unchanged.
	KeepAliveCount int `json:"keepalive_count,omitempty"`

	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers.
	MaxHeaderBytes int `json:"max_header_bytes,omitempty"`

	// Enable full-duplex communication for HTTP/1 requests.
	// Only has an effect if Caddy was built with Go 1.21 or later.
	//
	// For HTTP/1 requests, the Go HTTP server by default consumes any
	// unread portion of the request body before beginning to write the
	// response, preventing handlers from concurrently reading from the
	// request and writing the response. Enabling this option disables
	// this behavior and permits handlers to continue to read from the
	// request while concurrently writing the response.
	//
	// For HTTP/2 requests, the Go HTTP server always permits concurrent
	// reads and responses, so this option has no effect.
	//
	// Test thoroughly with your HTTP clients, as some older clients may
	// not support full-duplex HTTP/1 which can cause them to deadlock.
	// See https://github.com/golang/go/issues/57786 for more info.
	//
	// TODO: This is an EXPERIMENTAL feature. Subject to change or removal.
	EnableFullDuplex bool `json:"enable_full_duplex,omitempty"`

	// Routes describes how this server will handle requests.
	// Routes are executed sequentially. First a route's matchers
	// are evaluated, then its grouping. If it matches and has
	// not been mutually-excluded by its grouping, then its
	// handlers are executed sequentially. The sequence of invoked
	// handlers comprises a compiled middleware chain that flows
	// from each matching route and its handlers to the next.
	//
	// By default, all unrouted requests receive a 200 OK response
	// to indicate the server is working.
	Routes RouteList `json:"routes,omitempty"`

	// Errors is how this server will handle errors returned from any
	// of the handlers in the primary routes. If the primary handler
	// chain returns an error, the error along with its recommended
	// status code are bubbled back up to the HTTP server which
	// executes a separate error route, specified using this property.
	// The error routes work exactly like the normal routes.
	Errors *HTTPErrorConfig `json:"errors,omitempty"`

	// NamedRoutes describes a mapping of reusable routes that can be
	// invoked by their name. This can be used to optimize memory usage
	// when the same route is needed for many subroutes, by having
	// the handlers and matchers be only provisioned once, but used from
	// many places. These routes are not executed unless they are invoked
	// from another route.
	//
	// EXPERIMENTAL: Subject to change or removal.
	NamedRoutes map[string]*Route `json:"named_routes,omitempty"`

	// How to handle TLS connections. At least one policy is
	// required to enable HTTPS on this server if automatic
	// HTTPS is disabled or does not apply.
	TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`

	// AutoHTTPS configures or disables automatic HTTPS within this server.
	// HTTPS is enabled automatically and by default when qualifying names
	// are present in a Host matcher and/or when the server is listening
	// only on the HTTPS port.
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`

	// If true, will require that a request's Host header match
	// the value of the ServerName sent by the client's TLS
	// ClientHello; often a necessary safeguard when using TLS
	// client authentication.
	StrictSNIHost *bool `json:"strict_sni_host,omitempty"`

	// A module which provides a source of IP ranges, from which
	// requests should be trusted. By default, no proxies are
	// trusted.
	//
	// On its own, this configuration will not do anything,
	// but it can be used as a default set of ranges for
	// handlers or matchers in routes to pick up, instead
	// of needing to configure each of them. See the
	// `reverse_proxy` handler for example, which uses this
	// to trust sensitive incoming `X-Forwarded-*` headers.
	TrustedProxiesRaw json.RawMessage `json:"trusted_proxies,omitempty" caddy:"namespace=http.ip_sources inline_key=source"`

	// The headers from which the client IP address could be
	// read from. These will be considered in order, with the
	// first good value being used as the client IP.
	// By default, only `X-Forwarded-For` is considered.
	//
	// This depends on `trusted_proxies` being configured and
	// the request being validated as coming from a trusted
	// proxy, otherwise the client IP will be set to the direct
	// remote IP address.
	ClientIPHeaders []string `json:"client_ip_headers,omitempty"`

	// If greater than zero, enables strict ClientIPHeaders
	// (default X-Forwarded-For) parsing. If enabled, the
	// ClientIPHeaders will be parsed from right to left, and
	// the first value that is both valid and doesn't match the
	// trusted proxy list will be used as client IP. If zero,
	// the ClientIPHeaders will be parsed from left to right,
	// and the first value that is a valid IP address will be
	// used as client IP.
	//
	// This depends on `trusted_proxies` being configured.
	// This option is disabled by default.
	TrustedProxiesStrict int `json:"trusted_proxies_strict,omitempty"`

	// If true, enables trusting socket connections
	// (e.g. Unix domain sockets) as coming from a trusted
	// proxy.
	//
	// This option is disabled by default.
	TrustedProxiesUnix bool `json:"trusted_proxies_unix,omitempty"`

	// Enables access logging and configures how access logs are handled
	// in this server. To minimally enable access logs, simply set this
	// to a non-null, empty struct.
	Logs *ServerLogConfig `json:"logs,omitempty"`

	// Protocols specifies which HTTP protocols to enable.
	// Supported values are:
	//
	// - `h1` (HTTP/1.1)
	// - `h2` (HTTP/2)
	// - `h2c` (cleartext HTTP/2)
	// - `h3` (HTTP/3)
	//
	// If enabling `h2` or `h2c`, `h1` must also be enabled;
	// this is due to current limitations in the Go standard
	// library.
	//
	// HTTP/2 operates only over TLS (HTTPS). HTTP/3 opens
	// a UDP socket to serve QUIC connections.
	//
	// H2C operates over plain TCP if the client supports it;
	// however, because this is not implemented by the Go
	// standard library, other server options are not compatible
	// and will not be applied to H2C requests. Do not enable this
	// only to achieve maximum client compatibility. In practice,
	// very few clients implement H2C, and even fewer require it.
	// Enabling H2C can be useful for serving/proxying gRPC
	// if encryption is not possible or desired.
	//
	// We recommend for most users to simply let Caddy use the
	// default settings.
	//
	// Default: `[h1 h2 h3]`
	Protocols []string `json:"protocols,omitempty"`

	// ListenProtocols overrides Protocols for each parallel address in Listen.
	// A nil value or element indicates that Protocols will be used instead.
	ListenProtocols [][]string `json:"listen_protocols,omitempty"`

	// If set, overrides whether QUIC listeners allow 0-RTT (early data).
	// If nil, the default behavior is used (currently allowed).
	//
	// One reason to disable 0-RTT is if a remote IP matcher is used,
	// which introduces a dependency on the remote address being verified
	// if routing happens before the TLS handshake completes. An HTTP 425
	// response is written in that case, but some clients misbehave and
	// don't perform a retry, so disabling 0-RTT can smooth it out.
	Allow0RTT *bool `json:"allow_0rtt,omitempty"`

	// If set, metrics observations will be enabled.
	// This setting is EXPERIMENTAL and subject to change.
	// DEPRECATED: Use the app-level `metrics` field.
	Metrics *Metrics `json:"metrics,omitempty"`

	// name identifies this server within the app's server map.
	name string

	// compiled middleware chains for normal and error handling
	primaryHandlerChain Handler
	errorHandlerChain   Handler

	// provisioned listener/packet-conn wrapper modules
	listenerWrappers   []caddy.ListenerWrapper
	packetConnWrappers []caddy.PacketConnWrapper

	// active sockets owned by this server
	listeners     []net.Listener
	quicListeners []http3.QUICListener // http3 now leave the quic.Listener management to us

	// references to sibling apps and loggers, set during provisioning
	tlsApp       *caddytls.TLS
	events       *caddyevents.App
	logger       *zap.Logger
	accessLogger *zap.Logger
	errorLogger  *zap.Logger
	traceLogger  *zap.Logger
	ctx          caddy.Context

	// underlying standard-library servers and bound addresses
	server    *http.Server
	h3server  *http3.Server
	addresses []caddy.NetworkAddress

	// provisioned source of trusted proxy IP ranges (from TrustedProxiesRaw)
	trustedProxies IPRangeSource

	// time at which a shutdown began, guarded by shutdownAtMu
	shutdownAt   time.Time
	shutdownAtMu *sync.RWMutex

	// registered callback functions
	connStateFuncs   []func(net.Conn, http.ConnState)
	connContextFuncs []func(ctx context.Context, c net.Conn) context.Context
	onShutdownFuncs  []func()
	onStopFuncs      []func(context.Context) error // TODO: Experimental (Nov. 2023)
}

var (
	// ServerHeader is the value Caddy advertises in the Server response header.
	ServerHeader = "Caddy"
	// serverHeader is the pre-built header value slice, to avoid
	// allocating it on every request.
	serverHeader = []string{ServerHeader}
)

// ServeHTTP is the entry point for all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	start := time.Now()

	// If there are listener wrappers that process tls connections but don't return a *tls.Conn, this field will be nil.
	if r.TLS == nil {
		// try to recover the TLS state from a function stored in the
		// connection context by such a wrapper
		if tlsConnStateFunc, ok := r.Context().Value(tlsConnectionStateFuncCtxKey).(func() *tls.ConnectionState); ok {
			r.TLS = tlsConnStateFunc()
		}
	}

	// enable full-duplex for HTTP/1, ensuring the entire
	// request body gets consumed before writing the response
	if s.EnableFullDuplex && r.ProtoMajor == 1 {
		if err := http.NewResponseController(w).EnableFullDuplex(); err != nil { //nolint:bodyclose
			if c := s.logger.Check(zapcore.WarnLevel, "failed to enable full duplex"); c != nil {
				c.Write(zap.Error(err))
			}
		}
	}

	// set the Server header
	h := w.Header()
	h["Server"] = serverHeader

	// advertise HTTP/3, if enabled
	if s.h3server != nil && r.ProtoMajor < 3 {
		if err := s.h3server.SetQUICHeaders(h); err != nil {
			if c := s.logger.Check(zapcore.ErrorLevel, "setting HTTP/3 Alt-Svc header"); c != nil {
				c.Write(zap.Error(err))
			}
		}
	}

	// prepare internals of the request for the handler pipeline
	repl := caddy.NewReplacer()
	r = PrepareRequest(r, repl, w, s)

	// clone the request for logging purposes before it enters any handler chain;
	// this is necessary to capture the original request in case it gets modified
	// during handling (cloning the request and using .WithLazy is considerably
	// faster than using .With, which will JSON-encode the request immediately)
	shouldLogCredentials := s.Logs != nil && s.Logs.ShouldLogCredentials
	loggableReq := zap.Object("request", LoggableHTTPRequest{
		Request:              r.Clone(r.Context()),
		ShouldLogCredentials: shouldLogCredentials,
	})
	errLog := s.errorLogger.WithLazy(loggableReq)

	// duration is captured after handling and read by the deferred
	// access log call below (hence the pointer passed to logRequest)
	var duration time.Duration

	if s.shouldLogRequest(r) {
		wrec := NewResponseRecorder(w, nil, nil)
		w = wrec

		// wrap the request body in a LengthReader
		// so we can track the number of bytes read from it
		var bodyReader *lengthReader
		if r.Body != nil {
			bodyReader = &lengthReader{Source: r.Body}
			r.Body = bodyReader

			// should always be true, private interface can only be referenced in the same package
			if setReadSizer, ok := wrec.(interface{ setReadSize(*int) }); ok {
				setReadSizer.setReadSize(&bodyReader.Length)
			}
		}

		// capture the original version of the request
		accLog := s.accessLogger.WithLazy(loggableReq)

		defer s.logRequest(accLog, r, wrec, &duration, repl, bodyReader, shouldLogCredentials)
	}

	// guarantee ACME HTTP challenges; handle them separately from any user-defined handlers
	if s.tlsApp.HandleHTTPChallenge(w, r) {
		duration = time.Since(start)
		return
	}

	// execute the primary handler chain
	err := s.serveHTTP(w, r)
	duration = time.Since(start)

	// if no error was returned, the request was handled successfully
	if err == nil {
		return
	}

	// restore original request before invoking error handler chain (issue #3717)
	// NOTE: this does not restore original headers if modified (for efficiency)
	origReq, ok := r.Context().Value(OriginalRequestCtxKey).(http.Request)
	if ok {
		r.Method = origReq.Method
		r.RemoteAddr = origReq.RemoteAddr
		r.RequestURI = origReq.RequestURI
		cloneURL(origReq.URL, r.URL)
	}

	// prepare the error log
	errLog = errLog.With(zap.Duration("duration", duration))
	errLoggers := []*zap.Logger{errLog}
	if s.Logs != nil {
		errLoggers = s.Logs.wrapLogger(errLog, r)
	}

	// get the values that will be used to log the error
	errStatus, errMsg, errFields := errLogValues(err)

	// add HTTP error information to request context
	r = s.Errors.WithError(r, err)

	// fields are built lazily: only when at least one logger
	// actually accepts the entry at the chosen level
	var fields []zapcore.Field
	if s.Errors != nil && len(s.Errors.Routes) > 0 {
		// execute user-defined error handling route
		if err2 := s.errorHandlerChain.ServeHTTP(w, r); err2 == nil {
			// user's error route handled the error response successfully, so now just log the error
			for _, logger := range errLoggers {
				if c := logger.Check(zapcore.DebugLevel, errMsg); c != nil {
					if fields == nil {
						fields = errFields()
					}
					c.Write(fields...)
				}
			}
		} else {
			// well... this is awkward: the error route itself errored,
			// so log both errors and fall back to a plain status code
			for _, logger := range errLoggers {
				if c := logger.Check(zapcore.ErrorLevel, "error handling handler error"); c != nil {
					if fields == nil {
						fields = errFields()
						fields = append([]zapcore.Field{
							zap.String("error", err2.Error()),
							zap.Namespace("first_error"),
							zap.String("msg", errMsg),
						}, fields...)
					}
					c.Write(fields...)
				}
			}
			if handlerErr, ok := err.(HandlerError); ok {
				w.WriteHeader(handlerErr.StatusCode)
			} else {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}
	} else {
		// no error route is configured: log the error and write the
		// recommended status code directly
		logLevel := zapcore.DebugLevel
		if errStatus >= 500 {
			logLevel = zapcore.ErrorLevel
		}
		for _, logger := range errLoggers {
			if c := logger.Check(logLevel, errMsg); c != nil {
				if fields == nil {
					fields = errFields()
				}
				c.Write(fields...)
			}
		}
		w.WriteHeader(errStatus)
	}
}

// serveHTTP performs basic sanity checks on the request and then
// executes the server's primary handler chain, returning any
// handler error to the caller (ServeHTTP) for error routing.
func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) error {
	// reject very long methods; probably a mistake or an attack
	if len(r.Method) > 32 {
		if s.shouldLogRequest(r) {
			if c := s.accessLogger.Check(zapcore.DebugLevel, "rejecting request with long method"); c != nil {
				c.Write(
					zap.String("method_trunc", r.Method[:32]),
					zap.String("remote_addr", r.RemoteAddr),
				)
			}
		}
		return HandlerError{StatusCode: http.StatusMethodNotAllowed}
	}

	// RFC 9112 section 3.2: "A server MUST respond with a 400 (Bad Request) status
	// code to any HTTP/1.1 request message that lacks a Host header field and to any
	// request message that contains more than one Host header field line or a Host
	// header field with an invalid field value."
	if r.ProtoMajor == 1 && r.ProtoMinor == 1 && r.Host == "" {
		return HandlerError{
			Err:        errors.New("rfc9112 forbids empty Host"),
			StatusCode: http.StatusBadRequest,
		}
	}

	// execute the primary handler chain
	return s.primaryHandlerChain.ServeHTTP(w, r)
}

// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
	return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
		return s.enforcementHandler(w, r, stack)
	})
}

// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
	// enforce strict host matching, which ensures that the SNI
	// value (if any), matches the Host header; essential for
	// servers that rely on TLS ClientAuth sharing a listener
	// with servers that do not; if not enforced, client could
	// bypass by sending benign SNI then restricted Host header
	if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
		hostname, _, err := net.SplitHostPort(r.Host)
		if err != nil {
			hostname = r.Host // OK; probably lacked port
		}
		if !strings.EqualFold(r.TLS.ServerName, hostname) {
			err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
				r.TLS.ServerName, hostname)
			// close the connection so the mismatched session is not reused
			r.Close = true
			return Error(http.StatusMisdirectedRequest, err)
		}
	}
	return next.ServeHTTP(w, r)
}

// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
	for _, lnAddr := range s.Listen {
		laddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			// unparseable addresses are skipped rather than treated as a match
			continue
		}
		// otherPort falls outside this listener's port range, so this
		// listener uses some other port
		if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
			return true
		}
	}
	return false
}

// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported)
func (s *Server) hasListenerAddress(fullAddr string) bool {
	laddrs, err := caddy.ParseNetworkAddress(fullAddr)
	if err != nil {
		return false
	}
	if laddrs.PortRangeSize() != 1 {
		return false // TODO: support port ranges
	}

	for _, lnAddr := range s.Listen {
		thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			continue
		}
		if thisAddrs.Network != laddrs.Network {
			continue
		}

		// Apparently, Linux requires all bound ports to be distinct
		// *regardless of host interface* even if the addresses are
		// in fact different; binding "192.168.0.1:9000" and then
		// ":9000" will fail for ":9000" because "address is already
		// in use" even though it's not, and the same bindings work
		// fine on macOS. I also found on Linux that listening on
		// "[::]:9000" would fail with a similar error, except with
		// the address "0.0.0.0:9000", as if deliberately ignoring
		// that I specified the IPv6 interface explicitly. This seems
		// to be a major bug in the Linux network stack and I don't
		// know why it hasn't been fixed yet, so for now we have to
		// special-case ourselves around Linux like a doting parent.
		// The second issue seems very similar to a discussion here:
		// https://github.com/nodejs/node/issues/9390
		//
		// However, binding to *different specific* interfaces
		// (e.g. 127.0.0.2:80 and 127.0.0.3:80) IS allowed on Linux.
		// The conflict only happens when mixing specific IPs with
		// wildcards (0.0.0.0 or ::).

		// Hosts match exactly (e.g. 127.0.0.2 == 127.0.0.2) -> Conflict.
		hostMatch := thisAddrs.Host == laddrs.Host

		// On Linux, specific IP vs Wildcard fails to bind.
		// So if we are on Linux AND either host is empty (wildcard), we treat
		// it as a match (conflict). But if both are specific and different
		// (127.0.0.2 vs 127.0.0.3), this remains false (no conflict).
		linuxWildcardConflict := runtime.GOOS == "linux" &&
			(thisAddrs.Host == "" || laddrs.Host == "")

		if (hostMatch || linuxWildcardConflict) &&
			(laddrs.StartPort <= thisAddrs.EndPort) &&
			(laddrs.StartPort >= thisAddrs.StartPort) {
			return true
		}
	}
	return false
}

// hasTLSClientAuth reports whether any of the server's TLS connection
// policies actively require client authentication.
func (s *Server) hasTLSClientAuth() bool {
	return slices.ContainsFunc(s.TLSConnPolicies, func(cp *caddytls.ConnectionPolicy) bool {
		return cp.ClientAuthentication != nil && cp.ClientAuthentication.Active()
	})
}

// findLastRouteWithHostMatcher returns the index of the last route
// in the server which has a host matcher. Used during Automatic HTTPS
// to determine where to insert the HTTP->HTTPS redirect route, such
// that it is after any other host matcher but before any "catch-all"
// route without a host matcher.
func (s *Server) findLastRouteWithHostMatcher() int {
	foundHostMatcher := false
	lastIndex := len(s.Routes)

	for i, route := range s.Routes {
		// since we want to break out of an inner loop, use a closure
		// to allow us to use 'return' when we found a host matcher
		found := (func() bool {
			for _, sets := range route.MatcherSets {
				for _, matcher := range sets {
					switch matcher.(type) {
					case *MatchHost:
						foundHostMatcher = true
						return true
					}
				}
			}
			return false
		})()

		// if we found the host matcher, change the lastIndex to
		// just after the current route
		if found {
			lastIndex = i + 1
		}
	}

	// If we didn't actually find a host matcher, return 0
	// because that means every defined route was a "catch-all".
	// See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8
	if !foundHostMatcher {
		return 0
	}

	return lastIndex
}

// serveHTTP3 creates a QUIC listener, configures an HTTP/3 server if
// not already done, and then uses that server to serve HTTP/3 over
// the listener, with Server s as the handler.
func (s *Server) serveHTTP3(addr caddy.NetworkAddress, tlsCfg *tls.Config) error { h3net, err := getHTTP3Network(addr.Network) if err != nil { return fmt.Errorf("starting HTTP/3 QUIC listener: %v", err) } addr.Network = h3net h3ln, err := addr.ListenQUIC(s.ctx, 0, net.ListenConfig{}, tlsCfg, s.packetConnWrappers, s.Allow0RTT) if err != nil { return fmt.Errorf("starting HTTP/3 QUIC listener: %v", err) } // create HTTP/3 server if not done already if s.h3server == nil { s.h3server = &http3.Server{ Handler: s, TLSConfig: tlsCfg, MaxHeaderBytes: s.MaxHeaderBytes, QUICConfig: &quic.Config{ Versions: []quic.Version{quic.Version1, quic.Version2}, Tracer: h3qlog.DefaultConnectionTracer, }, IdleTimeout: time.Duration(s.IdleTimeout), } } s.quicListeners = append(s.quicListeners, h3ln) //nolint:errcheck go s.h3server.ServeListener(h3ln) return nil } // configureServer applies/binds the registered callback functions to the server. func (s *Server) configureServer(server *http.Server) { for _, f := range s.connStateFuncs { if server.ConnState != nil { baseConnStateFunc := server.ConnState server.ConnState = func(conn net.Conn, state http.ConnState) { baseConnStateFunc(conn, state) f(conn, state) } } else { server.ConnState = f } } for _, f := range s.connContextFuncs { if server.ConnContext != nil { baseConnContextFunc := server.ConnContext server.ConnContext = func(ctx context.Context, c net.Conn) context.Context { return f(baseConnContextFunc(ctx, c), c) } } else { server.ConnContext = f } } for _, f := range s.onShutdownFuncs { server.RegisterOnShutdown(f) } } // RegisterConnState registers f to be invoked on s.ConnState. func (s *Server) RegisterConnState(f func(net.Conn, http.ConnState)) { s.connStateFuncs = append(s.connStateFuncs, f) } // RegisterConnContext registers f to be invoked as part of s.ConnContext. 
func (s *Server) RegisterConnContext(f func(ctx context.Context, c net.Conn) context.Context) { s.connContextFuncs = append(s.connContextFuncs, f) } // RegisterOnShutdown registers f to be invoked when the server begins to shut down. func (s *Server) RegisterOnShutdown(f func()) { s.onShutdownFuncs = append(s.onShutdownFuncs, f) } // RegisterOnStop registers f to be invoked after the server has shut down completely. // // EXPERIMENTAL: Subject to change or removal. func (s *Server) RegisterOnStop(f func(context.Context) error) { s.onStopFuncs = append(s.onStopFuncs, f) } // HTTPErrorConfig determines how to handle errors // from the HTTP handlers. type HTTPErrorConfig struct { // The routes to evaluate after the primary handler // chain returns an error. In an error route, extra // placeholders are available: // // Placeholder | Description // ------------|--------------- // `{http.error.status_code}` | The recommended HTTP status code // `{http.error.status_text}` | The status text associated with the recommended status code // `{http.error.message}` | The error message // `{http.error.trace}` | The origin of the error // `{http.error.id}` | An identifier for this occurrence of the error Routes RouteList `json:"routes,omitempty"` } // WithError makes a shallow copy of r to add the error to its // context, and sets placeholders on the request's replacer // related to err. It returns the modified request which has // the error information in its context and replacer. It // overwrites any existing error values that are stored. 
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request { // add the raw error value to the request context // so it can be accessed by error handlers c := context.WithValue(r.Context(), ErrorCtxKey, err) r = r.WithContext(c) // add error values to the replacer repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) repl.Set("http.error", err) if handlerErr, ok := err.(HandlerError); ok { repl.Set("http.error.status_code", handlerErr.StatusCode) repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode)) repl.Set("http.error.id", handlerErr.ID) repl.Set("http.error.trace", handlerErr.Trace) if handlerErr.Err != nil { repl.Set("http.error.message", handlerErr.Err.Error()) } else { repl.Set("http.error.message", http.StatusText(handlerErr.StatusCode)) } } return r } // shouldLogRequest returns true if this request should be logged. func (s *Server) shouldLogRequest(r *http.Request) bool { if s.accessLogger == nil || s.Logs == nil { // logging is disabled return false } // strip off the port if any, logger names are host only hostWithoutPort, _, err := net.SplitHostPort(r.Host) if err != nil { hostWithoutPort = r.Host } for loggerName := range s.Logs.LoggerNames { if certmagic.MatchWildcard(hostWithoutPort, loggerName) { // this host is mapped to a particular logger name return true } } for _, dh := range s.Logs.SkipHosts { // logging for this particular host is disabled if certmagic.MatchWildcard(hostWithoutPort, dh) { return false } } // if configured, this host is not mapped and thus must not be logged return !s.Logs.SkipUnmappedHosts } // logTrace will log that this middleware handler is being invoked. // It emits at DEBUG level. func (s *Server) logTrace(mh MiddlewareHandler) { if s.Logs == nil || !s.Logs.Trace { return } if c := s.traceLogger.Check(zapcore.DebugLevel, caddy.GetModuleName(mh)); c != nil { c.Write(zap.Any("module", mh)) } } // logRequest logs the request to access logs, unless skipped. 
func (s *Server) logRequest( accLog *zap.Logger, r *http.Request, wrec ResponseRecorder, duration *time.Duration, repl *caddy.Replacer, bodyReader *lengthReader, shouldLogCredentials bool, ) { ctx := r.Context() // this request may be flagged as omitted from the logs if skip, ok := GetVar(ctx, LogSkipVar).(bool); ok && skip { return } status := wrec.Status() size := wrec.Size() repl.Set("http.response.status", status) // will be 0 if no response is written by us (Go will write 200 to client) repl.Set("http.response.size", size) repl.Set("http.response.duration", duration) repl.Set("http.response.duration_ms", duration.Seconds()*1e3) // multiply seconds to preserve decimal (see #4666) loggers := []*zap.Logger{accLog} if s.Logs != nil { loggers = s.Logs.wrapLogger(accLog, r) } message := "handled request" if nop, ok := GetVar(ctx, "unhandled").(bool); ok && nop { message = "NOP" } logLevel := zapcore.InfoLevel if status >= 500 { logLevel = zapcore.ErrorLevel } var fields []zapcore.Field for _, logger := range loggers { c := logger.Check(logLevel, message) if c == nil { continue } if fields == nil { userID, _ := repl.GetString("http.auth.user.id") reqBodyLength := 0 if bodyReader != nil { reqBodyLength = bodyReader.Length } extra := ctx.Value(ExtraLogFieldsCtxKey).(*ExtraLogFields) fieldCount := 6 fields = make([]zapcore.Field, 0, fieldCount+len(extra.fields)) fields = append(fields, zap.Int("bytes_read", reqBodyLength), zap.String("user_id", userID), zap.Duration("duration", *duration), zap.Int("size", size), zap.Int("status", status), zap.Object("resp_headers", LoggableHTTPHeader{ Header: wrec.Header(), ShouldLogCredentials: shouldLogCredentials, }), ) fields = append(fields, extra.fields...) } c.Write(fields...) } } // protocol returns true if the protocol proto is configured/enabled. 
func (s *Server) protocol(proto string) bool { if s.ListenProtocols == nil { if slices.Contains(s.Protocols, proto) { return true } } else { for _, lnProtocols := range s.ListenProtocols { for _, lnProtocol := range lnProtocols { if lnProtocol == "" && slices.Contains(s.Protocols, proto) || lnProtocol == proto { return true } } } } return false } // Listeners returns the server's listeners. These are active listeners, // so calling Accept() or Close() on them will probably break things. // They are made available here for read-only purposes (e.g. Addr()) // and for type-asserting for purposes where you know what you're doing. // // EXPERIMENTAL: Subject to change or removal. func (s *Server) Listeners() []net.Listener { return s.listeners } // Name returns the server's name. func (s *Server) Name() string { return s.name } // PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can // be nil, but the handlers will lose response placeholders and access to the server. 
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request { // set up the context for the request ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl) ctx = context.WithValue(ctx, ServerCtxKey, s) trusted, clientIP := determineTrustedProxy(r, s) ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{ TrustedProxyVarKey: trusted, ClientIPVarKey: clientIP, }) ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{})) var url2 url.URL // avoid letting this escape to the heap ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2)) ctx = context.WithValue(ctx, ExtraLogFieldsCtxKey, new(ExtraLogFields)) r = r.WithContext(ctx) // once the pointer to the request won't change // anymore, finish setting up the replacer addHTTPVarsToReplacer(repl, r, w) return r } // originalRequest returns a partial, shallow copy of // req, including: req.Method, deep copy of req.URL // (into the urlCopy parameter, which should be on the // stack), req.RequestURI, and req.RemoteAddr. Notably, // headers are not copied. This function is designed to // be very fast and efficient, and useful primarily for // read-only/logging purposes. func originalRequest(req *http.Request, urlCopy *url.URL) http.Request { cloneURL(req.URL, urlCopy) return http.Request{ Method: req.Method, RemoteAddr: req.RemoteAddr, RequestURI: req.RequestURI, URL: urlCopy, } } // determineTrustedProxy parses the remote IP address of // the request, and determines (if the server configured it) // if the client is a trusted proxy. If trusted, also returns // the real client IP if possible. 
func determineTrustedProxy(r *http.Request, s *Server) (bool, string) { // If there's no server, then we can't check anything if s == nil { return false, "" } if s.TrustedProxiesUnix && r.RemoteAddr == "@" { if s.TrustedProxiesStrict > 0 { ipRanges := []netip.Prefix{} if s.trustedProxies != nil { ipRanges = s.trustedProxies.GetIPRanges(r) } return true, strictUntrustedClientIp(r, s.ClientIPHeaders, ipRanges, "@") } else { return true, trustedRealClientIP(r, s.ClientIPHeaders, "@") } } // Parse the remote IP, ignore the error as non-fatal, // but the remote IP is required to continue, so we // just return early. This should probably never happen // though, unless some other module manipulated the request's // remote address and used an invalid value. clientIP, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { return false, "" } // Client IP may contain a zone if IPv6, so we need // to pull that out before parsing the IP clientIP, _, _ = strings.Cut(clientIP, "%") ipAddr, err := netip.ParseAddr(clientIP) if err != nil { return false, "" } // Check if the client is a trusted proxy if s.trustedProxies == nil { return false, ipAddr.String() } if isTrustedClientIP(ipAddr, s.trustedProxies.GetIPRanges(r)) { if s.TrustedProxiesStrict > 0 { return true, strictUntrustedClientIp(r, s.ClientIPHeaders, s.trustedProxies.GetIPRanges(r), ipAddr.String()) } return true, trustedRealClientIP(r, s.ClientIPHeaders, ipAddr.String()) } return false, ipAddr.String() } // isTrustedClientIP returns true if the given IP address is // in the list of trusted IP ranges. func isTrustedClientIP(ipAddr netip.Addr, trusted []netip.Prefix) bool { return slices.ContainsFunc(trusted, func(prefix netip.Prefix) bool { return prefix.Contains(ipAddr) }) } // trustedRealClientIP finds the client IP from the request assuming it is // from a trusted client. If there is no client IP headers, then the // direct remote address is returned. 
If there are client IP headers, // then the first value from those headers is used. func trustedRealClientIP(r *http.Request, headers []string, clientIP string) string { // Read all the values of the configured client IP headers, in order // nolint:prealloc var values []string for _, field := range headers { values = append(values, r.Header.Values(field)...) } // If we don't have any values, then give up if len(values) == 0 { return clientIP } // Since there can be many header values, we need to // join them together before splitting to get the full list allValues := strings.SplitSeq(strings.Join(values, ","), ",") // Get first valid left-most IP address for part := range allValues { // Some proxies may retain the port number, so split if possible host, _, err := net.SplitHostPort(part) if err != nil { host = part } // Remove any zone identifier from the IP address host, _, _ = strings.Cut(strings.TrimSpace(host), "%") // Parse the IP address ipAddr, err := netip.ParseAddr(host) if err != nil { continue } return ipAddr.String() } // We didn't find a valid IP return clientIP } // strictUntrustedClientIp iterates through the list of client IP headers, // parses them from right-to-left, and returns the first valid IP address // that is untrusted. If no valid IP address is found, then the direct // remote address is returned. 
func strictUntrustedClientIp(r *http.Request, headers []string, trusted []netip.Prefix, clientIP string) string { for _, headerName := range headers { parts := strings.Split(strings.Join(r.Header.Values(headerName), ","), ",") for i := len(parts) - 1; i >= 0; i-- { // Some proxies may retain the port number, so split if possible host, _, err := net.SplitHostPort(parts[i]) if err != nil { host = parts[i] } // Remove any zone identifier from the IP address host, _, _ = strings.Cut(strings.TrimSpace(host), "%") // Parse the IP address ipAddr, err := netip.ParseAddr(host) if err != nil { continue } if !isTrustedClientIP(ipAddr, trusted) { return ipAddr.String() } } } return clientIP } // cloneURL makes a copy of r.URL and returns a // new value that doesn't reference the original. func cloneURL(from, to *url.URL) { *to = *from if from.User != nil { userInfo := new(url.Userinfo) *userInfo = *from.User to.User = userInfo } } // lengthReader is an io.ReadCloser that keeps track of the // number of bytes read from the request body. type lengthReader struct { Source io.ReadCloser Length int } func (r *lengthReader) Read(b []byte) (int, error) { n, err := r.Source.Read(b) r.Length += n return n, err } func (r *lengthReader) Close() error { return r.Source.Close() } // Context keys for HTTP request context values. const ( // For referencing the server instance ServerCtxKey caddy.CtxKey = "server" // For the request's variable table VarsCtxKey caddy.CtxKey = "vars" // For a partial copy of the unmodified request that // originally came into the server's entry handler OriginalRequestCtxKey caddy.CtxKey = "original_request" // DEPRECATED: not used anymore. // To refer to the underlying connection, implement a middleware plugin // that RegisterConnContext during provisioning. 
ConnCtxKey caddy.CtxKey = "conn" // used to get the tls connection state in the context, if available tlsConnectionStateFuncCtxKey caddy.CtxKey = "tls_connection_state_func" // For tracking whether the client is a trusted proxy TrustedProxyVarKey string = "trusted_proxy" // For tracking the real client IP (affected by trusted_proxy) ClientIPVarKey string = "client_ip" ) var networkTypesHTTP3 = map[string]string{ "unixgram": "unixgram", "udp": "udp", "udp4": "udp4", "udp6": "udp6", "tcp": "udp", "tcp4": "udp4", "tcp6": "udp6", "fdgram": "fdgram", } // RegisterNetworkHTTP3 registers a mapping from non-HTTP/3 network to HTTP/3 // network. This should be called during init() and will panic if the network // type is standard, reserved, or already registered. // // EXPERIMENTAL: Subject to change. func RegisterNetworkHTTP3(originalNetwork, h3Network string) { if _, ok := networkTypesHTTP3[strings.ToLower(originalNetwork)]; ok { panic("network type " + originalNetwork + " is already registered") } networkTypesHTTP3[originalNetwork] = h3Network } func getHTTP3Network(originalNetwork string) (string, error) { h3Network, ok := networkTypesHTTP3[strings.ToLower(originalNetwork)] if !ok { return "", fmt.Errorf("network '%s' cannot handle HTTP/3 connections", originalNetwork) } return h3Network, nil } ================================================ FILE: modules/caddyhttp/server_test.go ================================================ package caddyhttp import ( "bytes" "context" "io" "net/http" "net/http/httptest" "net/netip" "testing" "time" "github.com/stretchr/testify/assert" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) type writeFunc func(p []byte) (int, error) type nopSyncer writeFunc func (n nopSyncer) Write(p []byte) (int, error) { return n(p) } func (n nopSyncer) Sync() error { return nil } // testLogger returns a logger and a buffer to which the logger writes. The // buffer can be read for asserting log output. 
func testLogger(wf writeFunc) *zap.Logger {
	ws := nopSyncer(wf)
	// Minimal encoder config: only the keys the assertions below rely on.
	encoderCfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		NameKey:        "logger",
		EncodeLevel:    zapcore.LowercaseLevelEncoder,
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder,
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), ws, zap.DebugLevel)

	return zap.New(core)
}

func TestServer_LogRequest(t *testing.T) {
	s := &Server{}

	ctx := context.Background()
	ctx = context.WithValue(ctx, ExtraLogFieldsCtxKey, new(ExtraLogFields))
	req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
	rec := httptest.NewRecorder()
	wrec := NewResponseRecorder(rec, nil, nil)

	duration := 50 * time.Millisecond
	repl := NewTestReplacer(req)
	bodyReader := &lengthReader{Source: req.Body}
	shouldLogCredentials := false

	buf := bytes.Buffer{}
	accLog := testLogger(buf.Write)
	s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, shouldLogCredentials)

	assert.JSONEq(t, `{
		"msg":"handled request", "level":"info", "bytes_read":0,
		"duration":"50ms", "resp_headers": {}, "size":0,
		"status":0, "user_id":""
	}`, buf.String())
}

func TestServer_LogRequest_WithTrace(t *testing.T) {
	s := &Server{}

	extra := new(ExtraLogFields)
	ctx := context.WithValue(context.Background(), ExtraLogFieldsCtxKey, extra)
	// Extra fields (e.g. from tracing middleware) must surface in the log entry.
	extra.Add(zap.String("traceID", "1234567890abcdef"))
	extra.Add(zap.String("spanID", "12345678"))

	req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
	rec := httptest.NewRecorder()
	wrec := NewResponseRecorder(rec, nil, nil)

	duration := 50 * time.Millisecond
	repl := NewTestReplacer(req)
	bodyReader := &lengthReader{Source: req.Body}
	shouldLogCredentials := false

	buf := bytes.Buffer{}
	accLog := testLogger(buf.Write)
	s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, shouldLogCredentials)

	assert.JSONEq(t, `{
		"msg":"handled request", "level":"info", "bytes_read":0,
		"duration":"50ms", "resp_headers": {}, "size":0,
		"status":0, "user_id":"",
		"traceID":"1234567890abcdef",
		"spanID":"12345678"
	}`, buf.String())
}

func BenchmarkServer_LogRequest(b *testing.B) {
	s := &Server{}

	extra := new(ExtraLogFields)
	ctx := context.WithValue(context.Background(), ExtraLogFieldsCtxKey, extra)

	req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
	rec := httptest.NewRecorder()
	wrec := NewResponseRecorder(rec, nil, nil)

	duration := 50 * time.Millisecond
	repl := NewTestReplacer(req)
	bodyReader := &lengthReader{Source: req.Body}

	buf := io.Discard
	accLog := testLogger(buf.Write)

	for b.Loop() {
		s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, false)
	}
}

func BenchmarkServer_LogRequest_NopLogger(b *testing.B) {
	s := &Server{}

	extra := new(ExtraLogFields)
	ctx := context.WithValue(context.Background(), ExtraLogFieldsCtxKey, extra)

	req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
	rec := httptest.NewRecorder()
	wrec := NewResponseRecorder(rec, nil, nil)

	duration := 50 * time.Millisecond
	repl := NewTestReplacer(req)
	bodyReader := &lengthReader{Source: req.Body}

	accLog := zap.NewNop()

	for b.Loop() {
		s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, false)
	}
}

func BenchmarkServer_LogRequest_WithTrace(b *testing.B) {
	s := &Server{}

	extra := new(ExtraLogFields)
	ctx := context.WithValue(context.Background(), ExtraLogFieldsCtxKey, extra)
	extra.Add(zap.String("traceID", "1234567890abcdef"))
	extra.Add(zap.String("spanID", "12345678"))

	req := httptest.NewRequest(http.MethodGet, "/", nil).WithContext(ctx)
	rec := httptest.NewRecorder()
	wrec := NewResponseRecorder(rec, nil, nil)

	duration := 50 * time.Millisecond
	repl := NewTestReplacer(req)
	bodyReader := &lengthReader{Source: req.Body}

	buf := io.Discard
	accLog := testLogger(buf.Write)

	for b.Loop() {
		s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, false)
	}
}

func TestServer_TrustedRealClientIP_NoTrustedHeaders(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	ip :=
		trustedRealClientIP(req, []string{}, "192.0.2.1")

	assert.Equal(t, ip, "192.0.2.1")
}

func TestServer_TrustedRealClientIP_OneTrustedHeaderEmpty(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip, "192.0.2.1")
}

func TestServer_TrustedRealClientIP_OneTrustedHeaderInvalid(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("X-Forwarded-For", "not, an, ip")
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip, "192.0.2.1")
}

func TestServer_TrustedRealClientIP_OneTrustedHeaderValid(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("X-Forwarded-For", "10.0.0.1")
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip, "10.0.0.1")
}

func TestServer_TrustedRealClientIP_OneTrustedHeaderValidArray(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("X-Forwarded-For", "1.1.1.1, 2.2.2.2, 3.3.3.3")
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	// left-most value wins in the non-strict path
	assert.Equal(t, ip, "1.1.1.1")
}

func TestServer_TrustedRealClientIP_IncludesPort(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("X-Forwarded-For", "1.1.1.1:1234")
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip, "1.1.1.1")
}

func TestServer_TrustedRealClientIP_SkipsInvalidIps(t *testing.T) {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("X-Forwarded-For", "not an ip, bad bad, 10.0.0.1")
	ip := trustedRealClientIP(req, []string{"X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip, "10.0.0.1")
}

func TestServer_TrustedRealClientIP_MultipleTrustedHeaderValidArray(t *testing.T) {
	req :=
		httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"
	req.Header.Set("Real-Client-IP", "1.1.1.1, 2.2.2.2, 3.3.3.3")
	req.Header.Set("X-Forwarded-For", "3.3.3.3, 4.4.4.4")
	// header order determines precedence; missing headers are skipped
	ip1 := trustedRealClientIP(req, []string{"X-Forwarded-For", "Real-Client-IP"}, "192.0.2.1")
	ip2 := trustedRealClientIP(req, []string{"Real-Client-IP", "X-Forwarded-For"}, "192.0.2.1")
	ip3 := trustedRealClientIP(req, []string{"Missing-Header-IP", "Real-Client-IP", "X-Forwarded-For"}, "192.0.2.1")

	assert.Equal(t, ip1, "3.3.3.3")
	assert.Equal(t, ip2, "1.1.1.1")
	assert.Equal(t, ip3, "1.1.1.1")
}

func TestServer_DetermineTrustedProxy_NoConfig(t *testing.T) {
	server := &Server{}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.0.2.1:12345"

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.False(t, trusted)
	assert.Equal(t, clientIP, "192.0.2.1")
}

func TestServer_DetermineTrustedProxy_NoConfigIpv6(t *testing.T) {
	server := &Server{}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "[::1]:12345"

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.False(t, trusted)
	assert.Equal(t, clientIP, "::1")
}

func TestServer_DetermineTrustedProxy_NoConfigIpv6Zones(t *testing.T) {
	server := &Server{}

	req := httptest.NewRequest("GET", "/", nil)
	// zone identifier (%eth2) must be stripped before parsing
	req.RemoteAddr = "[::1%eth2]:12345"

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.False(t, trusted)
	assert.Equal(t, clientIP, "::1")
}

func TestServer_DetermineTrustedProxy_TrustedLoopback(t *testing.T) {
	loopbackPrefix, _ := netip.ParsePrefix("127.0.0.1/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{loopbackPrefix},
		},
		ClientIPHeaders: []string{"X-Forwarded-For"},
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "127.0.0.1:12345"
	req.Header.Set("X-Forwarded-For", "31.40.0.10")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, clientIP, "31.40.0.10")
}

func TestServer_DetermineTrustedProxy_UnixSocket(t *testing.T) {
	server := &Server{
		ClientIPHeaders:    []string{"X-Forwarded-For"},
		TrustedProxiesUnix: true,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "@"
	req.Header.Set("X-Forwarded-For", "2.2.2.2, 3.3.3.3")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, "2.2.2.2", clientIP)
}

func TestServer_DetermineTrustedProxy_UnixSocketStrict(t *testing.T) {
	server := &Server{
		ClientIPHeaders:      []string{"X-Forwarded-For"},
		TrustedProxiesUnix:   true,
		TrustedProxiesStrict: 1,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "@"
	req.Header.Set("X-Forwarded-For", "2.2.2.2, 3.3.3.3")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	// strict mode scans right-to-left
	assert.Equal(t, "3.3.3.3", clientIP)
}

func TestServer_DetermineTrustedProxy_UntrustedPrefix(t *testing.T) {
	loopbackPrefix, _ := netip.ParsePrefix("127.0.0.1/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{loopbackPrefix},
		},
		ClientIPHeaders: []string{"X-Forwarded-For"},
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("X-Forwarded-For", "31.40.0.10")

	trusted, clientIP := determineTrustedProxy(req, server)

	// untrusted peer: the forwarded header must be ignored
	assert.False(t, trusted)
	assert.Equal(t, clientIP, "10.0.0.1")
}

func TestServer_DetermineTrustedProxy_MultipleTrustedPrefixes(t *testing.T) {
	loopbackPrefix, _ := netip.ParsePrefix("127.0.0.1/8")
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{loopbackPrefix, localPrivatePrefix},
		},
		ClientIPHeaders: []string{"X-Forwarded-For"},
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("X-Forwarded-For", "31.40.0.10")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, clientIP, "31.40.0.10")
}

func TestServer_DetermineTrustedProxy_MultipleTrustedClientHeaders(t *testing.T) {
	loopbackPrefix, _ := netip.ParsePrefix("127.0.0.1/8")
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{loopbackPrefix, localPrivatePrefix},
		},
		ClientIPHeaders: []string{"CF-Connecting-IP", "X-Forwarded-For"},
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("CF-Connecting-IP", "1.1.1.1, 2.2.2.2")
	req.Header.Set("X-Forwarded-For", "3.3.3.3, 4.4.4.4")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, clientIP, "1.1.1.1")
}

func TestServer_DetermineTrustedProxy_MatchLeftMostValidIp(t *testing.T) {
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{localPrivatePrefix},
		},
		ClientIPHeaders: []string{"X-Forwarded-For"},
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("X-Forwarded-For", "30.30.30.30, 45.54.45.54, 10.0.0.1")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, clientIP, "30.30.30.30")
}

func TestServer_DetermineTrustedProxy_MatchRightMostUntrusted(t *testing.T) {
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{localPrivatePrefix},
		},
		ClientIPHeaders:      []string{"X-Forwarded-For"},
		TrustedProxiesStrict: 1,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("X-Forwarded-For", "30.30.30.30, 45.54.45.54, 10.0.0.1")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	assert.Equal(t, clientIP, "45.54.45.54")
}

func TestServer_DetermineTrustedProxy_MatchRightMostUntrustedSkippingEmpty(t *testing.T) {
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{localPrivatePrefix},
		},
		ClientIPHeaders:      []string{"Missing-Header", "CF-Connecting-IP", "X-Forwarded-For"},
		TrustedProxiesStrict: 1,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("CF-Connecting-IP", "not a real IP")
	req.Header.Set("X-Forwarded-For", "30.30.30.30, bad, 45.54.45.54, not real")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	// invalid entries are skipped; right-most valid untrusted IP wins
	assert.Equal(t, clientIP, "45.54.45.54")
}

func TestServer_DetermineTrustedProxy_MatchRightMostUntrustedSkippingTrusted(t *testing.T) {
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{localPrivatePrefix},
		},
		ClientIPHeaders:      []string{"CF-Connecting-IP", "X-Forwarded-For"},
		TrustedProxiesStrict: 1,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("CF-Connecting-IP", "10.0.0.1, 10.0.0.2, 10.0.0.3")
	req.Header.Set("X-Forwarded-For", "30.30.30.30, 45.54.45.54, 10.0.0.4")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	// trusted-proxy entries are skipped while scanning right-to-left
	assert.Equal(t, clientIP, "45.54.45.54")
}

func TestServer_DetermineTrustedProxy_MatchRightMostUntrustedFirst(t *testing.T) {
	localPrivatePrefix, _ := netip.ParsePrefix("10.0.0.0/8")

	server := &Server{
		trustedProxies: &StaticIPRange{
			ranges: []netip.Prefix{localPrivatePrefix},
		},
		ClientIPHeaders:      []string{"CF-Connecting-IP", "X-Forwarded-For"},
		TrustedProxiesStrict: 1,
	}

	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.0.0.1:12345"
	req.Header.Set("CF-Connecting-IP", "10.0.0.1, 90.100.110.120, 10.0.0.2, 10.0.0.3")
	req.Header.Set("X-Forwarded-For", "30.30.30.30, 45.54.45.54, 10.0.0.4")

	trusted, clientIP := determineTrustedProxy(req, server)

	assert.True(t, trusted)
	// the first-listed header takes precedence over later ones
	assert.Equal(t, clientIP, "90.100.110.120")
}

================================================
FILE: modules/caddyhttp/standard/imports.go
================================================

package standard

import (
	// standard Caddy HTTP app modules
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyauth"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode/brotli"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode/gzip"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/encode/zstd"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/fileserver"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/intercept"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/logging"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/map"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/proxyprotocol"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/push"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/requestbody"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/fastcgi"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/forwardauth"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/rewrite"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/templates"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/tracing"
)

================================================
FILE: modules/caddyhttp/staticerror.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"fmt"
	"net/http"
	"strconv"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(StaticError{})
}

// StaticError implements a simple handler that returns an error.
// This handler returns an error value, but does not write a response.
// This is useful when you want the server to act as if an error
// occurred; for example, to invoke your custom error handling logic.
//
// Since this handler does not write a response, the error information
// is for use by the server to know how to handle the error.
type StaticError struct {
	// The error message. Optional. Default is no error message.
	Error string `json:"error,omitempty"`

	// The recommended HTTP status code. Can be either an integer or a
	// string if placeholders are needed. Optional. Default is 500.
	StatusCode WeakString `json:"status_code,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (StaticError) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.error",
		New: func() caddy.Module { return new(StaticError) },
	}
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	error [<matcher>] <status>|<message> [<status>] {
//	    message <text>
//	}
//
// If there is just one argument (other than the matcher), it is considered
// to be a status code if it's a valid positive integer of 3 digits.
func (e *StaticError) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume directive name args := d.RemainingArgs() switch len(args) { case 1: if len(args[0]) == 3 { if num, err := strconv.Atoi(args[0]); err == nil && num > 0 { e.StatusCode = WeakString(args[0]) break } } e.Error = args[0] case 2: e.Error = args[0] e.StatusCode = WeakString(args[1]) default: return d.ArgErr() } for d.NextBlock(0) { switch d.Val() { case "message": if e.Error != "" { return d.Err("message already specified") } if !d.AllArgs(&e.Error) { return d.ArgErr() } default: return d.Errf("unrecognized subdirective '%s'", d.Val()) } } return nil } func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) statusCode := http.StatusInternalServerError if codeStr := e.StatusCode.String(); codeStr != "" { intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, "")) if err != nil { return Error(http.StatusInternalServerError, err) } statusCode = intVal } return Error(statusCode, fmt.Errorf("%s", repl.ReplaceKnown(e.Error, ""))) } // Interface guard var ( _ MiddlewareHandler = (*StaticError)(nil) _ caddyfile.Unmarshaler = (*StaticError)(nil) ) ================================================ FILE: modules/caddyhttp/staticresp.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyhttp

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/textproto"
	"os"
	"slices"
	"strconv"
	"strings"
	"text/template"
	"time"

	"github.com/spf13/cobra"
	"go.uber.org/zap"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(StaticResponse{})
	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "respond",
		Usage: `[--status ] [--body ] [--listen ] [--access-log] [--debug] [--header "Field: value"] `,
		Short: "Simple, hard-coded HTTP responses for development and testing",
		Long: `
Spins up a quick-and-clean HTTP server for development and testing purposes.

With no options specified, this command listens on a random available port
and answers HTTP requests with an empty 200 response. The listen address can
be customized with the --listen flag and will always be printed to stdout.
If the listen address includes a port range, multiple servers will be started.

If a final, unnamed argument is given, it will be treated as a status code
(same as the --status flag) if it is a 3-digit number. Otherwise, it is used
as the response body (same as the --body flag). The --status and --body flags
will always override this argument (for example, to write a body that
literally says "404" but with a status code of 200, do '--status 200 404').

A body may be given in 3 ways: a flag, a final (and unnamed) argument to
the command, or piped to stdin (if flag and argument are unset).

Limited template evaluation is supported on the body, with the following
variables:

	{{.N}}        The server number (useful if using a port range)
	{{.Port}}     The listener port
	{{.Address}}  The listener address

(See the docs for the text/template package in the Go standard library for
information about using templates: https://pkg.go.dev/text/template)

Access/request logging and more verbose debug logging can also be enabled.

Response headers may be added using the --header flag for each header field.
`,
		CobraFunc: func(cmd *cobra.Command) {
			cmd.Flags().StringP("listen", "l", ":0", "The address to which to bind the listener")
			cmd.Flags().IntP("status", "s", http.StatusOK, "The response status code")
			cmd.Flags().StringP("body", "b", "", "The body of the HTTP response")
			cmd.Flags().BoolP("access-log", "", false, "Enable the access log")
			cmd.Flags().BoolP("debug", "v", false, "Enable more verbose debug-level logging")
			cmd.Flags().StringArrayP("header", "H", []string{}, "Set a header on the response (format: \"Field: value\")")
			cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdRespond)
		},
	})
}

// StaticResponse implements a simple responder for static responses.
type StaticResponse struct {
	// The HTTP status code to respond with. Can be an integer or,
	// if needing to use a placeholder, a string.
	//
	// If the status code is 103 (Early Hints), the response headers
	// will be written to the client immediately, the body will be
	// ignored, and the next handler will be invoked. This behavior
	// is EXPERIMENTAL while RFC 8297 is a draft, and may be changed
	// or removed.
	StatusCode WeakString `json:"status_code,omitempty"`

	// Header fields to set on the response; overwrites any existing
	// header fields of the same names after normalization.
	Headers http.Header `json:"headers,omitempty"`

	// The response body. If non-empty, the Content-Type header may
	// be added automatically if it is not explicitly configured nor
	// already set on the response; the default value is
	// "text/plain; charset=utf-8" unless the body is a valid JSON object
	// or array, in which case the value will be "application/json".
	// Other than those common special cases the Content-Type header
	// should be set explicitly if it is desired because MIME sniffing
	// is disabled for safety.
	Body string `json:"body,omitempty"`

	// If true, the server will close the client's connection
	// after writing the response.
	Close bool `json:"close,omitempty"`

	// Immediately and forcefully closes the connection without
	// writing a response. Interrupts any other HTTP streams on
	// the same connection.
	Abort bool `json:"abort,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (StaticResponse) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.static_response",
		New: func() caddy.Module { return new(StaticResponse) },
	}
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	respond [<matcher>] <status>|<body> [<status>] {
//	    body <text>
//	    close
//	}
//
// If there is just one argument (other than the matcher), it is considered
// to be a status code if it's a valid positive integer of 3 digits.
func (s *StaticResponse) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name

	args := d.RemainingArgs()
	switch len(args) {
	case 1:
		// a lone 3-digit positive integer is a status code;
		// anything else is the response body
		if len(args[0]) == 3 {
			if num, err := strconv.Atoi(args[0]); err == nil && num > 0 {
				s.StatusCode = WeakString(args[0])
				break
			}
		}
		s.Body = args[0]
	case 2:
		s.Body = args[0]
		s.StatusCode = WeakString(args[1])
	default:
		return d.ArgErr()
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "body":
			if s.Body != "" {
				return d.Err("body already specified")
			}
			if !d.AllArgs(&s.Body) {
				return d.ArgErr()
			}
		case "close":
			if s.Close {
				return d.Err("close already specified")
			}
			s.Close = true
		default:
			return d.Errf("unrecognized subdirective '%s'", d.Val())
		}
	}
	return nil
}

func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
	// close the connection immediately
	if s.Abort {
		panic(http.ErrAbortHandler)
	}

	// close the connection after responding
	if s.Close {
		r.Close = true
		w.Header().Set("Connection", "close")
	}

	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	// set all headers
	for field, vals := range s.Headers {
		field = textproto.CanonicalMIMEHeaderKey(repl.ReplaceAll(field, ""))
		newVals := make([]string, len(vals))
		for i := range vals {
			newVals[i] = repl.ReplaceAll(vals[i], "")
		}
		w.Header()[field] = newVals
	}

	// implicitly set Content-Type header if we can do so safely
	// (this allows templates handler to eval templates successfully
	// or for clients to render JSON properly which is very common)
	body := repl.ReplaceKnown(s.Body, "")
	if body != "" && w.Header().Get("Content-Type") == "" {
		content := strings.TrimSpace(body)
		if len(content) > 2 &&
			(content[0] == '{' && content[len(content)-1] == '}' ||
				(content[0] == '[' && content[len(content)-1] == ']')) &&
			json.Valid([]byte(content)) {
			w.Header().Set("Content-Type", "application/json")
		} else {
			w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		}
	}

	// do not allow Go to sniff the content-type, for safety
	if w.Header().Get("Content-Type") == "" {
		w.Header()["Content-Type"] = nil
	}

	// get the status code; if this handler exists in an error route,
	// use the recommended status code as the default; otherwise 200
	statusCode := http.StatusOK
	if reqErr, ok := r.Context().Value(ErrorCtxKey).(error); ok {
		if handlerErr, ok := reqErr.(HandlerError); ok {
			if handlerErr.StatusCode > 0 {
				statusCode = handlerErr.StatusCode
			}
		}
	}
	if codeStr := s.StatusCode.String(); codeStr != "" {
		intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
		if err != nil {
			return Error(http.StatusInternalServerError, err)
		}
		statusCode = intVal
	}

	// write headers
	w.WriteHeader(statusCode)

	// write response body
	if statusCode != http.StatusEarlyHints && body != "" {
		fmt.Fprint(w, body) //nolint:gosec // no XSS unless you sabotage your own config
	}

	// continue handling after Early Hints as they are not the final response
	if statusCode == http.StatusEarlyHints {
		return next.ServeHTTP(w, r)
	}

	return nil
}

// buildHTTPServer constructs one Server for the respond command,
// evaluating the body template with the server number, port, and address.
func buildHTTPServer(
	i int, port uint, addr string, statusCode int,
	hdr http.Header, body string, accessLog bool,
) (*Server, error) {
	//nolint:prealloc
	var handlers []json.RawMessage

	// response body supports a basic template; evaluate it
	tplCtx := struct {
		N       int    // server number
		Port    uint   // only the port
		Address string // listener address
	}{
		N:       i,
		Port:    port,
		Address: addr,
	}
	tpl, err := template.New("body").Parse(body)
	if err != nil {
		return nil, err
	}
	buf := new(bytes.Buffer)
	err = tpl.Execute(buf, tplCtx)
	if err != nil {
		return nil, err
	}

	// create route with handler
	handler := StaticResponse{
		StatusCode: WeakString(fmt.Sprintf("%d", statusCode)),
		Headers:    hdr,
		Body:       buf.String(),
	}
	handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "static_response", nil))
	route := Route{HandlersRaw: handlers}

	server := &Server{
		Listen:            []string{addr},
		ReadHeaderTimeout: caddy.Duration(10 * time.Second),
		IdleTimeout:       caddy.Duration(30 * time.Second),
		MaxHeaderBytes:    1024 * 10,
		Routes:            RouteList{route},
		AutoHTTPS:         &AutoHTTPSConfig{DisableRedir: true},
	}

	if accessLog {
		server.Logs = new(ServerLogConfig)
	}

	return server, nil
}

// cmdRespond implements the 'caddy respond' command: it assembles a
// config of static_response servers from the flags/args and runs it,
// printing each listener address to stdout.
func cmdRespond(fl caddycmd.Flags) (int, error) {
	caddy.TrapSignals()

	// get flag values
	listen := fl.String("listen")
	statusCodeFl := fl.Int("status")
	bodyFl := fl.String("body")
	accessLog := fl.Bool("access-log")
	debug := fl.Bool("debug")
	arg := fl.Arg(0)

	if fl.NArg() > 1 {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("too many unflagged arguments")
	}

	// prefer status and body from explicit flags
	statusCode, body := statusCodeFl, bodyFl

	// figure out if status code was explicitly specified; this lets
	// us set a non-zero value as the default but is a little hacky
	statusCodeFlagSpecified := slices.Contains(os.Args, "--status")

	// try to determine what kind of parameter the unnamed argument is
	if arg != "" {
		// specifying body and status flags makes the argument redundant/unused
		if bodyFl != "" && statusCodeFlagSpecified {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("unflagged argument \"%s\" is overridden by flags", arg)
		}

		// if a valid 3-digit number, treat as status code; otherwise body
		if argInt, err := strconv.Atoi(arg); err == nil && !statusCodeFlagSpecified {
			if argInt >= 100 && argInt <= 999 {
				statusCode = argInt
			}
		} else if body == "" {
			body = arg
		}
	}

	// if we still need a body, see if stdin is being piped
	if body == "" {
		stdinInfo, err := os.Stdin.Stat()
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}
		if stdinInfo.Mode()&os.ModeNamedPipe != 0 {
			bodyBytes, err := io.ReadAll(os.Stdin)
			if err != nil {
				return caddy.ExitCodeFailedStartup, err
			}
			body = string(bodyBytes)
		}
	}

	// build headers map
	headers, err := fl.GetStringArray("header")
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid header flag: %v", err)
	}
	hdr := make(http.Header)
	for i, h := range headers {
		key, val, found := strings.Cut(h, ":")
		key, val = strings.TrimSpace(key), strings.TrimSpace(val)
		if !found || key == "" || val == "" {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("header %d: invalid format \"%s\" (expecting \"Field: value\")", i, h)
		}
		hdr.Set(key, val)
	}

	// build each HTTP server
	httpApp := App{Servers: make(map[string]*Server)}

	// expand listen address, if more than one port
	listenAddr, err := caddy.ParseNetworkAddress(listen)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}
	if !listenAddr.IsUnixNetwork() && !listenAddr.IsFdNetwork() {
		listenAddrs := make([]string, 0, listenAddr.PortRangeSize())
		for offset := uint(0); offset < listenAddr.PortRangeSize(); offset++ {
			listenAddrs = append(listenAddrs, listenAddr.JoinHostPort(offset))
		}

		for i, addr := range listenAddrs {
			server, err := buildHTTPServer(i, listenAddr.StartPort+uint(i), addr, statusCode, hdr, body, accessLog)
			if err != nil {
				return caddy.ExitCodeFailedStartup, err
			}

			// save server
			httpApp.Servers[fmt.Sprintf("static%d", i)] = server
		}
	} else {
		server, err := buildHTTPServer(0, 0, listen, statusCode, hdr, body, accessLog)
		if err != nil {
			return caddy.ExitCodeFailedStartup, err
		}

		// save server
		httpApp.Servers[fmt.Sprintf("static%d", 0)] = server
	}

	// finish building the config
	// (note: shadows the predeclared identifier 'false' to get an addressable bool)
	var false bool
	cfg := &caddy.Config{
		Admin: &caddy.AdminConfig{
			Disabled: true,
			Config: &caddy.ConfigSettings{
				Persist: &false,
			},
		},
		AppsRaw: caddy.ModuleMap{
			"http": caddyconfig.JSON(httpApp, nil),
		},
	}
	if debug {
		cfg.Logging = &caddy.Logging{
			Logs: map[string]*caddy.CustomLog{
				"default": {BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()}},
			},
		}
	}

	// run it!
	err = caddy.Run(cfg)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// to print listener addresses, get the active HTTP app
	loadedHTTPApp, err := caddy.ActiveContext().App("http")
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// print each listener address
	for _, srv := range loadedHTTPApp.(*App).Servers {
		for _, ln := range srv.listeners {
			fmt.Printf("Server address: %s\n", ln.Addr())
		}
	}

	select {}
}

// Interface guards
var (
	_ MiddlewareHandler     = (*StaticResponse)(nil)
	_ caddyfile.Unmarshaler = (*StaticResponse)(nil)
)

================================================
FILE: modules/caddyhttp/staticresp_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp import ( "context" "io" "net/http" "net/http/httptest" "strconv" "testing" "github.com/caddyserver/caddy/v2" ) func TestStaticResponseHandler(t *testing.T) { r := fakeRequest() w := httptest.NewRecorder() s := StaticResponse{ StatusCode: WeakString(strconv.Itoa(http.StatusNotFound)), Headers: http.Header{ "X-Test": []string{"Testing"}, }, Body: "Text", Close: true, } err := s.ServeHTTP(w, r, nil) if err != nil { t.Errorf("did not expect an error, but got: %v", err) } resp := w.Result() respBody, _ := io.ReadAll(resp.Body) if resp.StatusCode != http.StatusNotFound { t.Errorf("expected status %d but got %d", http.StatusNotFound, resp.StatusCode) } if resp.Header.Get("X-Test") != "Testing" { t.Errorf("expected x-test header to be 'testing' but was '%s'", resp.Header.Get("X-Test")) } if string(respBody) != "Text" { t.Errorf("expected body to be 'test' but was '%s'", respBody) } } func fakeRequest() *http.Request { r, _ := http.NewRequest("GET", "/", nil) repl := caddy.NewReplacer() ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl) r = r.WithContext(ctx) return r } ================================================ FILE: modules/caddyhttp/subroute.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddyhttp import ( "fmt" "net/http" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(Subroute{}) } // Subroute implements a handler that compiles and executes routes. // This is useful for a batch of routes that all inherit the same // matchers, or for multiple routes that should be treated as a // single route. // // You can also use subroutes to handle errors from its handlers. // First the primary routes will be executed, and if they return an // error, the errors routes will be executed; in that case, an error // is only returned to the entry point at the server if there is an // additional error returned from the errors routes. type Subroute struct { // The primary list of routes to compile and execute. Routes RouteList `json:"routes,omitempty"` // If the primary routes return an error, error handling // can be promoted to this configuration instead. Errors *HTTPErrorConfig `json:"errors,omitempty"` } // CaddyModule returns the Caddy module information. func (Subroute) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.subroute", New: func() caddy.Module { return new(Subroute) }, } } // Provision sets up subrouting. 
func (sr *Subroute) Provision(ctx caddy.Context) error { if sr.Routes != nil { err := sr.Routes.Provision(ctx) if err != nil { return fmt.Errorf("setting up subroutes: %v", err) } if sr.Errors != nil { err := sr.Errors.Routes.Provision(ctx) if err != nil { return fmt.Errorf("setting up error subroutes: %v", err) } } } return nil } func (sr *Subroute) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error { subroute := sr.Routes.Compile(next) err := subroute.ServeHTTP(w, r) if err != nil && sr.Errors != nil { r = sr.Errors.WithError(r, err) errRoute := sr.Errors.Routes.Compile(next) return errRoute.ServeHTTP(w, r) } return err } // Interface guards var ( _ caddy.Provisioner = (*Subroute)(nil) _ MiddlewareHandler = (*Subroute)(nil) ) ================================================ FILE: modules/caddyhttp/templates/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package templates import ( "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { httpcaddyfile.RegisterHandlerDirective("templates", parseCaddyfile) } // parseCaddyfile sets up the handler from Caddyfile tokens. 
Syntax: // // templates [] { // mime // between // root // } func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { h.Next() // consume directive name t := new(Templates) for h.NextBlock(0) { switch h.Val() { case "mime": t.MIMETypes = h.RemainingArgs() if len(t.MIMETypes) == 0 { return nil, h.ArgErr() } case "between": t.Delimiters = h.RemainingArgs() if len(t.Delimiters) != 2 { return nil, h.ArgErr() } case "root": if !h.Args(&t.FileRoot) { return nil, h.ArgErr() } case "extensions": if h.NextArg() { return nil, h.ArgErr() } if t.ExtensionsRaw != nil { return nil, h.Err("extensions already specified") } for nesting := h.Nesting(); h.NextBlock(nesting); { extensionModuleName := h.Val() modID := "http.handlers.templates.functions." + extensionModuleName unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID) if err != nil { return nil, err } cf, ok := unm.(CustomFunctions) if !ok { return nil, h.Errf("module %s (%T) does not provide template functions", modID, unm) } if t.ExtensionsRaw == nil { t.ExtensionsRaw = make(caddy.ModuleMap) } t.ExtensionsRaw[extensionModuleName] = caddyconfig.JSON(cf, nil) } } } return t, nil } ================================================ FILE: modules/caddyhttp/templates/frontmatter.go ================================================ package templates import ( "encoding/json" "fmt" "strings" "unicode" "github.com/BurntSushi/toml" "gopkg.in/yaml.v3" ) func extractFrontMatter(input string) (map[string]any, string, error) { // get the bounds of the first non-empty line var firstLineStart, firstLineEnd int lineEmpty := true for i, b := range input { if b == '\n' { firstLineStart = firstLineEnd if firstLineStart > 0 { firstLineStart++ // skip newline character } firstLineEnd = i if !lineEmpty { break } continue } lineEmpty = lineEmpty && unicode.IsSpace(b) } firstLine := input[firstLineStart:firstLineEnd] // ensure residue windows carriage return byte is removed firstLine = strings.TrimSpace(firstLine) // see 
	// ...what kind of front matter there is, if any
	var closingFence []string
	var fmParser func([]byte) (map[string]any, error)
	for _, fmType := range supportedFrontMatterTypes {
		if firstLine == fmType.FenceOpen {
			closingFence = fmType.FenceClose
			fmParser = fmType.ParseFunc
			break
		}
	}

	if fmParser == nil {
		// no recognized front matter; whole document is body
		return nil, input, nil
	}

	// find end of front matter: the first closing-fence candidate that
	// appears at the start of a later line
	var fmEndFence string
	fmEndFenceStart := -1
	for _, fence := range closingFence {
		index := strings.Index(input[firstLineEnd:], "\n"+fence)
		if index >= 0 {
			fmEndFenceStart = index
			fmEndFence = fence
			break
		}
	}
	if fmEndFenceStart < 0 {
		return nil, "", fmt.Errorf("unterminated front matter")
	}
	fmEndFenceStart += firstLineEnd + 1 // add 1 to account for newline

	// extract and parse front matter
	frontMatter := input[firstLineEnd:fmEndFenceStart]
	fm, err := fmParser([]byte(frontMatter))
	if err != nil {
		return nil, "", err
	}

	// the rest is the body
	body := input[fmEndFenceStart+len(fmEndFence):]

	return fm, body, nil
}

// yamlFrontMatter parses the given bytes as a YAML document into a map.
func yamlFrontMatter(input []byte) (map[string]any, error) {
	m := make(map[string]any)
	err := yaml.Unmarshal(input, &m)
	return m, err
}

// tomlFrontMatter parses the given bytes as a TOML document into a map.
func tomlFrontMatter(input []byte) (map[string]any, error) {
	m := make(map[string]any)
	err := toml.Unmarshal(input, &m)
	return m, err
}

// jsonFrontMatter parses the given bytes as the interior of a JSON object;
// the surrounding braces served as the fences and were consumed by the
// caller, so they are re-added here before decoding.
func jsonFrontMatter(input []byte) (map[string]any, error) {
	input = append([]byte{'{'}, input...)
	input = append(input, '}')
	m := make(map[string]any)
	err := json.Unmarshal(input, &m)
	return m, err
}

// parsedMarkdownDoc is the result of splitting front matter from a body.
type parsedMarkdownDoc struct {
	Meta map[string]any `json:"meta,omitempty"` // parsed front matter fields
	Body string         `json:"body,omitempty"` // document body after the front matter
}

// frontMatterType describes one supported front matter format: its opening
// fence, the fences that may close it, and the parser for its contents.
type frontMatterType struct {
	FenceOpen  string
	FenceClose []string
	ParseFunc  func(input []byte) (map[string]any, error)
}

// supportedFrontMatterTypes is consulted in order when matching the first
// non-empty line of a document against an opening fence.
var supportedFrontMatterTypes = []frontMatterType{
	{
		FenceOpen:  "---",
		FenceClose: []string{"---", "..."},
		ParseFunc:  yamlFrontMatter,
	},
	{
		FenceOpen:  "+++",
		FenceClose: []string{"+++"},
		ParseFunc:  tomlFrontMatter,
	},
	{
		FenceOpen:  "{",
		FenceClose: []string{"}"},
		ParseFunc:  jsonFrontMatter,
	},
}

================================================
FILE: modules/caddyhttp/templates/frontmatter_fuzz.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build gofuzz

package templates

// FuzzExtractFrontMatter exercises extractFrontMatter with arbitrary input.
func FuzzExtractFrontMatter(data []byte) int {
	_, _, err := extractFrontMatter(string(data))
	if err != nil {
		return 0
	}
	return 1
}

================================================
FILE: modules/caddyhttp/templates/templates.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package templates import ( "bytes" "errors" "fmt" "net/http" "strconv" "strings" "text/template" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func init() { caddy.RegisterModule(Templates{}) } // Templates is a middleware which executes response bodies as Go templates. // The syntax is documented in the Go standard library's // [text/template package](https://golang.org/pkg/text/template/). // // ⚠️ Template functions/actions are still experimental, so they are subject to change. // // Custom template functions can be registered by creating a plugin module under the `http.handlers.templates.functions.*` namespace that implements the `CustomFunctions` interface. // // [All Sprig functions](https://masterminds.github.io/sprig/) are supported. // // In addition to the standard functions and the Sprig library, Caddy adds // extra functions and data that are available to a template: // // ##### `.Args` // // A slice of arguments passed to this page/context, for example // as the result of a [`include`](#include). // // ``` // {{index .Args 0}} // first argument // ``` // // ##### `.Cookie` // // Gets the value of a cookie by name. // // ``` // {{.Cookie "cookiename"}} // ``` // // ##### `env` // // Gets an environment variable. // // ``` // {{env "VAR_NAME"}} // ``` // // ##### `placeholder` // // Gets an [placeholder variable](/docs/conventions#placeholders). // The braces (`{}`) have to be omitted. 
// // ``` // {{placeholder "http.request.uri.path"}} // {{placeholder "http.error.status_code"}} // ``` // // As a shortcut, `ph` is an alias for `placeholder`. // // ``` // {{ph "http.request.method"}} // ``` // // ##### `.Host` // // Returns the hostname portion (no port) of the Host header of the HTTP request. // // ``` // {{.Host}} // ``` // // ##### `httpInclude` // // Includes the contents of another file, and renders it in-place, // by making a virtual HTTP request (also known as a sub-request). // The URI path must exist on the same virtual server because the // request does not use sockets; instead, the request is crafted in // memory and the handler is invoked directly for increased efficiency. // // ``` // {{httpInclude "/foo/bar?q=val"}} // ``` // // ##### `import` // // Reads and returns the contents of another file, and parses it // as a template, adding any template definitions to the template // stack. If there are no definitions, the filepath will be the // definition name. Any `{{ define }}` blocks will be accessible by // `{{ template }}` or `{{ block }}`. Imports must happen before the // template or block action is called. Note that the contents are // NOT escaped, so you should only import trusted template files. // // **filename.html** // ``` // {{ define "main" }} // content // {{ end }} // ``` // // **index.html** // ``` // {{ import "/path/to/filename.html" }} // {{ template "main" }} // ``` // // ##### `include` // // Includes the contents of another file, rendering it in-place. // Optionally can pass key-value pairs as arguments to be accessed // by the included file. Use [`.Args N`](#args) to access the N-th // argument, 0-indexed. Note that the contents are NOT escaped, so // you should only include trusted template files. 
// // ``` // {{include "path/to/file.html"}} // no arguments // {{include "path/to/file.html" "arg0" 1 "value 2"}} // with arguments // ``` // // ##### `readFile` // // Reads and returns the contents of another file, as-is. // Note that the contents are NOT escaped, so you should // only read trusted files. // // ``` // {{readFile "path/to/file.html"}} // ``` // // ##### `listFiles` // // Returns a list of the files in the given directory, which is relative // to the template context's file root. // // ``` // {{listFiles "/mydir"}} // ``` // // ##### `markdown` // // Renders the given Markdown text as HTML and returns it. This uses the // [Goldmark](https://github.com/yuin/goldmark) library, // which is CommonMark compliant. It also has these extensions // enabled: GitHub Flavored Markdown, Footnote, and syntax // highlighting provided by [Chroma](https://github.com/alecthomas/chroma). // // ``` // {{markdown "My _markdown_ text"}} // ``` // // ##### `.RemoteIP` // // Returns the connection's IP address. // // ``` // {{.RemoteIP}} // ``` // // ##### `.ClientIP` // // Returns the real client's IP address, if `trusted_proxies` was configured, // otherwise returns the connection's IP address. // // ``` // {{.ClientIP}} // ``` // // ##### `.Req` // // Accesses the current HTTP request, which has various fields, including: // // - `.Method` - the method // - `.URL` - the URL, which in turn has component fields (Scheme, Host, Path, etc.) // - `.Header` - the header fields // - `.Host` - the Host or :authority header of the request // // ``` // {{.Req.Header.Get "User-Agent"}} // ``` // // ##### `.OriginalReq` // // Like [`.Req`](#req), except it accesses the original HTTP // request before rewrites or other internal modifications. // // ##### `.RespHeader.Add` // // Adds a header field to the HTTP response. // // ``` // {{.RespHeader.Add "Field-Name" "val"}} // ``` // // ##### `.RespHeader.Del` // // Deletes a header field on the HTTP response. 
// // ``` // {{.RespHeader.Del "Field-Name"}} // ``` // // ##### `.RespHeader.Set` // // Sets a header field on the HTTP response, replacing any existing value. // // ``` // {{.RespHeader.Set "Field-Name" "val"}} // ``` // // ##### `httpError` // // Returns an error with the given status code to the HTTP handler chain. // // ``` // {{if not (fileExists $includedFile)}}{{httpError 404}}{{end}} // ``` // // ##### `splitFrontMatter` // // Splits front matter out from the body. Front matter is metadata that // appears at the very beginning of a file or string. Front matter can // be in YAML, TOML, or JSON formats: // // **TOML** front matter starts and ends with `+++`: // // ```toml // +++ // template = "blog" // title = "Blog Homepage" // sitename = "A Caddy site" // +++ // ``` // // **YAML** is surrounded by `---`: // // ```yaml // --- // template: blog // title: Blog Homepage // sitename: A Caddy site // --- // ``` // // **JSON** is simply `{` and `}`: // // ```json // { // "template": "blog", // "title": "Blog Homepage", // "sitename": "A Caddy site" // } // ``` // // The resulting front matter will be made available like so: // // - `.Meta` to access the metadata fields, for example: `{{$parsed.Meta.title}}` // - `.Body` to access the body after the front matter, for example: `{{markdown $parsed.Body}}` // // ##### `stripHTML` // // Removes HTML from a string. // // ``` // {{stripHTML "Shows only text content"}} // ``` // // ##### `humanize` // // Transforms size and time inputs to a human readable format. // This uses the [go-humanize](https://github.com/dustin/go-humanize) library. // // The first argument must be a format type, and the last argument // is the input, or the input can be piped in. 
The supported format // types are: // - **size** which turns an integer amount of bytes into a string like `2.3 MB` // - **time** which turns a time string into a relative time string like `2 weeks ago` // // For the `time` format, the layout for parsing the input can be configured // by appending a colon `:` followed by the desired time layout. You can // find the documentation on time layouts [in Go's docs](https://pkg.go.dev/time#pkg-constants). // The default time layout is `RFC1123Z`, i.e. `Mon, 02 Jan 2006 15:04:05 -0700`. // // ``` // {{humanize "size" "2048000"}} // {{placeholder "http.response.header.Content-Length" | humanize "size"}} // {{humanize "time" "Fri, 05 May 2022 15:04:05 +0200"}} // {{humanize "time:2006-Jan-02" "2022-May-05"}} // ``` // // ##### `pathEscape` // // Passes a string through `url.PathEscape`, replacing characters that have // special meaning in URL path parameters (`?`, `&`, `%`). // // Useful e.g. to include filenames containing these characters in URL path // parameters, or use them as an `img` element's `src` attribute. // // ``` // {{pathEscape "50%_valid_filename?.jpg"}} // ``` // // ##### `maybe` // // Invokes a custom template function only if it is registered (plugged-in) // in the `http.handlers.templates.functions.*` namespace. // // The first argument is the function name, and any subsequent arguments // are forwarded to that function. If the named function is not available, // the invocation is ignored and a log message is emitted. // // This is useful for templates that optionally use components which may // not be present in every build or environment. // // NOTE: This function is EXPERIMENTAL and subject to change or removal. // // ``` // {{ maybe "myOptionalFunc" "arg1" 2 }} // ``` type Templates struct { // The root path from which to load files. Required if template functions // accessing the file system are used (such as include). Default is // `{http.vars.root}` if set, or current working directory otherwise. 
FileRoot string `json:"file_root,omitempty"` // The MIME types for which to render templates. It is important to use // this if the route matchers do not exclude images or other binary files. // Default is text/plain, text/markdown, and text/html. MIMETypes []string `json:"mime_types,omitempty"` // The template action delimiters. If set, must be precisely two elements: // the opening and closing delimiters. Default: `["{{", "}}"]` Delimiters []string `json:"delimiters,omitempty"` // Extensions adds functions to the template's func map. These often // act as components on web pages, for example. ExtensionsRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=http.handlers.templates.functions"` customFuncs []template.FuncMap logger *zap.Logger } // CustomFunctions is the interface for registering custom template functions. type CustomFunctions interface { // CustomTemplateFunctions should return the mapping from custom function names to implementations. CustomTemplateFunctions() template.FuncMap } // CaddyModule returns the Caddy module information. func (Templates) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "http.handlers.templates", New: func() caddy.Module { return new(Templates) }, } } // Provision provisions t. func (t *Templates) Provision(ctx caddy.Context) error { t.logger = ctx.Logger() mods, err := ctx.LoadModule(t, "ExtensionsRaw") if err != nil { return fmt.Errorf("loading template extensions: %v", err) } for _, modIface := range mods.(map[string]any) { t.customFuncs = append(t.customFuncs, modIface.(CustomFunctions).CustomTemplateFunctions()) } if t.MIMETypes == nil { t.MIMETypes = defaultMIMETypes } if t.FileRoot == "" { t.FileRoot = "{http.vars.root}" } return nil } // Validate ensures t has a valid configuration. 
func (t *Templates) Validate() error { if len(t.Delimiters) != 0 && len(t.Delimiters) != 2 { return fmt.Errorf("delimiters must consist of exactly two elements: opening and closing") } return nil } func (t *Templates) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) // shouldBuf determines whether to execute templates on this response, // since generally we will not want to execute for images or CSS, etc. shouldBuf := func(status int, header http.Header) bool { ct := header.Get("Content-Type") for _, mt := range t.MIMETypes { if strings.Contains(ct, mt) { return true } } return false } rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuf) err := next.ServeHTTP(rec, r) if err != nil { return err } if !rec.Buffered() { return nil } err = t.executeTemplate(rec, r) if err != nil { return err } rec.Header().Set("Content-Length", strconv.Itoa(buf.Len())) rec.Header().Del("Accept-Ranges") // we don't know ranges for dynamically-created content rec.Header().Del("Last-Modified") // useless for dynamic content since it's always changing // we don't know a way to quickly generate etag for dynamic content, // and weak etags still cause browsers to rely on it even after a // refresh, so disable them until we find a better way to do this rec.Header().Del("Etag") return rec.WriteResponse() } // executeTemplate executes the template contained in wb.buf and replaces it with the results. 
func (t *Templates) executeTemplate(rr caddyhttp.ResponseRecorder, r *http.Request) error { var fs http.FileSystem if t.FileRoot != "" { repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) fs = http.Dir(repl.ReplaceAll(t.FileRoot, ".")) } ctx := &TemplateContext{ Root: fs, Req: r, RespHeader: WrappedHeader{rr.Header()}, config: t, CustomFuncs: t.customFuncs, } err := ctx.executeTemplateInBuffer(r.URL.Path, rr.Buffer()) if err != nil { // templates may return a custom HTTP error to be propagated to the client, // otherwise for any other error we assume the template is broken var handlerErr caddyhttp.HandlerError if errors.As(err, &handlerErr) { return handlerErr } return caddyhttp.Error(http.StatusInternalServerError, err) } return nil } // virtualResponseWriter is used in virtualized HTTP requests // that templates may execute. type virtualResponseWriter struct { status int header http.Header body *bytes.Buffer } func (vrw *virtualResponseWriter) Header() http.Header { return vrw.header } func (vrw *virtualResponseWriter) WriteHeader(statusCode int) { vrw.status = statusCode } func (vrw *virtualResponseWriter) Write(data []byte) (int, error) { return vrw.body.Write(data) } var defaultMIMETypes = []string{ "text/html", "text/plain", "text/markdown", } // Interface guards var ( _ caddy.Provisioner = (*Templates)(nil) _ caddy.Validator = (*Templates)(nil) _ caddyhttp.MiddlewareHandler = (*Templates)(nil) ) ================================================ FILE: modules/caddyhttp/templates/tplcontext.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package templates import ( "bytes" "fmt" "io" "io/fs" "net" "net/http" "net/url" "os" "path" "reflect" "strconv" "strings" "sync" "text/template" "time" "github.com/Masterminds/sprig/v3" chromahtml "github.com/alecthomas/chroma/v2/formatters/html" "github.com/dustin/go-humanize" "github.com/yuin/goldmark" highlighting "github.com/yuin/goldmark-highlighting/v2" "github.com/yuin/goldmark/extension" "github.com/yuin/goldmark/parser" gmhtml "github.com/yuin/goldmark/renderer/html" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) // TemplateContext is the TemplateContext with which HTTP templates are executed. type TemplateContext struct { Root http.FileSystem Req *http.Request Args []any // defined by arguments to funcInclude RespHeader WrappedHeader CustomFuncs []template.FuncMap // functions added by plugins config *Templates tpl *template.Template } // NewTemplate returns a new template intended to be evaluated with this // context, as it is initialized with configuration from this context. 
func (c *TemplateContext) NewTemplate(tplName string) *template.Template { c.tpl = template.New(tplName).Option("missingkey=zero") // customize delimiters, if applicable if c.config != nil && len(c.config.Delimiters) == 2 { c.tpl.Delims(c.config.Delimiters[0], c.config.Delimiters[1]) } // add sprig library c.tpl.Funcs(sprigFuncMap) // add all custom functions for _, funcMap := range c.CustomFuncs { c.tpl.Funcs(funcMap) } // add our own library c.tpl.Funcs(template.FuncMap{ "include": c.funcInclude, "readFile": c.funcReadFile, "import": c.funcImport, "httpInclude": c.funcHTTPInclude, "stripHTML": c.funcStripHTML, "markdown": c.funcMarkdown, "splitFrontMatter": c.funcSplitFrontMatter, "listFiles": c.funcListFiles, "fileStat": c.funcFileStat, "env": c.funcEnv, "placeholder": c.funcPlaceholder, "ph": c.funcPlaceholder, // shortcut "fileExists": c.funcFileExists, "httpError": c.funcHTTPError, "humanize": c.funcHumanize, "maybe": c.funcMaybe, "pathEscape": url.PathEscape, }) return c.tpl } // OriginalReq returns the original, unmodified, un-rewritten request as // it originally came in over the wire. func (c TemplateContext) OriginalReq() http.Request { or, _ := c.Req.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request) return or } // funcInclude returns the contents of filename relative to the site root and renders it in place. // Note that included files are NOT escaped, so you should only include // trusted files. If it is not trusted, be sure to use escaping functions // in your template. func (c TemplateContext) funcInclude(filename string, args ...any) (string, error) { bodyBuf := bufPool.Get().(*bytes.Buffer) bodyBuf.Reset() defer bufPool.Put(bodyBuf) err := c.readFileToBuffer(filename, bodyBuf) if err != nil { return "", err } c.Args = args err = c.executeTemplateInBuffer(filename, bodyBuf) if err != nil { return "", err } return bodyBuf.String(), nil } // funcReadFile returns the contents of a filename relative to the site root. 
// Note that included files are NOT escaped, so you should only include // trusted files. If it is not trusted, be sure to use escaping functions // in your template. func (c TemplateContext) funcReadFile(filename string) (string, error) { bodyBuf := bufPool.Get().(*bytes.Buffer) bodyBuf.Reset() defer bufPool.Put(bodyBuf) err := c.readFileToBuffer(filename, bodyBuf) if err != nil { return "", err } return bodyBuf.String(), nil } // readFileToBuffer reads a file into a buffer func (c TemplateContext) readFileToBuffer(filename string, bodyBuf *bytes.Buffer) error { if c.Root == nil { return fmt.Errorf("root file system not specified") } file, err := c.Root.Open(filename) if err != nil { return err } defer file.Close() _, err = io.Copy(bodyBuf, file) if err != nil { return err } return nil } // funcHTTPInclude returns the body of a virtual (lightweight) request // to the given URI on the same server. Note that included bodies // are NOT escaped, so you should only include trusted resources. // If it is not trusted, be sure to use escaping functions yourself. 
func (c TemplateContext) funcHTTPInclude(uri string) (string, error) { // prevent virtual request loops by counting how many levels // deep we are; and if we get too deep, return an error recursionCount := 1 if numStr := c.Req.Header.Get(recursionPreventionHeader); numStr != "" { num, err := strconv.Atoi(numStr) if err != nil { return "", fmt.Errorf("parsing %s: %v", recursionPreventionHeader, err) } if num >= 3 { return "", fmt.Errorf("virtual request cycle") } recursionCount = num + 1 } buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) virtReq, err := http.NewRequest("GET", uri, nil) if err != nil { return "", err } virtReq.Host = c.Req.Host virtReq.RemoteAddr = "127.0.0.1:10000" // https://github.com/caddyserver/caddy/issues/5835 virtReq.Header = c.Req.Header.Clone() virtReq.Header.Set("Accept-Encoding", "identity") // https://github.com/caddyserver/caddy/issues/4352 virtReq.Trailer = c.Req.Trailer.Clone() virtReq.Header.Set(recursionPreventionHeader, strconv.Itoa(recursionCount)) vrw := &virtualResponseWriter{body: buf, header: make(http.Header)} server := c.Req.Context().Value(caddyhttp.ServerCtxKey).(http.Handler) server.ServeHTTP(vrw, virtReq) if vrw.status >= 400 { return "", fmt.Errorf("http %d", vrw.status) } err = c.executeTemplateInBuffer(uri, buf) if err != nil { return "", err } return buf.String(), nil } // funcImport parses the filename into the current template stack. The imported // file will be rendered within the current template by calling {{ block }} or // {{ template }} from the standard template library. 
If the imported file has // no {{ define }} blocks, the name of the import will be the path func (c *TemplateContext) funcImport(filename string) (string, error) { bodyBuf := bufPool.Get().(*bytes.Buffer) bodyBuf.Reset() defer bufPool.Put(bodyBuf) err := c.readFileToBuffer(filename, bodyBuf) if err != nil { return "", err } _, err = c.tpl.Parse(bodyBuf.String()) if err != nil { return "", err } return "", nil } func (c *TemplateContext) executeTemplateInBuffer(tplName string, buf *bytes.Buffer) error { c.NewTemplate(tplName) _, err := c.tpl.Parse(buf.String()) if err != nil { return err } buf.Reset() // reuse buffer for output return c.tpl.Execute(buf, c) } func (c TemplateContext) funcPlaceholder(name string) string { repl := c.Req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) // For safety, we don't want to allow the file placeholder in // templates because it could be used to read arbitrary files // if the template contents were not trusted. repl = repl.WithoutFile() value, _ := repl.GetString(name) return value } func (TemplateContext) funcEnv(varName string) string { return os.Getenv(varName) } // Cookie gets the value of a cookie with name. func (c TemplateContext) Cookie(name string) string { cookies := c.Req.Cookies() for _, cookie := range cookies { if cookie.Name == name { return cookie.Value } } return "" } // RemoteIP gets the IP address of the connection's remote IP. func (c TemplateContext) RemoteIP() string { ip, _, err := net.SplitHostPort(c.Req.RemoteAddr) if err != nil { return c.Req.RemoteAddr } return ip } // ClientIP gets the IP address of the real client making the request // if the request is trusted (see trusted_proxies), otherwise returns // the connection's remote IP. 
func (c TemplateContext) ClientIP() string { address := caddyhttp.GetVar(c.Req.Context(), caddyhttp.ClientIPVarKey).(string) clientIP, _, err := net.SplitHostPort(address) if err != nil { clientIP = address // no port } return clientIP } // Host returns the hostname portion of the Host header // from the HTTP request. func (c TemplateContext) Host() (string, error) { host, _, err := net.SplitHostPort(c.Req.Host) if err != nil { if !strings.Contains(c.Req.Host, ":") { // common with sites served on the default port 80 return c.Req.Host, nil } return "", err } return host, nil } // funcStripHTML returns s without HTML tags. It is fairly naive // but works with most valid HTML inputs. func (TemplateContext) funcStripHTML(s string) string { var buf bytes.Buffer var inTag, inQuotes bool var tagStart int for i, ch := range s { if inTag { if ch == '>' && !inQuotes { inTag = false } else if ch == '<' && !inQuotes { // false start buf.WriteString(s[tagStart:i]) tagStart = i } else if ch == '"' { inQuotes = !inQuotes } continue } if ch == '<' { inTag = true tagStart = i continue } buf.WriteRune(ch) } if inTag { // false start buf.WriteString(s[tagStart:]) } return buf.String() } // funcMarkdown renders the markdown body as HTML. The resulting // HTML is NOT escaped so that it can be rendered as HTML. func (TemplateContext) funcMarkdown(input any) (string, error) { inputStr := caddy.ToString(input) md := goldmark.New( goldmark.WithExtensions( extension.GFM, extension.Footnote, highlighting.NewHighlighting( highlighting.WithFormatOptions( chromahtml.WithClasses(true), ), ), ), goldmark.WithParserOptions( parser.WithAutoHeadingID(), ), goldmark.WithRendererOptions( gmhtml.WithUnsafe(), // TODO: this is not awesome, maybe should be configurable? 
), ) buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) err := md.Convert([]byte(inputStr), buf) if err != nil { return "", err } return buf.String(), nil } // funcSplitFrontMatter parses front matter out from the beginning of input, // and returns the separated key-value pairs and the body/content. input // must be a "stringy" value. func (TemplateContext) funcSplitFrontMatter(input any) (parsedMarkdownDoc, error) { meta, body, err := extractFrontMatter(caddy.ToString(input)) if err != nil { return parsedMarkdownDoc{}, err } return parsedMarkdownDoc{Meta: meta, Body: body}, nil } // funcListFiles reads and returns a slice of names from the given // directory relative to the root of c. func (c TemplateContext) funcListFiles(name string) ([]string, error) { if c.Root == nil { return nil, fmt.Errorf("root file system not specified") } dir, err := c.Root.Open(path.Clean(name)) if err != nil { return nil, err } defer dir.Close() stat, err := dir.Stat() if err != nil { return nil, err } if !stat.IsDir() { return nil, fmt.Errorf("%v is not a directory", name) } dirInfo, err := dir.Readdir(0) if err != nil { return nil, err } names := make([]string, len(dirInfo)) for i, fileInfo := range dirInfo { names[i] = fileInfo.Name() } return names, nil } // funcFileExists returns true if filename can be opened successfully. func (c TemplateContext) funcFileExists(filename string) (bool, error) { if c.Root == nil { return false, fmt.Errorf("root file system not specified") } file, err := c.Root.Open(filename) if err == nil { file.Close() return true, nil } return false, nil } // funcFileStat returns Stat of a filename func (c TemplateContext) funcFileStat(filename string) (fs.FileInfo, error) { if c.Root == nil { return nil, fmt.Errorf("root file system not specified") } file, err := c.Root.Open(path.Clean(filename)) if err != nil { return nil, err } defer file.Close() return file.Stat() } // funcHTTPError returns a structured HTTP handler error. 
EXPERIMENTAL; SUBJECT TO CHANGE. // Example usage: `{{if not (fileExists $includeFile)}}{{httpError 404}}{{end}}` func (c TemplateContext) funcHTTPError(statusCode int) (bool, error) { // Delete some headers that may have been set by the underlying // handler (such as file_server) which may break the error response. c.RespHeader.Header.Del("Content-Length") c.RespHeader.Header.Del("Content-Type") c.RespHeader.Header.Del("Etag") c.RespHeader.Header.Del("Last-Modified") c.RespHeader.Header.Del("Accept-Ranges") return false, caddyhttp.Error(statusCode, nil) } // funcHumanize transforms size and time inputs to a human readable format. // // Size inputs are expected to be integers, and are formatted as a // byte size, such as "83 MB". // // Time inputs are parsed using the given layout (default layout is RFC1123Z) // and are formatted as a relative time, such as "2 weeks ago". // See https://pkg.go.dev/time#pkg-constants for time layout docs. func (c TemplateContext) funcHumanize(formatType, data string) (string, error) { // The format type can optionally be followed // by a colon to provide arguments for the format parts := strings.Split(formatType, ":") switch parts[0] { case "size": dataint, dataerr := strconv.ParseUint(data, 10, 64) if dataerr != nil { return "", fmt.Errorf("humanize: size cannot be parsed: %s", dataerr.Error()) } return humanize.Bytes(dataint), nil case "time": timelayout := time.RFC1123Z if len(parts) > 1 { timelayout = parts[1] } dataint, dataerr := time.Parse(timelayout, data) if dataerr != nil { return "", fmt.Errorf("humanize: time cannot be parsed: %s", dataerr.Error()) } return humanize.Time(dataint), nil } return "", fmt.Errorf("no know function was given") } // funcMaybe invokes the plugged-in function named functionName if it is plugged in // (is a module in the 'http.handlers.templates.functions' namespace). If it is not // available, a log message is emitted. 
// // The first argument is the function name, and the rest of the arguments are // passed on to the actual function. // // This function is useful for executing templates that use components that may be // considered as optional in some cases (like during local development) where you do // not want to require everyone to have a custom Caddy build to be able to execute // your template. // // NOTE: This function is EXPERIMENTAL and subject to change or removal. func (c TemplateContext) funcMaybe(functionName string, args ...any) (any, error) { for _, funcMap := range c.CustomFuncs { if fn, ok := funcMap[functionName]; ok { val := reflect.ValueOf(fn) if val.Kind() != reflect.Func { continue } argVals := make([]reflect.Value, len(args)) for i, arg := range args { argVals[i] = reflect.ValueOf(arg) } returnVals := val.Call(argVals) switch len(returnVals) { case 0: return "", nil case 1: return returnVals[0].Interface(), nil case 2: var err error if !returnVals[1].IsNil() { err = returnVals[1].Interface().(error) } return returnVals[0].Interface(), err default: return nil, fmt.Errorf("maybe %s: invalid number of return values: %d", functionName, len(returnVals)) } } } c.config.logger.Named("maybe").Warn("template function could not be found; ignoring invocation", zap.String("name", functionName)) return "", nil } // WrappedHeader wraps niladic functions so that they // can be used in templates. (Template functions must // return a value.) type WrappedHeader struct{ http.Header } // Add adds a header field value, appending val to // existing values for that field. It returns an // empty string. func (h WrappedHeader) Add(field, val string) string { h.Header.Add(field, val) return "" } // Set sets a header field value, overwriting any // other values for that field. It returns an // empty string. func (h WrappedHeader) Set(field, val string) string { h.Header.Set(field, val) return "" } // Del deletes a header field. It returns an empty string. 
func (h WrappedHeader) Del(field string) string { h.Header.Del(field) return "" } var bufPool = sync.Pool{ New: func() any { return new(bytes.Buffer) }, } // at time of writing, sprig.FuncMap() makes a copy, thus // involves iterating the whole map, so do it just once var sprigFuncMap = sprig.TxtFuncMap() const recursionPreventionHeader = "Caddy-Templates-Include" ================================================ FILE: modules/caddyhttp/templates/tplcontext_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package templates import ( "bytes" "context" "errors" "fmt" "io/fs" "net/http" "os" "path/filepath" "reflect" "sort" "strings" "testing" "time" "github.com/caddyserver/caddy/v2/modules/caddyhttp" ) type handle struct{} func (h *handle) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Header.Get("Accept-Encoding") == "identity" { w.Write([]byte("good contents")) } else { w.Write([]byte("bad cause Accept-Encoding: " + r.Header.Get("Accept-Encoding"))) } } func TestHTTPInclude(t *testing.T) { tplContext := getContextOrFail(t) for i, test := range []struct { uri string handler *handle expect string }{ { uri: "https://example.com/foo/bar", handler: &handle{}, expect: "good contents", }, } { ctx := context.WithValue(tplContext.Req.Context(), caddyhttp.ServerCtxKey, test.handler) tplContext.Req = tplContext.Req.WithContext(ctx) tplContext.Req.Header.Add("Accept-Encoding", "gzip") result, err := tplContext.funcHTTPInclude(test.uri) if result != test.expect { t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expect, result) } if err != nil { t.Errorf("Test %d: got error: %v", i, result) } } } func TestMarkdown(t *testing.T) { tplContext := getContextOrFail(t) for i, test := range []struct { body string expect string }{ { body: "- str1\n- str2\n", expect: "
    \n
  • str1
  • \n
  • str2
  • \n
\n", }, } { result, err := tplContext.funcMarkdown(test.body) if result != test.expect { t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expect, result) } if err != nil { t.Errorf("Test %d: got error: %v", i, result) } } } func TestCookie(t *testing.T) { for i, test := range []struct { cookie *http.Cookie cookieName string expect string }{ { // happy path cookie: &http.Cookie{Name: "cookieName", Value: "cookieValue"}, cookieName: "cookieName", expect: "cookieValue", }, { // try to get a non-existing cookie cookie: &http.Cookie{Name: "cookieName", Value: "cookieValue"}, cookieName: "notExisting", expect: "", }, { // partial name match cookie: &http.Cookie{Name: "cookie", Value: "cookieValue"}, cookieName: "cook", expect: "", }, { // cookie with optional fields cookie: &http.Cookie{Name: "cookie", Value: "cookieValue", Path: "/path", Domain: "https://localhost", Expires: time.Now().Add(10 * time.Minute), MaxAge: 120}, cookieName: "cookie", expect: "cookieValue", }, } { tplContext := getContextOrFail(t) tplContext.Req.AddCookie(test.cookie) actual := tplContext.Cookie(test.cookieName) if actual != test.expect { t.Errorf("Test %d: Expected cookie value '%s' but got '%s' for cookie with name '%s'", i, test.expect, actual, test.cookieName) } } } func TestImport(t *testing.T) { for i, test := range []struct { fileContent string fileName string shouldErr bool expect string }{ { // file exists, template is defined fileContent: `{{ define "imported" }}text{{end}}`, fileName: "file1", shouldErr: false, expect: `"imported"`, }, { // file does not exit fileContent: "", fileName: "", shouldErr: true, }, } { tplContext := getContextOrFail(t) var absFilePath string // create files for test case if test.fileName != "" { absFilePath := filepath.Join(fmt.Sprintf("%s", tplContext.Root), test.fileName) if err := os.WriteFile(absFilePath, []byte(test.fileContent), os.ModePerm); err != nil { os.Remove(absFilePath) t.Fatalf("Test %d: Expected no error creating file, got: '%s'", i, 
err.Error()) } } // perform test tplContext.NewTemplate("parent") actual, err := tplContext.funcImport(test.fileName) templateWasDefined := strings.Contains(tplContext.tpl.DefinedTemplates(), test.expect) if err != nil { if !test.shouldErr { t.Errorf("Test %d: Expected no error, got: '%s'", i, err) } } else if test.shouldErr { t.Errorf("Test %d: Expected error but had none", i) } else if !templateWasDefined && actual != "" { // template should be defined, return value should be an empty string t.Errorf("Test %d: Expected template %s to be define but got %s", i, test.expect, tplContext.tpl.DefinedTemplates()) } if absFilePath != "" { if err := os.Remove(absFilePath); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test file, got: %v", i, err) } } } } func TestNestedInclude(t *testing.T) { for i, test := range []struct { child string childFile string parent string parentFile string shouldErr bool expect string child2 string child2File string }{ { // include in parent child: `{{ include "file1" }}`, childFile: "file0", parent: `{{ $content := "file2" }}{{ $p := include $content}}`, parentFile: "file1", shouldErr: false, expect: ``, child2: `This shouldn't show`, child2File: "file2", }, } { context := getContextOrFail(t) var absFilePath string var absFilePath0 string var absFilePath1 string var buf *bytes.Buffer var err error // create files and for test case if test.parentFile != "" { absFilePath = filepath.Join(fmt.Sprintf("%s", context.Root), test.parentFile) if err := os.WriteFile(absFilePath, []byte(test.parent), os.ModePerm); err != nil { os.Remove(absFilePath) t.Fatalf("Test %d: Expected no error creating file, got: '%s'", i, err.Error()) } } if test.childFile != "" { absFilePath0 = filepath.Join(fmt.Sprintf("%s", context.Root), test.childFile) if err := os.WriteFile(absFilePath0, []byte(test.child), os.ModePerm); err != nil { os.Remove(absFilePath0) t.Fatalf("Test %d: Expected no error creating file, got: 
'%s'", i, err.Error()) } } if test.child2File != "" { absFilePath1 = filepath.Join(fmt.Sprintf("%s", context.Root), test.child2File) if err := os.WriteFile(absFilePath1, []byte(test.child2), os.ModePerm); err != nil { os.Remove(absFilePath0) t.Fatalf("Test %d: Expected no error creating file, got: '%s'", i, err.Error()) } } buf = bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) buf.WriteString(test.child) err = context.executeTemplateInBuffer(test.childFile, buf) if err != nil { if !test.shouldErr { t.Errorf("Test %d: Expected no error, got: '%s'", i, err) } } else if test.shouldErr { t.Errorf("Test %d: Expected error but had none", i) } else if buf.String() != test.expect { // t.Errorf("Test %d: Expected '%s' but got '%s'", i, test.expect, buf.String()) } if absFilePath != "" { if err := os.Remove(absFilePath); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test file, got: %v", i, err) } } if absFilePath0 != "" { if err := os.Remove(absFilePath0); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test file, got: %v", i, err) } } if absFilePath1 != "" { if err := os.Remove(absFilePath1); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test file, got: %v", i, err) } } } } func TestInclude(t *testing.T) { for i, test := range []struct { fileContent string fileName string shouldErr bool expect string args string }{ { // file exists, content is text only fileContent: "text", fileName: "file1", shouldErr: false, expect: "text", }, { // file exists, content is template fileContent: "{{ if . 
}}text{{ end }}", fileName: "file1", shouldErr: false, expect: "text", }, { // file does not exit fileContent: "", fileName: "", shouldErr: true, }, { // args fileContent: "{{ index .Args 0 }}", fileName: "file1", shouldErr: false, args: "text", expect: "text", }, { // args, reference arg out of range fileContent: "{{ index .Args 1 }}", fileName: "file1", shouldErr: true, args: "text", }, } { tplContext := getContextOrFail(t) var absFilePath string // create files for test case if test.fileName != "" { absFilePath := filepath.Join(fmt.Sprintf("%s", tplContext.Root), test.fileName) if err := os.WriteFile(absFilePath, []byte(test.fileContent), os.ModePerm); err != nil { os.Remove(absFilePath) t.Fatalf("Test %d: Expected no error creating file, got: '%s'", i, err.Error()) } } // perform test actual, err := tplContext.funcInclude(test.fileName, test.args) if err != nil { if !test.shouldErr { t.Errorf("Test %d: Expected no error, got: '%s'", i, err) } } else if test.shouldErr { t.Errorf("Test %d: Expected error but had none", i) } else if actual != test.expect { t.Errorf("Test %d: Expected %s but got %s", i, test.expect, actual) } if absFilePath != "" { if err := os.Remove(absFilePath); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test file, got: %v", i, err) } } } } func TestCookieMultipleCookies(t *testing.T) { tplContext := getContextOrFail(t) cookieNameBase, cookieValueBase := "cookieName", "cookieValue" for i := 0; i < 10; i++ { tplContext.Req.AddCookie(&http.Cookie{ Name: fmt.Sprintf("%s%d", cookieNameBase, i), Value: fmt.Sprintf("%s%d", cookieValueBase, i), }) } for i := 0; i < 10; i++ { expectedCookieVal := fmt.Sprintf("%s%d", cookieValueBase, i) actualCookieVal := tplContext.Cookie(fmt.Sprintf("%s%d", cookieNameBase, i)) if actualCookieVal != expectedCookieVal { t.Errorf("Expected cookie value %s, found %s", expectedCookieVal, actualCookieVal) } } } func TestIP(t *testing.T) { tplContext := 
getContextOrFail(t) for i, test := range []struct { inputRemoteAddr string expect string }{ {"1.1.1.1:1111", "1.1.1.1"}, {"1.1.1.1", "1.1.1.1"}, {"[::1]:11", "::1"}, {"[2001:db8:a0b:12f0::1]", "[2001:db8:a0b:12f0::1]"}, {`[fe80:1::3%eth0]:44`, `fe80:1::3%eth0`}, } { tplContext.Req.RemoteAddr = test.inputRemoteAddr if actual := tplContext.RemoteIP(); actual != test.expect { t.Errorf("Test %d: Expected %s but got %s", i, test.expect, actual) } } } func TestStripHTML(t *testing.T) { tplContext := getContextOrFail(t) for i, test := range []struct { input string expect string }{ { // no tags input: `h1`, expect: `h1`, }, { // happy path input: `

h1

`, expect: `h1`, }, { // tag in quotes input: `">h1`, expect: `h1`, }, { // multiple tags input: `

h1

`, expect: `h1`, }, { // tags not closed input: `hi`, expect: ` 0 && !reflect.DeepEqual(test.fileNames, actual) { t.Errorf("Test %d: Expected files %v, got: %v", i, test.fileNames, actual) } } } if dirPath != "" { if err := os.RemoveAll(dirPath); err != nil && !errors.Is(err, fs.ErrNotExist) { t.Fatalf("Test %d: Expected no error removing temporary test directory, got: %v", i, err) } } } } func TestSplitFrontMatter(t *testing.T) { tplContext := getContextOrFail(t) for i, test := range []struct { input string expect string body string }{ { // yaml with windows newline input: "---\r\ntitle: Welcome\r\n---\r\n# Test\\r\\n", expect: `Welcome`, body: "\r\n# Test\\r\\n", }, { // yaml input: `--- title: Welcome --- ### Test`, expect: `Welcome`, body: "\n### Test", }, { // yaml with dots for closer input: `--- title: Welcome ... ### Test`, expect: `Welcome`, body: "\n### Test", }, { // yaml with non-fence '...' line after closing fence (i.e. first matching closing fence should be used) input: `--- title: Welcome --- ### Test ... yeah`, expect: `Welcome`, body: "\n### Test\n...\nyeah", }, { // toml input: `+++ title = "Welcome" +++ ### Test`, expect: `Welcome`, body: "\n### Test", }, { // json input: `{ "title": "Welcome" } ### Test`, expect: `Welcome`, body: "\n### Test", }, } { result, _ := tplContext.funcSplitFrontMatter(test.input) if result.Meta["title"] != test.expect { t.Errorf("Test %d: Expected %s, found %s. Input was SplitFrontMatter(%s)", i, test.expect, result.Meta["title"], test.input) } if result.Body != test.body { t.Errorf("Test %d: Expected body %s, found %s. 
Input was SplitFrontMatter(%s)", i, test.body, result.Body, test.input) } } } func TestHumanize(t *testing.T) { tplContext := getContextOrFail(t) for i, test := range []struct { format string inputData string expect string errorCase bool verifyErr func(actual_string, substring string) bool }{ { format: "size", inputData: "2048000", expect: "2.0 MB", errorCase: false, verifyErr: strings.Contains, }, { format: "time", inputData: "Fri, 05 May 2022 15:04:05 +0200", expect: "ago", errorCase: false, verifyErr: strings.HasSuffix, }, { format: "time:2006-Jan-02", inputData: "2022-May-05", expect: "ago", errorCase: false, verifyErr: strings.HasSuffix, }, { format: "time", inputData: "Fri, 05 May 2022 15:04:05 GMT+0200", expect: "error:", errorCase: true, verifyErr: strings.HasPrefix, }, } { if actual, err := tplContext.funcHumanize(test.format, test.inputData); !test.verifyErr(actual, test.expect) { if !test.errorCase { t.Errorf("Test %d: Expected '%s' but got '%s'", i, test.expect, actual) if err != nil { t.Errorf("Test %d: error: %s", i, err.Error()) } } } } } func getContextOrFail(t *testing.T) TemplateContext { tplContext, err := initTestContext() t.Cleanup(func() { os.RemoveAll(string(tplContext.Root.(http.Dir))) }) if err != nil { t.Fatalf("failed to prepare test context: %v", err) } return tplContext } func initTestContext() (TemplateContext, error) { body := bytes.NewBufferString("request body") request, err := http.NewRequest("GET", "https://example.com/foo/bar", body) if err != nil { return TemplateContext{}, err } tmpDir, err := os.MkdirTemp(os.TempDir(), "caddy") if err != nil { return TemplateContext{}, err } return TemplateContext{ Root: http.Dir(tmpDir), Req: request, RespHeader: WrappedHeader{make(http.Header)}, }, nil } ================================================ FILE: modules/caddyhttp/tracing/module.go ================================================ package tracing import ( "fmt" "net/http" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" 
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Tracing{})
	httpcaddyfile.RegisterHandlerDirective("tracing", parseCaddyfile)
}

// Tracing implements an HTTP handler that adds support for distributed tracing,
// using OpenTelemetry. This module is responsible for the injection and
// propagation of the trace context. Configure this module via environment
// variables (see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md).
// Some values can be overwritten in the configuration file.
type Tracing struct {
	// SpanName is a span name. It should follow the naming guidelines here:
	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#span
	SpanName string `json:"span"`

	// SpanAttributes are custom key-value pairs to be added to spans
	SpanAttributes map[string]string `json:"span_attributes,omitempty"`

	// otel implements opentelemetry related logic.
	otel openTelemetryWrapper

	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (Tracing) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.tracing",
		New: func() caddy.Module { return new(Tracing) },
	}
}

// Provision implements caddy.Provisioner. It initializes the OpenTelemetry
// wrapper (exporter, propagators, and the shared tracer provider).
func (ot *Tracing) Provision(ctx caddy.Context) error {
	ot.logger = ctx.Logger()
	var err error
	ot.otel, err = newOpenTelemetryWrapper(ctx, ot.SpanName, ot.SpanAttributes)
	return err
}

// Cleanup implements caddy.CleanerUpper and closes any idle connections. It
// calls Shutdown method for a trace provider https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#shutdown.
func (ot *Tracing) Cleanup() error {
	if err := ot.otel.cleanup(ot.logger); err != nil {
		return fmt.Errorf("tracerProvider shutdown: %w", err)
	}
	return nil
}

// ServeHTTP implements caddyhttp.MiddlewareHandler.
func (ot *Tracing) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	return ot.otel.ServeHTTP(w, r, next)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	tracing {
//		[span <span_name>]
//		[span_attributes {
//			attr1 value1
//			attr2 value2
//		}]
//	}
func (ot *Tracing) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// setParameter consumes exactly one argument into val; zero or
	// more than one argument is a syntax error.
	setParameter := func(d *caddyfile.Dispenser, val *string) error {
		if d.NextArg() {
			*val = d.Val()
		} else {
			return d.ArgErr()
		}
		if d.NextArg() {
			return d.ArgErr()
		}
		return nil
	}

	// paramsMap is a mapping between "string" parameter from the Caddyfile and its destination within the module
	paramsMap := map[string]*string{
		"span": &ot.SpanName,
	}

	d.Next() // consume directive name
	if d.NextArg() {
		return d.ArgErr()
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "span_attributes":
			if ot.SpanAttributes == nil {
				ot.SpanAttributes = make(map[string]string)
			}
			// each nested line is exactly one key/value pair
			for d.NextBlock(1) {
				key := d.Val()
				if !d.NextArg() {
					return d.ArgErr()
				}
				value := d.Val()
				if d.NextArg() {
					return d.ArgErr()
				}
				ot.SpanAttributes[key] = value
			}
		default:
			if dst, ok := paramsMap[d.Val()]; ok {
				if err := setParameter(d, dst); err != nil {
					return err
				}
			} else {
				return d.ArgErr()
			}
		}
	}
	return nil
}

// parseCaddyfile adapts UnmarshalCaddyfile for directive registration.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	var m Tracing
	err := m.UnmarshalCaddyfile(h.Dispenser)
	return &m, err
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Tracing)(nil)
	_ caddyhttp.MiddlewareHandler = (*Tracing)(nil)
	_ caddyfile.Unmarshaler       = (*Tracing)(nil)
)

================================================
FILE: modules/caddyhttp/tracing/module_test.go
================================================
package tracing

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

// TestTracing_UnmarshalCaddyfile covers the valid configuration shapes:
// full config, span-name only, empty block, and span-attributes only.
func TestTracing_UnmarshalCaddyfile(t *testing.T) {
	tests := []struct {
		name           string
		spanName       string
		spanAttributes map[string]string
		d              *caddyfile.Dispenser
		wantErr        bool
	}{
		{
			name:     "Full config",
			spanName: "my-span",
			spanAttributes: map[string]string{
				"attr1": "value1",
				"attr2": "value2",
			},
			d: caddyfile.NewTestDispenser(`
tracing {
	span my-span
	span_attributes {
		attr1 value1
		attr2 value2
	}
}`),
			wantErr: false,
		},
		{
			name:     "Only span name in the config",
			spanName: "my-span",
			d: caddyfile.NewTestDispenser(`
tracing {
	span my-span
}`),
			wantErr: false,
		},
		{
			name: "Empty config",
			d: caddyfile.NewTestDispenser(`
tracing {
}`),
			wantErr: false,
		},
		{
			name: "Only span attributes",
			spanAttributes: map[string]string{
				"service.name":    "my-service",
				"service.version": "1.0.0",
			},
			d: caddyfile.NewTestDispenser(`
tracing {
	span_attributes {
		service.name my-service
		service.version 1.0.0
	}
}`),
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ot := &Tracing{}
			if err := ot.UnmarshalCaddyfile(tt.d); (err != nil) != tt.wantErr {
				t.Errorf("UnmarshalCaddyfile() error = %v, wantErrType %v", err, tt.wantErr)
			}
			if ot.SpanName != tt.spanName {
				t.Errorf("UnmarshalCaddyfile() SpanName = %v, want SpanName %v", ot.SpanName, tt.spanName)
			}
			if len(tt.spanAttributes) > 0 {
				if ot.SpanAttributes == nil {
					t.Errorf("UnmarshalCaddyfile() SpanAttributes is nil, expected %v", tt.spanAttributes)
				} else {
					for key, expectedValue := range tt.spanAttributes {
						if actualValue, exists := ot.SpanAttributes[key]; !exists {
							t.Errorf("UnmarshalCaddyfile() SpanAttributes missing key %v", key)
						} else if actualValue != expectedValue {
							t.Errorf("UnmarshalCaddyfile() SpanAttributes[%v] = %v, want %v", key, actualValue, expectedValue)
						}
					}
				}
			}
		})
	}
}

// TestTracing_UnmarshalCaddyfile_Error covers the syntax-error paths:
// unknown parameter, missing argument, and malformed span_attributes lines.
func TestTracing_UnmarshalCaddyfile_Error(t *testing.T) {
	tests := []struct {
		name    string
		d       *caddyfile.Dispenser
		wantErr bool
	}{
		{
			name: "Unknown parameter",
			d: caddyfile.NewTestDispenser(`
tracing {
	foo bar
}`),
			wantErr: true,
		},
		{
			name: "Missed argument",
			d: caddyfile.NewTestDispenser(`
tracing {
	span
}`),
			wantErr: true,
		},
		{
			name: "Span attributes missing value",
			d: caddyfile.NewTestDispenser(`
tracing {
	span_attributes {
		key
	}
}`),
			wantErr: true,
		},
		{
			name: "Span attributes too many arguments",
			d: caddyfile.NewTestDispenser(`
tracing {
	span_attributes {
		key value extra
	}
}`),
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ot := &Tracing{}
			if err := ot.UnmarshalCaddyfile(tt.d); (err != nil) != tt.wantErr {
				t.Errorf("UnmarshalCaddyfile() error = %v, wantErrType %v", err, tt.wantErr)
			}
		})
	}
}

// With no incoming trace headers, the handler must generate a fresh,
// non-zero traceparent for the downstream request.
func TestTracing_ServeHTTP_Propagation_Without_Initial_Headers(t *testing.T) {
	ot := &Tracing{
		SpanName: "mySpan",
	}

	req := createRequestWithContext("GET", "https://example.com/foo")
	w := httptest.NewRecorder()

	var handler caddyhttp.HandlerFunc = func(writer http.ResponseWriter, request *http.Request) error {
		traceparent := request.Header.Get("Traceparent")
		if traceparent == "" || strings.HasPrefix(traceparent, "00-00000000000000000000000000000000-0000000000000000") {
			t.Errorf("Invalid traceparent: %v", traceparent)
		}
		return nil
	}

	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	if err := ot.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	if err := ot.ServeHTTP(w, req, handler); err != nil {
		t.Errorf("ServeHTTP error: %v", err)
	}
}

// With an incoming traceparent, the original trace ID must be preserved
// (propagated) to the downstream request.
func TestTracing_ServeHTTP_Propagation_With_Initial_Headers(t *testing.T) {
	ot := &Tracing{
		SpanName: "mySpan",
	}

	req := createRequestWithContext("GET", "https://example.com/foo")
	req.Header.Set("traceparent", "00-11111111111111111111111111111111-1111111111111111-01")
	w := httptest.NewRecorder()

	var handler caddyhttp.HandlerFunc = func(writer http.ResponseWriter, request *http.Request) error {
		traceparent := request.Header.Get("Traceparent")
		if !strings.HasPrefix(traceparent, "00-11111111111111111111111111111111") {
			t.Errorf("Invalid traceparent: %v", traceparent)
		}
		return nil
	}

	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	if err := ot.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	if err := ot.ServeHTTP(w, req, handler); err != nil {
		t.Errorf("ServeHTTP error: %v", err)
	}
}

// An error returned by the next handler must surface through the
// tracing middleware unchanged (matched via errors.Is).
func TestTracing_ServeHTTP_Next_Error(t *testing.T) {
	ot := &Tracing{
		SpanName: "mySpan",
	}

	req := createRequestWithContext("GET", "https://example.com/foo")
	w := httptest.NewRecorder()

	expectErr := errors.New("test error")

	var handler caddyhttp.HandlerFunc = func(writer http.ResponseWriter, request *http.Request) error {
		return expectErr
	}

	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	if err := ot.Provision(ctx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	if err := ot.ServeHTTP(w, req, handler); err == nil || !errors.Is(err, expectErr) {
		t.Errorf("expected error, got: %v", err)
	}
}

func TestTracing_JSON_Configuration(t *testing.T) {
	// Test that our struct correctly marshals to and from JSON
	original := &Tracing{
		SpanName: "test-span",
		SpanAttributes: map[string]string{
			"service.name":    "test-service",
			"service.version": "1.0.0",
			"env":             "test",
		},
	}

	jsonData, err := json.Marshal(original)
	if err != nil {
		t.Fatalf("Failed to marshal to JSON: %v", err)
	}

	var unmarshaled Tracing
	if err := json.Unmarshal(jsonData, &unmarshaled); err != nil {
		t.Fatalf("Failed to unmarshal from JSON: %v", err)
	}

	if unmarshaled.SpanName != original.SpanName {
		t.Errorf("Expected SpanName %s, got %s", original.SpanName, unmarshaled.SpanName)
	}

	if len(unmarshaled.SpanAttributes) != len(original.SpanAttributes) {
		t.Errorf("Expected %d span attributes, got %d", len(original.SpanAttributes), len(unmarshaled.SpanAttributes))
	}

	for key, expectedValue := range original.SpanAttributes {
		if actualValue, exists := unmarshaled.SpanAttributes[key]; !exists {
			t.Errorf("Expected span attribute %s to exist", key)
		} else if actualValue != expectedValue {
			t.Errorf("Expected span attribute %s = %s, got %s", key, expectedValue, actualValue)
		}
	}

	t.Logf("JSON representation: %s", string(jsonData))
}

// TestTracing_OpenTelemetry_Span_Attributes records real spans in memory
// and asserts that configured span attributes have their placeholders
// (request and response) replaced before being attached to the span.
func TestTracing_OpenTelemetry_Span_Attributes(t *testing.T) {
	// Create an in-memory span recorder to capture actual span data
	spanRecorder := tracetest.NewSpanRecorder()
	provider := trace.NewTracerProvider(
		trace.WithSpanProcessor(spanRecorder),
	)

	// Create our tracing module with span attributes that include placeholders
	ot := &Tracing{
		SpanName: "test-span",
		SpanAttributes: map[string]string{
			"static":               "test-service",
			"request-placeholder":  "{http.request.method}",
			"response-placeholder": "{http.response.header.X-Some-Header}",
			"mixed":                "prefix-{http.request.method}-{http.response.header.X-Some-Header}",
		},
	}

	// Create a specific request to test against
	req, _ := http.NewRequest("POST", "https://api.example.com/v1/users?id=123", nil)
	req.Host = "api.example.com"
	w := httptest.NewRecorder()

	// Set up the replacer
	repl := caddy.NewReplacer()
	ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
	ctx = context.WithValue(ctx, caddyhttp.VarsCtxKey, make(map[string]any))
	req = req.WithContext(ctx)

	// Set up request placeholders
	repl.Set("http.request.method", req.Method)
	repl.Set("http.request.uri", req.URL.RequestURI())

	// Handler to generate the response
	var handler caddyhttp.HandlerFunc = func(writer http.ResponseWriter, request *http.Request) error {
		writer.Header().Set("X-Some-Header", "some-value")
		writer.WriteHeader(200)
		// Make response headers available to replacer
		repl.Set("http.response.header.X-Some-Header", writer.Header().Get("X-Some-Header"))
		return nil
	}

	// Set up Caddy context
	caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	// Override the global tracer provider with our test provider
	// This is a bit hacky but necessary to capture the actual spans
	originalProvider := globalTracerProvider
	globalTracerProvider = &tracerProvider{
		tracerProvider:         provider,
		tracerProvidersCounter: 1, // Simulate one user
	}
	defer func() {
		globalTracerProvider = originalProvider
	}()

	// Provision the tracing module
	if err := ot.Provision(caddyCtx); err != nil {
		t.Errorf("Provision error: %v", err)
		t.FailNow()
	}

	// Execute the request
	if err := ot.ServeHTTP(w, req, handler); err != nil {
		t.Errorf("ServeHTTP error: %v", err)
	}

	// Get the recorded spans
	spans := spanRecorder.Ended()
	if len(spans) == 0 {
		t.Fatal("Expected at least one span to be recorded")
	}

	// Find our span (should be the one with our test span name)
	var testSpan trace.ReadOnlySpan
	for _, span := range spans {
		if span.Name() == "test-span" {
			testSpan = span
			break
		}
	}
	if testSpan == nil {
		t.Fatal("Could not find test span in recorded spans")
	}

	// Verify that the span attributes were set correctly with placeholder replacement
	expectedAttributes := map[string]string{
		"static":               "test-service",
		"request-placeholder":  "POST",
		"response-placeholder": "some-value",
		"mixed":                "prefix-POST-some-value",
	}

	actualAttributes := make(map[string]string)
	for _, attr := range testSpan.Attributes() {
		actualAttributes[string(attr.Key)] = attr.Value.AsString()
	}

	for key, expectedValue := range expectedAttributes {
		if actualValue, exists := actualAttributes[key]; !exists {
			t.Errorf("Expected span attribute %s to be set", key)
		} else if actualValue != expectedValue {
			t.Errorf("Expected span attribute %s = %s, got %s", key, expectedValue, actualValue)
		}
	}

	t.Logf("Recorded span attributes: %+v", actualAttributes)
}

// createRequestWithContext builds a request whose context already carries
// a caddy.Replacer, as the middleware expects at runtime.
func createRequestWithContext(method string, url string) *http.Request {
	r, _ := http.NewRequest(method, url, nil)
	repl := caddy.NewReplacer()
	ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
	r =
	r.WithContext(ctx)
	return r
}

================================================
FILE: modules/caddyhttp/tracing/tracer.go
================================================
package tracing

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/exporters/autoexport"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/contrib/propagators/autoprop"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

const (
	webEngineName                = "Caddy"
	defaultSpanName              = "handler"
	nextCallCtxKey  caddy.CtxKey = "nextCall"
)

// nextCall store the next handler, and the error value return on calling it (if any)
type nextCall struct {
	next caddyhttp.Handler
	err  error
}

// openTelemetryWrapper is responsible for the tracing injection, extraction and propagation.
type openTelemetryWrapper struct {
	propagators propagation.TextMapPropagator

	// handler is the otelhttp-wrapped entry point; it starts the span
	// and then calls serveHTTP below.
	handler http.Handler

	spanName string

	spanAttributes map[string]string
}

// newOpenTelemetryWrapper is responsible for the openTelemetryWrapper initialization using provided configuration.
func newOpenTelemetryWrapper(
	ctx context.Context,
	spanName string,
	spanAttributes map[string]string,
) (openTelemetryWrapper, error) {
	if spanName == "" {
		spanName = defaultSpanName
	}

	ot := openTelemetryWrapper{
		spanName:       spanName,
		spanAttributes: spanAttributes,
	}

	version, _ := caddy.Version()
	res, err := ot.newResource(webEngineName, version)
	if err != nil {
		return ot, fmt.Errorf("creating resource error: %w", err)
	}

	// exporter and propagators are configured via OTEL_* environment
	// variables (autoexport/autoprop)
	traceExporter, err := autoexport.NewSpanExporter(ctx)
	if err != nil {
		return ot, fmt.Errorf("creating trace exporter error: %w", err)
	}

	ot.propagators = autoprop.NewTextMapPropagator()

	// the tracer provider is shared (reference-counted) across all
	// instances of this module; see tracerprovider.go
	tracerProvider := globalTracerProvider.getTracerProvider(
		sdktrace.WithBatcher(traceExporter),
		sdktrace.WithResource(res),
	)

	ot.handler = otelhttp.NewHandler(http.HandlerFunc(ot.serveHTTP),
		ot.spanName,
		otelhttp.WithTracerProvider(tracerProvider),
		otelhttp.WithPropagators(ot.propagators),
		otelhttp.WithSpanNameFormatter(ot.spanNameFormatter),
	)

	return ot, nil
}

// serveHTTP injects a tracing context and call the next handler.
func (ot *openTelemetryWrapper) serveHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	ot.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
	spanCtx := trace.SpanContextFromContext(ctx)
	if spanCtx.IsValid() {
		traceID := spanCtx.TraceID().String()
		spanID := spanCtx.SpanID().String()
		// Add a trace_id placeholder, accessible via `{http.vars.trace_id}`.
		caddyhttp.SetVar(ctx, "trace_id", traceID)
		// Add a span_id placeholder, accessible via `{http.vars.span_id}`.
		caddyhttp.SetVar(ctx, "span_id", spanID)
		// Add the traceID and spanID to the log fields for the request.
		if extra, ok := ctx.Value(caddyhttp.ExtraLogFieldsCtxKey).(*caddyhttp.ExtraLogFields); ok {
			extra.Add(zap.String("traceID", traceID))
			extra.Add(zap.String("spanID", spanID))
		}
	}

	// the real next handler was smuggled through the context by
	// ServeHTTP below; its error is carried back the same way
	next := ctx.Value(nextCallCtxKey).(*nextCall)
	next.err = next.next.ServeHTTP(w, r)

	// Add custom span attributes to the current span
	// (after the next handler ran, so response placeholders resolve)
	span := trace.SpanFromContext(ctx)
	if span.IsRecording() && len(ot.spanAttributes) > 0 {
		replacer := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
		attributes := make([]attribute.KeyValue, 0, len(ot.spanAttributes))
		for key, value := range ot.spanAttributes {
			// Allow placeholder replacement in attribute values
			replacedValue := replacer.ReplaceAll(value, "")
			attributes = append(attributes, attribute.String(key, replacedValue))
		}
		span.SetAttributes(attributes...)
	}
}

// ServeHTTP propagates call to the by wrapped by `otelhttp` next handler.
func (ot *openTelemetryWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	n := &nextCall{
		next: next,
		err:  nil,
	}
	ot.handler.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), nextCallCtxKey, n)))

	return n.err
}

// cleanup flush all remaining data and shutdown a tracerProvider
func (ot *openTelemetryWrapper) cleanup(logger *zap.Logger) error {
	return globalTracerProvider.cleanupTracerProvider(logger)
}

// newResource creates a resource that describe current handler instance and merge it with a default attributes value.
func (ot *openTelemetryWrapper) newResource(
	webEngineName,
	webEngineVersion string,
) (*resource.Resource, error) {
	return resource.Merge(resource.Default(), resource.NewSchemaless(
		semconv.WebEngineName(webEngineName),
		semconv.WebEngineVersion(webEngineVersion),
	))
}

// spanNameFormatter performs the replacement of placeholders in the span name
func (ot *openTelemetryWrapper) spanNameFormatter(operation string, r *http.Request) string {
	return r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer).ReplaceAll(operation, "")
}

================================================
FILE: modules/caddyhttp/tracing/tracer_test.go
================================================
package tracing

import (
	"context"
	"testing"

	"github.com/caddyserver/caddy/v2"
)

// Verifies that wrapper construction with default span name and no
// attributes succeeds and installs propagators.
func TestOpenTelemetryWrapper_newOpenTelemetryWrapper(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	var otw openTelemetryWrapper
	var err error

	if otw, err = newOpenTelemetryWrapper(ctx,
		"",
		nil,
	); err != nil {
		t.Errorf("newOpenTelemetryWrapper() error = %v", err)
		t.FailNow()
	}

	if otw.propagators == nil {
		t.Errorf("Propagators should not be empty")
	}
}

================================================
FILE: modules/caddyhttp/tracing/tracerprovider.go
================================================
package tracing

import (
	"context"
	"fmt"
	"sync"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// globalTracerProvider stores global tracer provider and is responsible for graceful shutdown when nobody is using it.
var globalTracerProvider = &tracerProvider{}

// tracerProvider reference-counts users of a single shared
// sdktrace.TracerProvider so it is created on first use and shut down
// when the last user cleans up.
type tracerProvider struct {
	mu                     sync.Mutex
	tracerProvider         *sdktrace.TracerProvider
	tracerProvidersCounter int
}

// getTracerProvider create or return an existing global TracerProvider
func (t *tracerProvider) getTracerProvider(opts ...sdktrace.TracerProviderOption) *sdktrace.TracerProvider {
	t.mu.Lock()
	defer t.mu.Unlock()

	t.tracerProvidersCounter++

	// NOTE: opts are only applied when the provider is first created;
	// later callers share the provider configured by the first one.
	if t.tracerProvider == nil {
		t.tracerProvider = sdktrace.NewTracerProvider(
			opts...,
		)
	}

	return t.tracerProvider
}

// cleanupTracerProvider gracefully shutdown a TracerProvider
func (t *tracerProvider) cleanupTracerProvider(logger *zap.Logger) error {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.tracerProvidersCounter > 0 {
		t.tracerProvidersCounter--
	}

	// only the last user actually flushes and shuts the provider down
	if t.tracerProvidersCounter == 0 {
		if t.tracerProvider != nil {
			// tracerProvider.ForceFlush SHOULD be invoked according to https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#forceflush
			if err := t.tracerProvider.ForceFlush(context.Background()); err != nil {
				if c := logger.Check(zapcore.ErrorLevel, "forcing flush"); c != nil {
					c.Write(zap.Error(err))
				}
			}

			// tracerProvider.Shutdown MUST be invoked according to https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#shutdown
			if err := t.tracerProvider.Shutdown(context.Background()); err != nil {
				return fmt.Errorf("tracerProvider shutdown error: %w", err)
			}
		}

		t.tracerProvider = nil
	}

	return nil
}

================================================
FILE: modules/caddyhttp/tracing/tracerprovider_test.go
================================================
package tracing

import (
	"testing"

	"go.uber.org/zap"
)

// Two acquisitions must yield one provider and a counter of 2.
func Test_tracersProvider_getTracerProvider(t *testing.T) {
	tp := tracerProvider{}

	tp.getTracerProvider()
	tp.getTracerProvider()

	if tp.tracerProvider == nil {
		t.Errorf("There should be tracer provider")
	}

	if tp.tracerProvidersCounter != 2 {
		t.Errorf("Tracer providers counter should equal to 2")
	}
}

// Cleaning up one of two users must decrement the counter but keep the
// shared provider alive.
func Test_tracersProvider_cleanupTracerProvider(t *testing.T) {
	tp := tracerProvider{}

	tp.getTracerProvider()
	tp.getTracerProvider()

	err := tp.cleanupTracerProvider(zap.NewNop())
	if err != nil {
		t.Errorf("There should be no error: %v", err)
	}

	if tp.tracerProvider == nil {
		t.Errorf("There should be tracer provider")
	}

	if tp.tracerProvidersCounter != 1 {
		t.Errorf("Tracer providers counter should equal to 1")
	}
}

================================================
FILE: modules/caddyhttp/vars.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddyhttp

import (
	"context"
	"fmt"
	"net/http"
	"reflect"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types/ref"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

var stringSliceType = reflect.TypeFor[[]string]()

func init() {
	caddy.RegisterModule(VarsMiddleware{})
	caddy.RegisterModule(VarsMatcher{})
	caddy.RegisterModule(MatchVarsRE{})
}

// VarsMiddleware is an HTTP middleware which sets variables to
// have values that can be used in the HTTP request handler
// chain. The primary way to access variables is with placeholders,
// which have the form: `{http.vars.variable_name}`, or with
// the `vars` and `vars_regexp` request matchers.
//
// The key is the variable name, and the value is the value of the
// variable.
Both the name and value may use or contain placeholders.
type VarsMiddleware map[string]any

// CaddyModule returns the Caddy module information.
func (VarsMiddleware) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.vars",
		New: func() caddy.Module { return new(VarsMiddleware) },
	}
}

// ServeHTTP expands placeholders in each configured key and (string)
// value, stores the results in the request's variable table, then calls
// the next handler.
func (m VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
	vars := r.Context().Value(VarsCtxKey).(map[string]any)
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	for k, v := range m {
		keyExpanded := repl.ReplaceAll(k, "")
		if valStr, ok := v.(string); ok {
			v = repl.ReplaceAll(valStr, "")
		}
		vars[keyExpanded] = v

		// Special case: the user ID is in the replacer, pulled from there
		// for access logs. Allow users to override it with the vars handler.
		if keyExpanded == "http.auth.user.id" {
			repl.Set(keyExpanded, v)
		}
	}
	return next.ServeHTTP(w, r)
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler. Syntax:
//
//	vars [<name> <val>] {
//		<name> <val>
//		...
//	}
func (m *VarsMiddleware) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name

	if *m == nil {
		*m = make(VarsMiddleware)
	}

	// nextVar reads one name/value pair; headerLine marks the optional
	// pair on the directive line itself.
	nextVar := func(headerLine bool) error {
		if headerLine {
			// header line is optional
			if !d.NextArg() {
				return nil
			}
		}
		varName := d.Val()

		if !d.NextArg() {
			return d.ArgErr()
		}
		varValue := d.ScalarVal()

		(*m)[varName] = varValue

		if d.NextArg() {
			return d.ArgErr()
		}
		return nil
	}

	if err := nextVar(true); err != nil {
		return err
	}
	for d.NextBlock(0) {
		if err := nextVar(false); err != nil {
			return err
		}
	}

	return nil
}

// VarsMatcher is an HTTP request matcher which can match
// requests based on variables in the context or placeholder
// values. The key is the placeholder or name of the variable,
// and the values are possible values the variable can be in
// order to match (logical OR'ed).
//
// If the key is surrounded by `{ }` it is assumed to be a
// placeholder. Otherwise, it will be considered a variable
// name.
//
// Placeholders in the keys are not expanded, but
// placeholders in the values are.
type VarsMatcher map[string][]string

// CaddyModule returns the Caddy module information.
func (VarsMatcher) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.vars",
		New: func() caddy.Module { return new(VarsMatcher) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *VarsMatcher) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	if *m == nil {
		*m = make(map[string][]string)
	}
	// iterate to merge multiple matchers into one
	for d.Next() {
		var field string
		if !d.Args(&field) {
			return d.Errf("malformed vars matcher: expected field name")
		}
		vals := d.RemainingArgs()
		if len(vals) == 0 {
			return d.Errf("malformed vars matcher: expected at least one value to match against")
		}
		(*m)[field] = append((*m)[field], vals...)
		if d.NextBlock(0) {
			return d.Err("malformed vars matcher: blocks are not supported")
		}
	}
	return nil
}

// Match matches a request based on variables in the context,
// or placeholders if the key is not a variable.
func (m VarsMatcher) Match(r *http.Request) bool {
	match, _ := m.MatchWithError(r)
	return match
}

// MatchWithError returns true if r matches m. An empty matcher matches
// every request.
func (m VarsMatcher) MatchWithError(r *http.Request) (bool, error) {
	if len(m) == 0 {
		return true, nil
	}

	vars := r.Context().Value(VarsCtxKey).(map[string]any)
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	var fromPlaceholder bool
	var matcherValExpanded, valExpanded, varStr, v string
	var varValue any
	for key, vals := range m {
		// a key of the exact form "{...}" is resolved as a placeholder;
		// anything else is looked up in the variable table
		if strings.HasPrefix(key, "{") && strings.HasSuffix(key, "}") && strings.Count(key, "{") == 1 {
			varValue, _ = repl.Get(strings.Trim(key, "{}"))
			fromPlaceholder = true
		} else {
			varValue = vars[key]
			fromPlaceholder = false
		}

		// normalize the variable's value to a string for comparison
		switch vv := varValue.(type) {
		case string:
			varStr = vv
		case fmt.Stringer:
			varStr = vv.String()
		case error:
			varStr = vv.Error()
		case nil:
			varStr = ""
		default:
			varStr = fmt.Sprintf("%v", vv)
		}

		// Only expand placeholders in values from literal variable names
		// (e.g. map outputs). Values resolved from placeholder keys are
		// already final and must not be re-expanded, as that would allow
		// user input like {env.SECRET} to be evaluated.
		valExpanded = varStr
		if !fromPlaceholder {
			valExpanded = repl.ReplaceAll(varStr, "")
		}

		// see if any of the values given in the matcher match the actual value
		for _, v = range vals {
			matcherValExpanded = repl.ReplaceAll(v, "")
			if valExpanded == matcherValExpanded {
				return true, nil
			}
		}
	}
	return false, nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression vars({'{magic_number}': ['3', '5']})
//	expression vars({'{foo}': 'single_value'})
func (VarsMatcher) CELLibrary(_ caddy.Context) (cel.Library, error) {
	return CELMatcherImpl(
		"vars",
		"vars_matcher_request_map",
		[]*cel.Type{CELTypeJSON},
		func(data ref.Val) (RequestMatcherWithError, error) {
			mapStrListStr, err := CELValueToMapStrList(data)
			if err != nil {
				return nil, err
			}
			return VarsMatcher(mapStrListStr), nil
		},
	)
}

// MatchVarsRE matches the value of the context variables by a given regular expression.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
type MatchVarsRE map[string]*MatchRegexp

// CaddyModule returns the Caddy module information.
func (MatchVarsRE) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.matchers.vars_regexp",
		New: func() caddy.Module { return new(MatchVarsRE) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler. Accepts either
// two args (field, pattern) or three (name, field, pattern).
func (m *MatchVarsRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	if *m == nil {
		*m = make(map[string]*MatchRegexp)
	}
	// iterate to merge multiple matchers into one
	for d.Next() {
		var first, second, third string
		if !d.Args(&first, &second) {
			return d.ArgErr()
		}

		var name, field, val string
		if d.Args(&third) {
			name = first
			field = second
			val = third
		} else {
			field = first
			val = second
		}

		// Default to the named matcher's name, if no regexp name is provided
		if name == "" {
			name = d.GetContextString(caddyfile.MatcherNameCtxKey)
		}

		(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
		if d.NextBlock(0) {
			return d.Err("malformed vars_regexp matcher: blocks are not supported")
		}
	}
	return nil
}

// Provision compiles m's regular expressions.
func (m MatchVarsRE) Provision(ctx caddy.Context) error {
	for _, rm := range m {
		err := rm.Provision(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}

// Match returns true if r matches m.
func (m MatchVarsRE) Match(r *http.Request) bool {
	match, _ := m.MatchWithError(r)
	return match
}

// MatchWithError returns true if r matches m.
func (m MatchVarsRE) MatchWithError(r *http.Request) (bool, error) {
	vars := r.Context().Value(VarsCtxKey).(map[string]any)
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

	var fromPlaceholder, match bool
	var valExpanded, varStr string
	var varValue any
	for key, val := range m {
		// a key of the exact form "{...}" is resolved as a placeholder;
		// anything else is looked up in the variable table
		if strings.HasPrefix(key, "{") && strings.HasSuffix(key, "}") && strings.Count(key, "{") == 1 {
			varValue, _ = repl.Get(strings.Trim(key, "{}"))
			fromPlaceholder = true
		} else {
			varValue = vars[key]
			fromPlaceholder = false
		}

		// normalize the variable's value to a string before matching
		switch vv := varValue.(type) {
		case string:
			varStr = vv
		case fmt.Stringer:
			varStr = vv.String()
		case error:
			varStr = vv.Error()
		case nil:
			varStr = ""
		default:
			varStr = fmt.Sprintf("%v", vv)
		}

		// Only expand placeholders in values from literal variable names
		// (e.g. map outputs). Values resolved from placeholder keys are
		// already final and must not be re-expanded, as that would allow
		// user input like {env.SECRET} to be evaluated.
		valExpanded = varStr
		if !fromPlaceholder {
			valExpanded = repl.ReplaceAll(varStr, "")
		}
		if match = val.Match(valExpanded, repl); match {
			return match, nil
		}
	}

	return false, nil
}

// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
//	expression vars_regexp('foo', '{magic_number}', '[0-9]+')
//	expression vars_regexp('{magic_number}', '[0-9]+')
func (MatchVarsRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
	// two-argument form: (field, pattern); the regexp name falls back
	// to the named matcher's name from the context
	unnamedPattern, err := CELMatcherImpl(
		"vars_regexp",
		"vars_regexp_request_string_string",
		[]*cel.Type{cel.StringType, cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			params, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			strParams := params.([]string)
			matcher := MatchVarsRE{}
			matcher[strParams[0]] = &MatchRegexp{
				Pattern: strParams[1],
				Name:    ctx.Value(MatcherNameCtxKey).(string),
			}
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	// three-argument form: (name, field, pattern)
	namedPattern, err := CELMatcherImpl(
		"vars_regexp",
		"vars_regexp_request_string_string_string",
		[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
		func(data ref.Val) (RequestMatcherWithError, error) {
			refStringList := stringSliceType
			params, err := data.ConvertToNative(refStringList)
			if err != nil {
				return nil, err
			}
			strParams := params.([]string)
			name := strParams[0]
			if name == "" {
				name = ctx.Value(MatcherNameCtxKey).(string)
			}
			matcher := MatchVarsRE{}
			matcher[strParams[1]] = &MatchRegexp{
				Pattern: strParams[2],
				Name:    name,
			}
			err = matcher.Provision(ctx)
			return matcher, err
		},
	)
	if err != nil {
		return nil, err
	}
	envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
	prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
	return NewMatcherCELLibrary(envOpts, prgOpts), nil
}

// Validate validates m's regular expressions.
func (m MatchVarsRE) Validate() error {
	for _, rm := range m {
		err := rm.Validate()
		if err != nil {
			return err
		}
	}
	return nil
}

// GetVar gets a value out of the context's variable table by key.
// If the key does not exist, the return value will be nil.
func GetVar(ctx context.Context, key string) any {
	varMap, ok := ctx.Value(VarsCtxKey).(map[string]any)
	if !ok {
		// no vars table in this context
		return nil
	}
	return varMap[key]
}

// SetVar sets a value in the context's variable table with
// the given key. It overwrites any previous value with the
// same key.
//
// If the value is nil (note: non-nil interface with nil
// underlying value does not count) and the key exists in
// the table, the key+value will be deleted from the table.
func SetVar(ctx context.Context, key string, value any) {
	varMap, ok := ctx.Value(VarsCtxKey).(map[string]any)
	if !ok {
		// no vars table in this context; nothing to do
		return
	}
	if value == nil {
		if _, ok := varMap[key]; ok {
			delete(varMap, key)
			return
		}
		// NOTE(review): when value is nil and the key is absent, control
		// falls through and a nil entry is stored; presumably intentional
		// (matches the doc comment above) — confirm before changing.
	}
	varMap[key] = value
}

// Interface guards
var (
	_ MiddlewareHandler       = (*VarsMiddleware)(nil)
	_ caddyfile.Unmarshaler   = (*VarsMiddleware)(nil)
	_ RequestMatcherWithError = (*VarsMatcher)(nil)
	_ caddyfile.Unmarshaler   = (*VarsMatcher)(nil)
	_ RequestMatcherWithError = (*MatchVarsRE)(nil)
	_ caddyfile.Unmarshaler   = (*MatchVarsRE)(nil)
)

================================================
FILE: modules/caddypki/acmeserver/acmeserver.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmeserver

import (
	"context"
	"fmt"
	weakrand "math/rand/v2"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/smallstep/certificates/acme"
	"github.com/smallstep/certificates/acme/api"
	acmeNoSQL "github.com/smallstep/certificates/acme/db/nosql"
	"github.com/smallstep/certificates/authority"
	"github.com/smallstep/certificates/authority/provisioner"
	"github.com/smallstep/certificates/db"
	"github.com/smallstep/nosql"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func init() {
	caddy.RegisterModule(Handler{})
}

// Handler is an ACME server handler.
type Handler struct {
	// The ID of the CA to use for signing. This refers to
	// the ID given to the CA in the `pki` app. If omitted,
	// the default ID is "local".
	CA string `json:"ca,omitempty"`

	// The lifetime for issued certificates
	Lifetime caddy.Duration `json:"lifetime,omitempty"`

	// The hostname or IP address by which ACME clients
	// will access the server. This is used to populate
	// the ACME directory endpoint. If not set, the Host
	// header of the request will be used.
	// COMPATIBILITY NOTE / TODO: This property may go away in the
	// future. Do not rely on this property long-term; check release notes.
	Host string `json:"host,omitempty"`

	// The path prefix under which to serve all ACME
	// endpoints. All other requests will not be served
	// by this handler and will be passed through to
	// the next one. Default: "/acme/".
	// COMPATIBILITY NOTE / TODO: This property may go away in the
	// future, as it is currently only required due to
	// limitations in the underlying library. Do not rely
	// on this property long-term; check release notes.
	PathPrefix string `json:"path_prefix,omitempty"`

	// If true, the CA's root will be the issuer instead of
	// the intermediate. This is NOT recommended and should
	// only be used when devices/clients do not properly
	// validate certificate chains. EXPERIMENTAL: Might be
	// changed or removed in the future.
	SignWithRoot bool `json:"sign_with_root,omitempty"`

	// The addresses of DNS resolvers to use when looking up
	// the TXT records for solving DNS challenges.
	// It accepts [network addresses](/docs/conventions#network-addresses)
	// with port range of only 1. If the host is an IP address,
	// it will be dialed directly to resolve the upstream server.
	// If the host is not an IP address, the addresses are resolved
	// using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution)
	// of the Go standard library. If the array contains more
	// than 1 resolver address, one is chosen at random.
	Resolvers []string `json:"resolvers,omitempty"`

	// Specify the set of enabled ACME challenges. An empty or absent value
	// means all challenges are enabled. Accepted values are:
	// "http-01", "dns-01", "tls-alpn-01"
	Challenges ACMEChallenges `json:"challenges,omitempty" `

	// The policy to use for issuing certificates
	Policy *Policy `json:"policy,omitempty"`

	logger    *zap.Logger            // set in Provision
	resolvers []caddy.NetworkAddress // parsed form of Resolvers (populated by makeClient)
	ctx       caddy.Context

	// Underlying smallstep ACME machinery, assembled during Provision.
	acmeDB        acme.DB
	acmeAuth      *authority.Authority
	acmeClient    acme.Client
	acmeLinker    acme.Linker
	acmeEndpoints http.Handler
}

// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.acme_server",
		New: func() caddy.Module { return new(Handler) },
	}
}

// Provision sets up the ACME server handler.
func (ash *Handler) Provision(ctx caddy.Context) error {
	ash.ctx = ctx
	ash.logger = ctx.Logger()

	// set some defaults
	if ash.CA == "" {
		ash.CA = caddypki.DefaultCAID
	}
	if ash.PathPrefix == "" {
		ash.PathPrefix = defaultPathPrefix
	}
	if ash.Lifetime == 0 {
		ash.Lifetime = caddy.Duration(12 * time.Hour)
	}
	if len(ash.Challenges) > 0 {
		if err := ash.Challenges.validate(); err != nil {
			return err
		}
	}

	// warn the operator if the policy is effectively allow-all
	ash.warnIfPolicyAllowsAll()

	// get a reference to the configured CA
	appModule, err := ctx.App("pki")
	if err != nil {
		return err
	}
	pkiApp := appModule.(*caddypki.PKI)
	ca, err := pkiApp.GetCA(ctx, ash.CA)
	if err != nil {
		return err
	}

	// make sure leaf cert lifetime is less than the intermediate cert lifetime. this check only
	// applies for caddy-managed intermediate certificates
	if ca.Intermediate == nil && ash.Lifetime >= ca.IntermediateLifetime {
		return fmt.Errorf("certificate lifetime (%s) should be less than intermediate certificate lifetime (%s)", time.Duration(ash.Lifetime), time.Duration(ca.IntermediateLifetime))
	}

	database, err := ash.openDatabase()
	if err != nil {
		return err
	}

	// build the smallstep authority configuration with a single ACME
	// provisioner reflecting this handler's settings
	authorityConfig := caddypki.AuthorityConfig{
		SignWithRoot: ash.SignWithRoot,
		AuthConfig: &authority.AuthConfig{
			Provisioners: provisioner.List{
				&provisioner.ACME{
					Name:       ash.CA,
					Challenges: ash.Challenges.toSmallstepType(),
					Options: &provisioner.Options{
						X509: ash.Policy.normalizeRules(),
					},
					Type: provisioner.TypeACME.String(),
					Claims: &provisioner.Claims{
						MinTLSDur:     &provisioner.Duration{Duration: 5 * time.Minute},
						MaxTLSDur:     &provisioner.Duration{Duration: 24 * time.Hour * 365},
						DefaultTLSDur: &provisioner.Duration{Duration: time.Duration(ash.Lifetime)},
					},
				},
			},
		},
		DB: database,
	}

	ash.acmeAuth, err = ca.NewAuthority(authorityConfig)
	if err != nil {
		return err
	}

	ash.acmeDB, err = acmeNoSQL.New(ash.acmeAuth.GetDatabase().(nosql.DB))
	if err != nil {
		return fmt.Errorf("configuring ACME DB: %v", err)
	}

	ash.acmeClient, err = ash.makeClient()
	if err != nil {
		return err
	}

	ash.acmeLinker = acme.NewLinker(
		ash.Host,
		strings.Trim(ash.PathPrefix, "/"),
	)

	// extract its http.Handler so we can use it directly
	r := chi.NewRouter()
	r.Route(ash.PathPrefix, func(r chi.Router) {
		api.Route(r)
	})
	ash.acmeEndpoints = r

	return nil
}

// warnIfPolicyAllowsAll logs a warning when neither allow nor deny rules
// are configured, meaning order identifiers are unrestricted.
func (ash *Handler) warnIfPolicyAllowsAll() {
	allow := ash.Policy.normalizeAllowRules()
	deny := ash.Policy.normalizeDenyRules()
	if allow != nil || deny != nil {
		return
	}
	allowWildcardNames := ash.Policy != nil && ash.Policy.AllowWildcardNames
	ash.logger.Warn(
		"acme_server policy has no allow/deny rules; order identifiers are unrestricted (allow-all)",
		zap.String("ca", ash.CA),
		zap.Bool("allow_wildcard_names", allowWildcardNames),
	)
}

// ServeHTTP serves the ACME endpoints for requests under PathPrefix and
// passes all other requests through to the next handler.
func (ash Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	if strings.HasPrefix(r.URL.Path, ash.PathPrefix) {
		// attach the ACME machinery to the request context as the
		// smallstep API handlers expect
		acmeCtx := acme.NewContext(
			r.Context(),
			ash.acmeDB,
			ash.acmeClient,
			ash.acmeLinker,
			nil,
		)
		acmeCtx = authority.NewContext(acmeCtx, ash.acmeAuth)
		r = r.WithContext(acmeCtx)

		ash.acmeEndpoints.ServeHTTP(w, r)
		return nil
	}
	return next.ServeHTTP(w, r)
}

// getDatabaseKey derives a normalized (lowercased, trimmed, cleaned)
// database pool key from the CA ID.
func (ash Handler) getDatabaseKey() string {
	key := ash.CA
	key = strings.ToLower(key)
	key = strings.TrimSpace(key)
	return keyCleaner.ReplaceAllLiteralString(key, "")
}

// Cleanup implements caddy.CleanerUpper and closes any idle databases.
func (ash Handler) Cleanup() error {
	key := ash.getDatabaseKey()
	// decrement the pool reference; the database is only closed
	// once no other handler is using it
	deleted, err := databasePool.Delete(key)
	if deleted {
		if c := ash.logger.Check(zapcore.DebugLevel, "unloading unused CA database"); c != nil {
			c.Write(zap.String("db_key", key))
		}
	}
	if err != nil {
		if c := ash.logger.Check(zapcore.ErrorLevel, "closing CA database"); c != nil {
			c.Write(zap.String("db_key", key), zap.Error(err))
		}
	}
	return err
}

// openDatabase loads (or creates, on first use) the bbolt-backed CA
// database for this handler's key, sharing it through databasePool.
func (ash Handler) openDatabase() (*db.AuthDB, error) {
	key := ash.getDatabaseKey()
	database, loaded, err := databasePool.LoadOrNew(key, func() (caddy.Destructor, error) {
		dbFolder := filepath.Join(caddy.AppDataDir(), "acme_server", key)
		dbPath := filepath.Join(dbFolder, "db")

		err := os.MkdirAll(dbFolder, 0o755)
		if err != nil {
			return nil, fmt.Errorf("making folder for CA database: %v", err)
		}

		dbConfig := &db.Config{
			Type:       "bbolt",
			DataSource: dbPath,
		}
		database, err := db.New(dbConfig)
		return databaseCloser{&database}, err
	})
	if loaded {
		if c := ash.logger.Check(zapcore.DebugLevel, "loaded preexisting CA database"); c != nil {
			c.Write(zap.String("db_key", key))
		}
	}
	return database.(databaseCloser).DB, err
}

// makeClient creates an ACME client which will use a custom
// resolver instead of net.DefaultResolver.
func (ash Handler) makeClient() (acme.Client, error) { // If no local resolvers are configured, check for global resolvers from TLS app resolversToUse := ash.Resolvers if len(resolversToUse) == 0 { tlsAppIface, err := ash.ctx.App("tls") if err == nil { tlsApp := tlsAppIface.(*caddytls.TLS) if len(tlsApp.Resolvers) > 0 { resolversToUse = tlsApp.Resolvers } } } for _, v := range resolversToUse { addr, err := caddy.ParseNetworkAddressWithDefaults(v, "udp", 53) if err != nil { return nil, err } if addr.PortRangeSize() != 1 { return nil, fmt.Errorf("resolver address must have exactly one address; cannot call %v", addr) } ash.resolvers = append(ash.resolvers, addr) } var resolver *net.Resolver if len(ash.resolvers) != 0 { dialer := &net.Dialer{ Timeout: 2 * time.Second, } resolver = &net.Resolver{ PreferGo: true, Dial: func(ctx context.Context, network, address string) (net.Conn, error) { //nolint:gosec addr := ash.resolvers[weakrand.IntN(len(ash.resolvers))] return dialer.DialContext(ctx, addr.Network, addr.JoinHostPort(0)) }, } } else { resolver = net.DefaultResolver } return resolverClient{ Client: acme.NewClient(), resolver: resolver, ctx: ash.ctx, }, nil } type resolverClient struct { acme.Client resolver *net.Resolver ctx context.Context } func (c resolverClient) LookupTxt(name string) ([]string, error) { return c.resolver.LookupTXT(c.ctx, name) } const defaultPathPrefix = "/acme/" var ( keyCleaner = regexp.MustCompile(`[^\w.-_]`) databasePool = caddy.NewUsagePool() ) type databaseCloser struct { DB *db.AuthDB } func (closer databaseCloser) Destruct() error { return (*closer.DB).Shutdown() } // Interface guards var ( _ caddyhttp.MiddlewareHandler = (*Handler)(nil) _ caddy.Provisioner = (*Handler)(nil) ) ================================================ FILE: modules/caddypki/acmeserver/acmeserver_test.go ================================================ package acmeserver import ( "strings" "testing" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) func 
TestHandler_warnIfPolicyAllowsAll(t *testing.T) { tests := []struct { name string policy *Policy wantWarns int wantAllowWildcard bool }{ { name: "warns when policy is nil", policy: nil, wantWarns: 1, wantAllowWildcard: false, }, { name: "warns when allow/deny rules are empty", policy: &Policy{}, wantWarns: 1, wantAllowWildcard: false, }, { name: "warns when only allow_wildcard_names is true", policy: &Policy{ AllowWildcardNames: true, }, wantWarns: 1, wantAllowWildcard: true, }, { name: "does not warn when allow rules are configured", policy: &Policy{ Allow: &RuleSet{ Domains: []string{"example.com"}, }, }, wantWarns: 0, wantAllowWildcard: false, }, { name: "does not warn when deny rules are configured", policy: &Policy{ Deny: &RuleSet{ Domains: []string{"bad.example.com"}, }, }, wantWarns: 0, wantAllowWildcard: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { core, logs := observer.New(zap.WarnLevel) ash := &Handler{ CA: "local", Policy: tt.policy, logger: zap.New(core), } ash.warnIfPolicyAllowsAll() if logs.Len() != tt.wantWarns { t.Fatalf("expected %d warning logs, got %d", tt.wantWarns, logs.Len()) } if tt.wantWarns == 0 { return } entry := logs.All()[0] if entry.Level != zap.WarnLevel { t.Fatalf("expected warn level, got %v", entry.Level) } if !strings.Contains(entry.Message, "policy has no allow/deny rules") { t.Fatalf("unexpected log message: %q", entry.Message) } ctx := entry.ContextMap() if ctx["ca"] != "local" { t.Fatalf("expected ca=local, got %v", ctx["ca"]) } if ctx["allow_wildcard_names"] != tt.wantAllowWildcard { t.Fatalf("expected allow_wildcard_names=%v, got %v", tt.wantAllowWildcard, ctx["allow_wildcard_names"]) } }) } } ================================================ FILE: modules/caddypki/acmeserver/caddyfile.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in 
compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package acmeserver

import (
	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
)

func init() {
	httpcaddyfile.RegisterDirective("acme_server", parseACMEServer)
}

// parseACMEServer sets up an ACME server handler from Caddyfile tokens.
//
//	acme_server [<matcher>] {
//		ca         <id>
//		lifetime   <duration>
//		resolvers  <addresses...>
//		challenges <challenges...>
//		allow_wildcard_names
//		allow {
//			domains   <domains...>
//			ip_ranges <addresses...>
//		}
//		deny {
//			domains   <domains...>
//			ip_ranges <addresses...>
//		}
//		sign_with_root
//	}
func parseACMEServer(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) {
	h.Next() // consume directive name
	matcherSet, err := h.ExtractMatcherSet()
	if err != nil {
		return nil, err
	}
	h.Next() // consume the directive name again (matcher parsing resets)

	// no inline args allowed
	if h.NextArg() {
		return nil, h.ArgErr()
	}

	var acmeServer Handler
	var ca *caddypki.CA

	for h.NextBlock(0) {
		switch h.Val() {
		case "ca":
			if !h.AllArgs(&acmeServer.CA) {
				return nil, h.ArgErr()
			}
			if ca == nil {
				ca = new(caddypki.CA)
			}
			ca.ID = acmeServer.CA

		case "lifetime":
			if !h.NextArg() {
				return nil, h.ArgErr()
			}
			dur, err := caddy.ParseDuration(h.Val())
			if err != nil {
				return nil, err
			}
			acmeServer.Lifetime = caddy.Duration(dur)

		case "resolvers":
			acmeServer.Resolvers = h.RemainingArgs()
			if len(acmeServer.Resolvers) == 0 {
				return nil, h.Errf("must specify at least one resolver address")
			}

		case "challenges":
			acmeServer.Challenges = append(acmeServer.Challenges, stringToChallenges(h.RemainingArgs())...)

		case "allow_wildcard_names":
			if acmeServer.Policy == nil {
				acmeServer.Policy = &Policy{}
			}
			acmeServer.Policy.AllowWildcardNames = true

		case "allow":
			r := &RuleSet{}
			for nesting := h.Nesting(); h.NextBlock(nesting); {
				if h.CountRemainingArgs() == 0 {
					return nil, h.ArgErr() // TODO:
				}
				switch h.Val() {
				case "domains":
					r.Domains = append(r.Domains, h.RemainingArgs()...)
				case "ip_ranges":
					r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
				default:
					return nil, h.Errf("unrecognized 'allow' subdirective: %s", h.Val())
				}
			}
			if acmeServer.Policy == nil {
				acmeServer.Policy = &Policy{}
			}
			acmeServer.Policy.Allow = r

		case "deny":
			r := &RuleSet{}
			for nesting := h.Nesting(); h.NextBlock(nesting); {
				if h.CountRemainingArgs() == 0 {
					return nil, h.ArgErr() // TODO:
				}
				switch h.Val() {
				case "domains":
					r.Domains = append(r.Domains, h.RemainingArgs()...)
				case "ip_ranges":
					r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
				default:
					return nil, h.Errf("unrecognized 'deny' subdirective: %s", h.Val())
				}
			}
			if acmeServer.Policy == nil {
				acmeServer.Policy = &Policy{}
			}
			acmeServer.Policy.Deny = r

		case "sign_with_root":
			if h.NextArg() {
				return nil, h.ArgErr()
			}
			acmeServer.SignWithRoot = true

		default:
			return nil, h.Errf("unrecognized ACME server directive: %s", h.Val())
		}
	}

	configVals := h.NewRoute(matcherSet, acmeServer)

	if ca == nil {
		return configVals, nil
	}

	// also register the referenced CA with the pki app
	return append(configVals, httpcaddyfile.ConfigValue{
		Class: "pki.ca",
		Value: ca,
	}), nil
}

================================================
FILE: modules/caddypki/acmeserver/challenges.go
================================================
package acmeserver

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/smallstep/certificates/authority/provisioner"
)

// ACMEChallenge is an opaque string that represents supported ACME challenges.
type ACMEChallenge string

const (
	HTTP_01     ACMEChallenge = "http-01"
	DNS_01      ACMEChallenge = "dns-01"
	TLS_ALPN_01 ACMEChallenge = "tls-alpn-01"
)

// validate checks if the given challenge is supported.
func (c ACMEChallenge) validate() error {
	switch c {
	case HTTP_01, DNS_01, TLS_ALPN_01:
		return nil
	default:
		return fmt.Errorf("acme challenge %q is not supported", c)
	}
}

// UnmarshalJSON first unmarshals the value into a string. Then it
// trims any space around it and lowercases it for normalization. The
// method does not and should not validate the value within accepted enums.
func (c *ACMEChallenge) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	*c = ACMEChallenge(strings.ToLower(strings.TrimSpace(s)))
	return nil
}

// String returns a string representation of the challenge.
func (c ACMEChallenge) String() string {
	return strings.ToLower(string(c))
}

// ACMEChallenges is a list of ACME challenges.
type ACMEChallenges []ACMEChallenge

// validate checks if the given challenges are supported.
func (c ACMEChallenges) validate() error {
	for _, ch := range c {
		if err := ch.validate(); err != nil {
			return err
		}
	}
	return nil
}

// toSmallstepType converts the challenges to the smallstep provisioner's
// representation; an empty list yields nil (meaning "all challenges").
func (c ACMEChallenges) toSmallstepType() []provisioner.ACMEChallenge {
	if len(c) == 0 {
		return nil
	}
	ac := make([]provisioner.ACMEChallenge, len(c))
	for i, ch := range c {
		ac[i] = provisioner.ACMEChallenge(ch)
	}
	return ac
}

// stringToChallenges converts raw strings (e.g. Caddyfile arguments)
// into ACMEChallenges without validating them.
func stringToChallenges(chs []string) ACMEChallenges {
	challenges := make(ACMEChallenges, len(chs))
	for i, ch := range chs {
		challenges[i] = ACMEChallenge(ch)
	}
	return challenges
}

================================================
FILE: modules/caddypki/acmeserver/policy.go
================================================
package acmeserver

import (
	"github.com/smallstep/certificates/authority/policy"
	"github.com/smallstep/certificates/authority/provisioner"
)

// Policy defines the criteria for the ACME server
// of when to issue a certificate. Refer to the
// [Certificate Issuance Policy](https://smallstep.com/docs/step-ca/policies/)
// on Smallstep website for the evaluation criteria.
type Policy struct {
	// If a rule set is configured to allow a certain type of name,
	// all other types of names are automatically denied.
	Allow *RuleSet `json:"allow,omitempty"`

	// If a rule set is configured to deny a certain type of name,
	// all other types of names are still allowed.
	Deny *RuleSet `json:"deny,omitempty"`

	// If set to true, the ACME server will allow issuing wildcard certificates.
	AllowWildcardNames bool `json:"allow_wildcard_names,omitempty"`
}

// RuleSet is the specific set of SAN criteria for a certificate
// to be issued or denied.
type RuleSet struct {
	// Domains is a list of DNS domains that are allowed to be issued.
	// It can be in the form of FQDN for specific domain name, or
	// a wildcard domain name format, e.g. *.example.com, to allow
	// sub-domains of a domain.
	Domains []string `json:"domains,omitempty"`

	// IP ranges in the form of CIDR notation or specific IP addresses
	// to be approved or denied for certificates. Non-CIDR IP addresses
	// are matched exactly.
	IPRanges []string `json:"ip_ranges,omitempty"`
}

// normalizeAllowRules returns `nil` if policy is nil, the `Allow` rule is `nil`,
// or all rules within the `Allow` rule are empty. Otherwise, it returns the X509NameOptions
// with the content of the `Allow` rule.
func (p *Policy) normalizeAllowRules() *policy.X509NameOptions {
	if (p == nil) || (p.Allow == nil) || (len(p.Allow.Domains) == 0 && len(p.Allow.IPRanges) == 0) {
		return nil
	}
	return &policy.X509NameOptions{
		DNSDomains: p.Allow.Domains,
		IPRanges:   p.Allow.IPRanges,
	}
}

// normalizeDenyRules returns `nil` if policy is nil, the `Deny` rule is `nil`,
// or all rules within the `Deny` rule are empty. Otherwise, it returns the X509NameOptions
// with the content of the `Deny` rule.
func (p *Policy) normalizeDenyRules() *policy.X509NameOptions {
	if (p == nil) || (p.Deny == nil) || (len(p.Deny.Domains) == 0 && len(p.Deny.IPRanges) == 0) {
		return nil
	}
	return &policy.X509NameOptions{
		DNSDomains: p.Deny.Domains,
		IPRanges:   p.Deny.IPRanges,
	}
}

// normalizeRules returns `nil` if the policy is nil, or if the `Allow` and
// `Deny` rules are `nil`/empty and wildcard names are not allowed. Otherwise
// it returns the combined X509Options.
func (p *Policy) normalizeRules() *provisioner.X509Options {
	if p == nil {
		return nil
	}

	allow := p.normalizeAllowRules()
	deny := p.normalizeDenyRules()
	if allow == nil && deny == nil && !p.AllowWildcardNames {
		return nil
	}

	return &provisioner.X509Options{
		AllowedNames:       allow,
		DeniedNames:        deny,
		AllowWildcardNames: p.AllowWildcardNames,
	}
}

================================================
FILE: modules/caddypki/acmeserver/policy_test.go
================================================
package acmeserver

import (
	"reflect"
	"testing"

	"github.com/smallstep/certificates/authority/policy"
	"github.com/smallstep/certificates/authority/provisioner"
)

// TestPolicyNormalizeAllowRules checks that Allow rules normalize to nil
// when absent/empty and carry over to X509NameOptions otherwise.
func TestPolicyNormalizeAllowRules(t *testing.T) {
	type fields struct {
		Allow              *RuleSet
		Deny               *RuleSet
		AllowWildcardNames bool
	}
	tests := []struct {
		name   string
		fields fields
		want   *policy.X509NameOptions
	}{
		{
			name:   "providing no rules results in 'nil'",
			fields: fields{},
			want:   nil,
		},
		{
			name: "providing 'nil' Allow rules results in 'nil', regardless of Deny rules",
			fields: fields{
				Allow:              nil,
				Deny:               &RuleSet{},
				AllowWildcardNames: true,
			},
			want: nil,
		},
		{
			name: "providing empty Allow rules results in 'nil', regardless of Deny rules",
			fields: fields{
				Allow: &RuleSet{
					Domains:  []string{},
					IPRanges: []string{},
				},
			},
			want: nil,
		},
		{
			name: "rules configured in Allow are returned in X509NameOptions",
			fields: fields{
				Allow: &RuleSet{
					Domains:  []string{"example.com"},
					IPRanges: []string{"127.0.0.1/32"},
				},
			},
			want: &policy.X509NameOptions{
				DNSDomains: []string{"example.com"},
				IPRanges:   []string{"127.0.0.1/32"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Policy{
				Allow:              tt.fields.Allow,
				Deny:               tt.fields.Deny,
				AllowWildcardNames: tt.fields.AllowWildcardNames,
			}
			if got := p.normalizeAllowRules(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Policy.normalizeAllowRules() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestPolicy_normalizeDenyRules mirrors the Allow-rule test for Deny rules.
func TestPolicy_normalizeDenyRules(t *testing.T) {
	type fields struct {
		Allow              *RuleSet
		Deny               *RuleSet
		AllowWildcardNames bool
	}
	tests := []struct {
		name   string
		fields fields
		want   *policy.X509NameOptions
	}{
		{
			name:   "providing no rules results in 'nil'",
			fields: fields{},
			want:   nil,
		},
		{
			name: "providing 'nil' Deny rules results in 'nil', regardless of Allow rules",
			fields: fields{
				Deny:               nil,
				Allow:              &RuleSet{},
				AllowWildcardNames: true,
			},
			want: nil,
		},
		{
			name: "providing empty Deny rules results in 'nil', regardless of Allow rules",
			fields: fields{
				Deny: &RuleSet{
					Domains:  []string{},
					IPRanges: []string{},
				},
			},
			want: nil,
		},
		{
			name: "rules configured in Deny are returned in X509NameOptions",
			fields: fields{
				Deny: &RuleSet{
					Domains:  []string{"example.com"},
					IPRanges: []string{"127.0.0.1/32"},
				},
			},
			want: &policy.X509NameOptions{
				DNSDomains: []string{"example.com"},
				IPRanges:   []string{"127.0.0.1/32"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &Policy{
				Allow:              tt.fields.Allow,
				Deny:               tt.fields.Deny,
				AllowWildcardNames: tt.fields.AllowWildcardNames,
			}
			if got := p.normalizeDenyRules(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Policy.normalizeDenyRules() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestPolicy_normalizeRules checks the combined normalization, including
// the wildcard-names-only case.
func TestPolicy_normalizeRules(t *testing.T) {
	tests := []struct {
		name   string
		policy *Policy
		want   *provisioner.X509Options
	}{
		{
			name:   "'nil' policy results in 'nil' options",
			policy: nil,
			want:   nil,
		},
		{
			name: "'nil' Allow/Deny rules and disallowing wildcard names result in 'nil' X509Options",
			policy: &Policy{
				Allow:              nil,
				Deny:               nil,
				AllowWildcardNames: false,
			},
			want: nil,
		},
		{
			name: "'nil' Allow/Deny rules and allowing wildcard names result in 'nil' Allow/Deny rules in X509Options but allowing wildcard names in X509Options",
			policy: &Policy{
				Allow:              nil,
				Deny:               nil,
				AllowWildcardNames: true,
			},
			want: &provisioner.X509Options{
				AllowWildcardNames: true,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.policy.normalizeRules(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Policy.normalizeRules() = %v, want %v", got, tt.want)
			}
		})
	}
}

================================================
FILE: modules/caddypki/adminapi.go
================================================
// Copyright 2020 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddypki

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(adminAPI{})
}

// adminAPI is a module that serves PKI endpoints to retrieve
// information about the CAs being managed by Caddy.
type adminAPI struct {
	ctx    caddy.Context
	log    *zap.Logger
	pkiApp *PKI // nil unless the pki app was explicitly configured
}

// CaddyModule returns the Caddy module information.
func (adminAPI) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "admin.api.pki",
		New: func() caddy.Module { return new(adminAPI) },
	}
}

// Provision sets up the adminAPI module.
func (a *adminAPI) Provision(ctx caddy.Context) error {
	a.ctx = ctx
	a.log = ctx.Logger(a) // TODO: passing in 'a' is a hack until the admin API is officially extensible (see #5032)

	// Avoid initializing PKI if it wasn't configured.
	// We intentionally ignore the error since it's not
	// fatal if the PKI app is not explicitly configured.
	pkiApp, err := ctx.AppIfConfigured("pki")
	if err == nil {
		a.pkiApp = pkiApp.(*PKI)
	}

	return nil
}

// Routes returns the admin routes for the PKI app.
func (a *adminAPI) Routes() []caddy.AdminRoute {
	return []caddy.AdminRoute{
		{
			Pattern: adminPKIEndpointBase,
			Handler: caddy.AdminHandlerFunc(a.handleAPIEndpoints),
		},
	}
}

// handleAPIEndpoints routes API requests within adminPKIEndpointBase.
func (a *adminAPI) handleAPIEndpoints(w http.ResponseWriter, r *http.Request) error {
	uri := strings.TrimPrefix(r.URL.Path, "/pki/")
	parts := strings.Split(uri, "/")
	switch {
	case len(parts) == 2 && parts[0] == "ca" && parts[1] != "":
		return a.handleCAInfo(w, r)
	case len(parts) == 3 && parts[0] == "ca" && parts[1] != "" && parts[2] == "certificates":
		return a.handleCACerts(w, r)
	}
	return caddy.APIError{
		HTTPStatus: http.StatusNotFound,
		Err:        fmt.Errorf("resource not found: %v", r.URL.Path),
	}
}

// handleCAInfo returns information about a particular
// CA by its ID. If the CA ID is the default, then the CA will be
// provisioned if it has not already been. Other CA IDs will return an
// error if they have not been previously provisioned.
func (a *adminAPI) handleCAInfo(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed: %v", r.Method),
		}
	}

	ca, err := a.getCAFromAPIRequestPath(r)
	if err != nil {
		return err
	}

	rootCert, interCert, err := rootAndIntermediatePEM(ca)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusInternalServerError,
			Err:        fmt.Errorf("failed to get root and intermediate cert for CA %s: %v", ca.ID, err),
		}
	}

	// expand any placeholders in the configured common names
	repl := ca.newReplacer()

	response := caInfo{
		ID:               ca.ID,
		Name:             ca.Name,
		RootCN:           repl.ReplaceAll(ca.RootCommonName, ""),
		IntermediateCN:   repl.ReplaceAll(ca.IntermediateCommonName, ""),
		RootCert:         string(rootCert),
		IntermediateCert: string(interCert),
	}

	encoded, err := json.Marshal(response)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusInternalServerError,
			Err:        err,
		}
	}

	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write(encoded)

	return nil
}

// handleCACerts returns the certificate chain for a particular
// CA by its ID. If the CA ID is the default, then the CA will be
// provisioned if it has not already been. Other CA IDs will return an
// error if they have not been previously provisioned.
func (a *adminAPI) handleCACerts(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method not allowed: %v", r.Method),
		}
	}

	ca, err := a.getCAFromAPIRequestPath(r)
	if err != nil {
		return err
	}

	rootCert, interCert, err := rootAndIntermediatePEM(ca)
	if err != nil {
		return caddy.APIError{
			HTTPStatus: http.StatusInternalServerError,
			Err:        fmt.Errorf("failed to get root and intermediate cert for CA %s: %v", ca.ID, err),
		}
	}

	// write intermediate(s) first, then the root, per PEM chain convention
	w.Header().Set("Content-Type", "application/pem-certificate-chain")
	_, err = w.Write(interCert) //nolint:gosec // false positive... no XSS in a PEM for cryin' out loud
	if err == nil {
		_, _ = w.Write(rootCert) //nolint:gosec // false positive... no XSS in a PEM for cryin' out loud
	}

	return nil
}

// getCAFromAPIRequestPath extracts the CA ID from the request path and
// returns the corresponding CA, provisioning the default CA on demand.
func (a *adminAPI) getCAFromAPIRequestPath(r *http.Request) (*CA, error) {
	// Grab the CA ID from the request path, it should be the 4th segment (/pki/ca/<id>)
	id := strings.Split(r.URL.Path, "/")[3]
	if id == "" {
		return nil, caddy.APIError{
			HTTPStatus: http.StatusBadRequest,
			Err:        fmt.Errorf("missing CA in path"),
		}
	}

	// Find the CA by ID, if PKI is configured
	var ca *CA
	var ok bool
	if a.pkiApp != nil {
		ca, ok = a.pkiApp.CAs[id]
	}

	// If we didn't find the CA, and PKI is not configured
	// then we'll either error out if the CA ID is not the
	// default. If the CA ID is the default, then we'll
	// provision it, because the user probably aims to
	// change their config to enable PKI immediately after
	// if they actually requested the local CA ID.
	if !ok {
		if id != DefaultCAID {
			return nil, caddy.APIError{
				HTTPStatus: http.StatusNotFound,
				Err:        fmt.Errorf("no certificate authority configured with id: %s", id),
			}
		}

		// Provision the default CA, which generates and stores a root
		// certificate in storage, if one doesn't already exist.
		ca = new(CA)
		err := ca.Provision(a.ctx, id, a.log)
		if err != nil {
			return nil, caddy.APIError{
				HTTPStatus: http.StatusInternalServerError,
				Err:        fmt.Errorf("failed to provision CA %s, %w", id, err),
			}
		}
	}

	return ca, nil
}

// rootAndIntermediatePEM returns the CA's root certificate and its full
// intermediate chain, each PEM-encoded.
func rootAndIntermediatePEM(ca *CA) (root, inter []byte, err error) {
	root, err = pemEncodeCert(ca.RootCertificate().Raw)
	if err != nil {
		return root, inter, err
	}
	for _, interCert := range ca.IntermediateCertificateChain() {
		pemBytes, err := pemEncodeCert(interCert.Raw)
		if err != nil {
			return nil, nil, err
		}
		inter = append(inter, pemBytes...)
	}
	return
}

// caInfo is the response structure for the CA info API endpoint.
type caInfo struct { ID string `json:"id"` Name string `json:"name"` RootCN string `json:"root_common_name"` IntermediateCN string `json:"intermediate_common_name"` RootCert string `json:"root_certificate"` IntermediateCert string `json:"intermediate_certificate"` } // adminPKIEndpointBase is the base admin endpoint under which all PKI admin endpoints exist. const adminPKIEndpointBase = "/pki/" // Interface guards var ( _ caddy.AdminRouter = (*adminAPI)(nil) _ caddy.Provisioner = (*adminAPI)(nil) ) ================================================ FILE: modules/caddypki/ca.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddypki import ( "crypto" "crypto/x509" "encoding/json" "errors" "fmt" "io/fs" "path" "sync" "time" "github.com/caddyserver/certmagic" "github.com/smallstep/certificates/authority" "github.com/smallstep/certificates/db" "github.com/smallstep/truststore" "go.uber.org/zap" "github.com/caddyserver/caddy/v2" ) // CA describes a certificate authority, which consists of // root/signing certificates and various settings pertaining // to the issuance of certificates and trusting them. type CA struct { // The user-facing name of the certificate authority. Name string `json:"name,omitempty"` // The name to put in the CommonName field of the // root certificate. 
RootCommonName string `json:"root_common_name,omitempty"` // The name to put in the CommonName field of the // intermediate certificates. IntermediateCommonName string `json:"intermediate_common_name,omitempty"` // The lifetime for the intermediate certificates IntermediateLifetime caddy.Duration `json:"intermediate_lifetime,omitempty"` // Whether Caddy will attempt to install the CA's root // into the system trust store, as well as into Java // and Mozilla Firefox trust stores. Default: true. InstallTrust *bool `json:"install_trust,omitempty"` // The root certificate to use; if null, one will be generated. Root *KeyPair `json:"root,omitempty"` // The intermediate (signing) certificate; if null, one will be generated. Intermediate *KeyPair `json:"intermediate,omitempty"` // How often to check if intermediate (and root, when applicable) certificates need renewal. // Default: 10m. MaintenanceInterval caddy.Duration `json:"maintenance_interval,omitempty"` // The fraction of certificate lifetime (0.0–1.0) after which renewal is attempted. // For example, 0.2 means renew when 20% of the lifetime remains (e.g. ~73 days for a 1-year cert). // Default: 0.2. RenewalWindowRatio float64 `json:"renewal_window_ratio,omitempty"` // Optionally configure a separate storage module associated with this // issuer, instead of using Caddy's global/default-configured storage. // This can be useful if you want to keep your signing keys in a // separate location from your leaf certificates. StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` // The unique config-facing ID of the certificate authority. // Since the ID is set in JSON config via object key, this // field is exported only for purposes of config generation // and module provisioning. 
ID string `json:"-"` storage certmagic.Storage root *x509.Certificate interChain []*x509.Certificate interKey crypto.Signer mu *sync.RWMutex rootCertPath string // mainly used for logging purposes if trusting log *zap.Logger ctx caddy.Context } // Provision sets up the CA. func (ca *CA) Provision(ctx caddy.Context, id string, log *zap.Logger) error { ca.mu = new(sync.RWMutex) ca.log = log.Named("ca." + id) ca.ctx = ctx if id == "" { return fmt.Errorf("CA ID is required (use 'local' for the default CA)") } ca.mu.Lock() ca.ID = id ca.mu.Unlock() if ca.StorageRaw != nil { val, err := ctx.LoadModule(ca, "StorageRaw") if err != nil { return fmt.Errorf("loading storage module: %v", err) } cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage() if err != nil { return fmt.Errorf("creating storage configuration: %v", err) } ca.storage = cmStorage } if ca.storage == nil { ca.storage = ctx.Storage() } if ca.Name == "" { ca.Name = defaultCAName } if ca.RootCommonName == "" { ca.RootCommonName = defaultRootCommonName } if ca.IntermediateCommonName == "" { ca.IntermediateCommonName = defaultIntermediateCommonName } if ca.IntermediateLifetime == 0 { ca.IntermediateLifetime = caddy.Duration(defaultIntermediateLifetime) } if ca.MaintenanceInterval == 0 { ca.MaintenanceInterval = caddy.Duration(defaultMaintenanceInterval) } if ca.RenewalWindowRatio <= 0 || ca.RenewalWindowRatio > 1 { ca.RenewalWindowRatio = defaultRenewalWindowRatio } // load the certs and key that will be used for signing var rootCert *x509.Certificate var rootCertChain, interCertChain []*x509.Certificate var rootKey, interKey crypto.Signer var err error if ca.Root != nil { if ca.Root.Format == "" || ca.Root.Format == "pem_file" { ca.rootCertPath = ca.Root.Certificate } rootCertChain, rootKey, err = ca.Root.Load() rootCert = rootCertChain[0] } else { ca.rootCertPath = "storage:" + ca.storageKeyRootCert() rootCert, rootKey, err = ca.loadOrGenRoot() } if err != nil { return err } if ca.Intermediate != nil { 
interCertChain, interKey, err = ca.Intermediate.Load() } else { actualRootLifetime := time.Until(rootCert.NotAfter) if time.Duration(ca.IntermediateLifetime) >= actualRootLifetime { return fmt.Errorf("intermediate certificate lifetime must be less than actual root certificate lifetime (%s)", actualRootLifetime) } interCertChain, interKey, err = ca.loadOrGenIntermediate(rootCert, rootKey) } if err != nil { return err } ca.mu.Lock() ca.root, ca.interChain, ca.interKey = rootCert, interCertChain, interKey ca.mu.Unlock() return nil } // RootCertificate returns the CA's root certificate (public key). func (ca CA) RootCertificate() *x509.Certificate { ca.mu.RLock() defer ca.mu.RUnlock() return ca.root } // RootKey returns the CA's root private key. Since the root key is // not cached in memory long-term, it needs to be loaded from storage, // which could yield an error. func (ca CA) RootKey() (crypto.Signer, error) { _, rootKey, err := ca.loadOrGenRoot() return rootKey, err } // IntermediateCertificateChain returns the CA's intermediate // certificate chain. func (ca CA) IntermediateCertificateChain() []*x509.Certificate { ca.mu.RLock() defer ca.mu.RUnlock() return ca.interChain } // IntermediateKey returns the CA's intermediate private key. func (ca CA) IntermediateKey() crypto.Signer { ca.mu.RLock() defer ca.mu.RUnlock() return ca.interKey } // NewAuthority returns a new Smallstep-powered signing authority for this CA. // Note that we receive *CA (a pointer) in this method to ensure the closure within it, which // executes at a later time, always has the only copy of the CA so it can access the latest, // renewed certificates since NewAuthority was called. See #4517 and #4669. 
func (ca *CA) NewAuthority(authorityConfig AuthorityConfig) (*authority.Authority, error) {
	// get the root certificate and the issuer cert+key
	rootCert := ca.RootCertificate()

	// set up the signer; cert/key which signs the leaf certs
	var signerOption authority.Option
	if authorityConfig.SignWithRoot {
		// if we're signing with root, we can just pass the
		// cert/key directly, since it's unlikely to expire
		// while Caddy is running (long lifetime)
		var issuerCert *x509.Certificate
		var issuerKey crypto.Signer
		issuerCert = rootCert
		var err error
		issuerKey, err = ca.RootKey()
		if err != nil {
			return nil, fmt.Errorf("loading signing key: %v", err)
		}
		signerOption = authority.WithX509Signer(issuerCert, issuerKey)
	} else {
		// if we're signing with intermediate, we need to make
		// sure it's always fresh, because the intermediate may
		// renew while Caddy is running (medium lifetime)
		signerOption = authority.WithX509SignerFunc(func() ([]*x509.Certificate, crypto.Signer, error) {
			issuerChain := ca.IntermediateCertificateChain()
			issuerCert := issuerChain[0]
			issuerKey := ca.IntermediateKey()
			ca.log.Debug("using intermediate signer",
				zap.String("serial", issuerCert.SerialNumber.String()),
				zap.String("not_before", issuerCert.NotBefore.String()),
				zap.String("not_after", issuerCert.NotAfter.String()))
			return issuerChain, issuerKey, nil
		})
	}

	opts := []authority.Option{
		authority.WithConfig(&authority.Config{
			AuthorityConfig: authorityConfig.AuthConfig,
		}),
		signerOption,
		authority.WithX509RootCerts(rootCert),
	}

	// Add a database if we have one
	if authorityConfig.DB != nil {
		opts = append(opts, authority.WithDatabase(*authorityConfig.DB))
	}
	auth, err := authority.NewEmbedded(opts...)
	if err != nil {
		return nil, fmt.Errorf("initializing certificate authority: %v", err)
	}

	return auth, nil
}

// loadOrGenRoot returns the root certificate and key for this CA,
// preferring (in order): an explicitly-configured Root KeyPair, the
// copy persisted in storage, or a freshly generated root (which is
// then persisted by genRoot).
func (ca CA) loadOrGenRoot() (rootCert *x509.Certificate, rootKey crypto.Signer, err error) {
	if ca.Root != nil {
		rootChain, rootSigner, err := ca.Root.Load()
		if err != nil {
			return nil, nil, err
		}
		return rootChain[0], rootSigner, nil
	}
	rootCertPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyRootCert())
	if err != nil {
		// only a "not found" error means we should generate; any
		// other storage failure is fatal
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, nil, fmt.Errorf("loading root cert: %v", err)
		}
		// TODO: should we require that all or none of the assets are required before overwriting anything?
		rootCert, rootKey, err = ca.genRoot()
		if err != nil {
			return nil, nil, fmt.Errorf("generating root: %v", err)
		}
	}

	// if the cert was loaded from storage (not just generated), parse it
	if rootCert == nil {
		rootCert, err = pemDecodeCertificate(rootCertPEM)
		if err != nil {
			return nil, nil, fmt.Errorf("parsing root certificate PEM: %v", err)
		}
	}
	// likewise, load the key from storage if it wasn't just generated
	if rootKey == nil {
		rootKeyPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyRootKey())
		if err != nil {
			return nil, nil, fmt.Errorf("loading root key: %v", err)
		}
		rootKey, err = certmagic.PEMDecodePrivateKey(rootKeyPEM)
		if err != nil {
			return nil, nil, fmt.Errorf("decoding root key: %v", err)
		}
	}

	return rootCert, rootKey, nil
}

// genRoot generates a new root certificate and key (expanding any
// placeholders in the configured common name) and persists both to
// this CA's storage before returning them.
func (ca CA) genRoot() (rootCert *x509.Certificate, rootKey crypto.Signer, err error) {
	repl := ca.newReplacer()

	rootCert, rootKey, err = generateRoot(repl.ReplaceAll(ca.RootCommonName, ""))
	if err != nil {
		return nil, nil, fmt.Errorf("generating CA root: %v", err)
	}
	rootCertPEM, err := pemEncodeCert(rootCert.Raw)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding root certificate: %v", err)
	}
	err = ca.storage.Store(ca.ctx, ca.storageKeyRootCert(), rootCertPEM)
	if err != nil {
		return nil, nil, fmt.Errorf("saving root certificate: %v", err)
	}
	rootKeyPEM, err := certmagic.PEMEncodePrivateKey(rootKey)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding root key: %v", err)
	}
	err = ca.storage.Store(ca.ctx, ca.storageKeyRootKey(), rootKeyPEM)
	if err != nil {
		return nil, nil, fmt.Errorf("saving root key: %v", err)
	}

	return rootCert, rootKey, nil
}

// loadOrGenIntermediate returns the intermediate certificate chain and
// signing key, loading them from storage if present, or generating and
// persisting new ones (signed by the given root) otherwise.
func (ca CA) loadOrGenIntermediate(rootCert *x509.Certificate, rootKey crypto.Signer) (interCertChain []*x509.Certificate, interKey crypto.Signer, err error) {
	var interCert *x509.Certificate
	interCertPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyIntermediateCert())
	if err != nil {
		if !errors.Is(err, fs.ErrNotExist) {
			return nil, nil, fmt.Errorf("loading intermediate cert: %v", err)
		}

		// TODO: should we require that all or none of the assets are required before overwriting anything?
		interCert, interKey, err = ca.genIntermediate(rootCert, rootKey)
		if err != nil {
			return nil, nil, fmt.Errorf("generating new intermediate cert: %v", err)
		}
		interCertChain = append(interCertChain, interCert)
	}

	// if nothing was generated above, the PEM from storage holds the chain
	if len(interCertChain) == 0 {
		interCertChain, err = pemDecodeCertificateChain(interCertPEM)
		if err != nil {
			return nil, nil, fmt.Errorf("decoding intermediate certificate PEM: %v", err)
		}
	}

	if interKey == nil {
		interKeyPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyIntermediateKey())
		if err != nil {
			return nil, nil, fmt.Errorf("loading intermediate key: %v", err)
		}
		interKey, err = certmagic.PEMDecodePrivateKey(interKeyPEM)
		if err != nil {
			return nil, nil, fmt.Errorf("decoding intermediate key: %v", err)
		}
	}

	return interCertChain, interKey, nil
}

// genIntermediate generates a new intermediate certificate (signed by
// the given root) and key, and persists both to this CA's storage.
func (ca CA) genIntermediate(rootCert *x509.Certificate, rootKey crypto.Signer) (interCert *x509.Certificate, interKey crypto.Signer, err error) {
	repl := ca.newReplacer()

	interCert, interKey, err = generateIntermediate(repl.ReplaceAll(ca.IntermediateCommonName, ""), rootCert, rootKey, time.Duration(ca.IntermediateLifetime))
	if err != nil {
		return nil, nil, fmt.Errorf("generating CA intermediate: %v", err)
	}
	interCertPEM, err := pemEncodeCert(interCert.Raw)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding intermediate certificate: %v", err)
	}
	err = ca.storage.Store(ca.ctx, ca.storageKeyIntermediateCert(), interCertPEM)
	if err != nil {
		return nil, nil, fmt.Errorf("saving intermediate certificate: %v", err)
	}
	interKeyPEM, err := certmagic.PEMEncodePrivateKey(interKey)
	if err != nil {
		return nil, nil, fmt.Errorf("encoding intermediate key: %v", err)
	}
	err = ca.storage.Store(ca.ctx, ca.storageKeyIntermediateKey(), interKeyPEM)
	if err != nil {
		return nil, nil, fmt.Errorf("saving intermediate key: %v", err)
	}

	return interCert, interKey, nil
}

// storageKeyCAPrefix is the storage path prefix for all of this CA's assets.
func (ca CA) storageKeyCAPrefix() string {
	return path.Join("pki", "authorities", certmagic.StorageKeys.Safe(ca.ID))
}

// storageKeyRootCert is the storage key for the root certificate.
func (ca CA) storageKeyRootCert() string {
	return path.Join(ca.storageKeyCAPrefix(), "root.crt")
}

// storageKeyRootKey is the storage key for the root private key.
func (ca CA) storageKeyRootKey() string {
	return path.Join(ca.storageKeyCAPrefix(), "root.key")
}

// storageKeyIntermediateCert is the storage key for the intermediate certificate.
func (ca CA) storageKeyIntermediateCert() string {
	return path.Join(ca.storageKeyCAPrefix(), "intermediate.crt")
}

// storageKeyIntermediateKey is the storage key for the intermediate private key.
func (ca CA) storageKeyIntermediateKey() string {
	return path.Join(ca.storageKeyCAPrefix(), "intermediate.key")
}

// newReplacer returns a replacer that knows the {pki.ca.name} placeholder.
func (ca CA) newReplacer() *caddy.Replacer {
	repl := caddy.NewReplacer()
	repl.Set("pki.ca.name", ca.Name)
	return repl
}

// installRoot installs this CA's root certificate into the
// local trust store(s) if it is not already trusted. The CA
// must already be provisioned.
func (ca CA) installRoot() error {
	// avoid password prompt if already trusted
	if trusted(ca.root) {
		ca.log.Info("root certificate is already trusted by system",
			zap.String("path", ca.rootCertPath))
		return nil
	}

	ca.log.Warn("installing root certificate (you might be prompted for password)",
		zap.String("path", ca.rootCertPath))

	return truststore.Install(ca.root,
		truststore.WithDebug(),
		truststore.WithFirefox(),
		truststore.WithJava(),
	)
}

// AuthorityConfig is used to help a CA configure
// the underlying signing authority.
type AuthorityConfig struct {
	SignWithRoot bool

	// TODO: should we just embed the underlying authority.Config struct type?
	DB         *db.AuthDB
	AuthConfig *authority.AuthConfig
}

const (
	// DefaultCAID is the default CA ID.
	DefaultCAID = "local"

	defaultCAName                 = "Caddy Local Authority"
	defaultRootCommonName         = "{pki.ca.name} - {time.now.year} ECC Root"
	defaultIntermediateCommonName = "{pki.ca.name} - ECC Intermediate"

	defaultRootLifetime         = 24 * time.Hour * 30 * 12 * 10
	defaultIntermediateLifetime = 24 * time.Hour * 7

	defaultMaintenanceInterval = 10 * time.Minute
	defaultRenewalWindowRatio  = 0.2
)

================================================
FILE: modules/caddypki/certificates.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki import ( "crypto" "crypto/x509" "time" "go.step.sm/crypto/keyutil" "go.step.sm/crypto/x509util" ) func generateRoot(commonName string) (*x509.Certificate, crypto.Signer, error) { template, signer, err := newCert(commonName, x509util.DefaultRootTemplate, defaultRootLifetime) if err != nil { return nil, nil, err } root, err := x509util.CreateCertificate(template, template, signer.Public(), signer) if err != nil { return nil, nil, err } return root, signer, nil } func generateIntermediate(commonName string, rootCrt *x509.Certificate, rootKey crypto.Signer, lifetime time.Duration) (*x509.Certificate, crypto.Signer, error) { template, signer, err := newCert(commonName, x509util.DefaultIntermediateTemplate, lifetime) if err != nil { return nil, nil, err } intermediate, err := x509util.CreateCertificate(template, rootCrt, signer.Public(), rootKey) if err != nil { return nil, nil, err } return intermediate, signer, nil } func newCert(commonName, templateName string, lifetime time.Duration) (cert *x509.Certificate, signer crypto.Signer, err error) { signer, err = keyutil.GenerateDefaultSigner() if err != nil { return nil, nil, err } csr, err := x509util.CreateCertificateRequest(commonName, []string{}, signer) if err != nil { return nil, nil, err } template, err := x509util.NewCertificate(csr, x509util.WithTemplate(templateName, x509util.CreateTemplateData(commonName, []string{}))) if err != nil { return nil, nil, err } cert = template.GetCertificate() cert.NotBefore = time.Now().Truncate(time.Second) cert.NotAfter = cert.NotBefore.Add(lifetime) return cert, signer, nil } ================================================ FILE: modules/caddypki/command.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddypki

import (
	"crypto/x509"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"net/http"
	"os"
	"path"

	"github.com/smallstep/truststore"
	"github.com/spf13/cobra"

	caddycmd "github.com/caddyserver/caddy/v2/cmd"

	"github.com/caddyserver/caddy/v2"
)

// init registers the 'trust' and 'untrust' CLI subcommands, which
// install/uninstall a CA's root certificate in the local trust stores.
func init() {
	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "trust",
		Usage: "[--ca ] [--address ] [--config  [--adapter ]]",
		Short: "Installs a CA certificate into local trust stores",
		Long: `
Adds a root certificate into the local trust stores.

Caddy will attempt to install its root certificates into the local
trust stores automatically when they are first generated, but it
might fail if Caddy doesn't have the appropriate permissions to
write to the trust store. This command is necessary to pre-install
the certificates before using them, if the server process runs as
an unprivileged user (such as via systemd).

By default, this command installs the root certificate for Caddy's
default CA (i.e. 'local'). You may specify the ID of another CA
with the --ca flag.

This command will attempt to connect to Caddy's admin API running at
'` + caddy.DefaultAdminListen + `' to fetch the root certificate. You may
explicitly specify the --address, or use the --config flag to load
the admin address from your config, if not using the default.`,
		CobraFunc: func(cmd *cobra.Command) {
			cmd.Flags().StringP("ca", "", "", "The ID of the CA to trust (defaults to 'local')")
			cmd.Flags().StringP("address", "", "", "Address of the administration API listener (if --config is not used)")
			cmd.Flags().StringP("config", "c", "", "Configuration file (if --address is not used)")
			cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (if --config is used)")
			cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdTrust)
		},
	})

	caddycmd.RegisterCommand(caddycmd.Command{
		Name:  "untrust",
		Usage: "[--cert ] | [[--ca ] [--address ] [--config  [--adapter ]]]",
		Short: "Untrusts a locally-trusted CA certificate",
		Long: `
Untrusts a root certificate from the local trust store(s).

This command uninstalls trust; it does not necessarily delete the
root certificate from trust stores entirely. Thus, repeatedly
trusting and untrusting new certificates can fill up trust databases.

This command does not delete or modify certificate files from Caddy's
configured storage.

This command can be used in one of two ways. Either by specifying
which certificate to untrust by a direct path to the certificate
file with the --cert flag, or by fetching the root certificate for
the CA from the admin API (default behaviour).

If the admin API is used, then the CA defaults to 'local'. You may
specify the ID of another CA with the --ca flag. By default, this
will attempt to connect to the Caddy's admin API running at
'` + caddy.DefaultAdminListen + `' to fetch the root certificate.
You may explicitly specify the --address, or use the --config flag
to load the admin address from your config, if not using the default.`,
		CobraFunc: func(cmd *cobra.Command) {
			cmd.Flags().StringP("cert", "p", "", "The path to the CA certificate to untrust")
			cmd.Flags().StringP("ca", "", "", "The ID of the CA to untrust (defaults to 'local')")
			cmd.Flags().StringP("address", "", "", "Address of the administration API listener (if --config is not used)")
			cmd.Flags().StringP("config", "c", "", "Configuration file (if --address is not used)")
			cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (if --config is used)")
			cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdUntrust)
		},
	})
}

// cmdTrust is the CLI handler for 'caddy trust': it fetches the root
// certificate of the requested CA from the running server's admin API
// and installs it into the local trust store(s).
func cmdTrust(fl caddycmd.Flags) (int, error) {
	caID := fl.String("ca")
	addrFlag := fl.String("address")
	configFlag := fl.String("config")
	configAdapterFlag := fl.String("adapter")

	// Prepare the URI to the admin endpoint
	if caID == "" {
		caID = DefaultCAID
	}

	// Determine where we're sending the request to get the CA info
	adminAddr, err := caddycmd.DetermineAdminAPIAddress(addrFlag, nil, configFlag, configAdapterFlag)
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
	}

	// Fetch the root cert from the admin API
	rootCert, err := rootCertFromAdmin(adminAddr, caID)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// Set up the CA struct; we only need to fill in the root
	// because we're only using it to make use of the installRoot()
	// function. Also needs a logger for warnings, and a "cert path"
	// for the root cert; since we're loading from the API and we
	// don't know the actual storage path via this flow, we'll just
	// pass through the admin API address instead.
	ca := CA{
		log:          caddy.Log(),
		root:         rootCert,
		rootCertPath: adminAddr + path.Join(adminPKIEndpointBase, "ca", caID),
	}

	// Install the cert!
	err = ca.installRoot()
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	return caddy.ExitCodeSuccess, nil
}

// cmdUntrust is the CLI handler for 'caddy untrust': it removes trust
// for a CA root certificate, either from a local file (--cert) or by
// fetching the root from the admin API for the given CA ID.
func cmdUntrust(fl caddycmd.Flags) (int, error) {
	certFile := fl.String("cert")
	caID := fl.String("ca")
	addrFlag := fl.String("address")
	configFlag := fl.String("config")
	configAdapterFlag := fl.String("adapter")

	// --cert is mutually exclusive with the admin-API-based flags
	if certFile != "" && (caID != "" || addrFlag != "" || configFlag != "") {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("conflicting command line arguments, cannot use --cert with other flags")
	}

	// If a file was specified, try to uninstall the cert matching that file
	if certFile != "" {
		// Sanity check, make sure cert file exists first
		_, err := os.Stat(certFile)
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing certificate file: %v", err)
		}

		// Uninstall the file!
		err = truststore.UninstallFile(certFile,
			truststore.WithDebug(),
			truststore.WithFirefox(),
			truststore.WithJava())
		if err != nil {
			return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to uninstall certificate file: %v", err)
		}

		return caddy.ExitCodeSuccess, nil
	}

	// Prepare the URI to the admin endpoint
	if caID == "" {
		caID = DefaultCAID
	}

	// Determine where we're sending the request to get the CA info
	adminAddr, err := caddycmd.DetermineAdminAPIAddress(addrFlag, nil, configFlag, configAdapterFlag)
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
	}

	// Fetch the root cert from the admin API
	rootCert, err := rootCertFromAdmin(adminAddr, caID)
	if err != nil {
		return caddy.ExitCodeFailedStartup, err
	}

	// Uninstall the cert!
	err = truststore.Uninstall(rootCert,
		truststore.WithDebug(),
		truststore.WithFirefox(),
		truststore.WithJava())
	if err != nil {
		return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to uninstall certificate file: %v", err)
	}

	return caddy.ExitCodeSuccess, nil
}

// rootCertFromAdmin makes the API request to fetch the root certificate for the named CA via admin API.
func rootCertFromAdmin(adminAddr string, caID string) (*x509.Certificate, error) { uri := path.Join(adminPKIEndpointBase, "ca", caID) // Make the request to fetch the CA info resp, err := caddycmd.AdminAPIRequest(adminAddr, http.MethodGet, uri, make(http.Header), nil) if err != nil { return nil, fmt.Errorf("requesting CA info: %v", err) } defer resp.Body.Close() // Decode the response caInfo := new(caInfo) err = json.NewDecoder(resp.Body).Decode(caInfo) if err != nil { return nil, fmt.Errorf("failed to decode JSON response: %v", err) } // Decode the root cert rootBlock, _ := pem.Decode([]byte(caInfo.RootCert)) if rootBlock == nil { return nil, fmt.Errorf("failed to decode root certificate: %v", err) } rootCert, err := x509.ParseCertificate(rootBlock.Bytes) if err != nil { return nil, fmt.Errorf("failed to parse root certificate: %v", err) } return rootCert, nil } ================================================ FILE: modules/caddypki/crypto.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddypki

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"os"

	"github.com/caddyserver/certmagic"
	"go.step.sm/crypto/pemutil"
)

// pemDecodeCertificate parses exactly one PEM-encoded CERTIFICATE
// block from the input and returns the parsed certificate. Extra
// trailing data or a non-certificate block type is an error.
func pemDecodeCertificate(pemDER []byte) (*x509.Certificate, error) {
	block, rest := pem.Decode(pemDER)
	switch {
	case block == nil:
		return nil, fmt.Errorf("no PEM block found")
	case len(rest) > 0:
		return nil, fmt.Errorf("input contained more than a single PEM block")
	case block.Type != "CERTIFICATE":
		return nil, fmt.Errorf("expected PEM block type to be CERTIFICATE, but got '%s'", block.Type)
	}
	return x509.ParseCertificate(block.Bytes)
}

// pemDecodeCertificateChain parses one or more PEM-encoded
// certificates from the input into a chain.
func pemDecodeCertificateChain(pemDER []byte) ([]*x509.Certificate, error) {
	bundle, err := pemutil.ParseCertificateBundle(pemDER)
	if err != nil {
		return nil, fmt.Errorf("failed parsing certificate chain: %w", err)
	}
	return bundle, nil
}

// pemEncodeCert PEM-encodes a DER-encoded certificate.
func pemEncodeCert(der []byte) ([]byte, error) {
	return pemEncode("CERTIFICATE", der)
}

// pemEncode wraps the given bytes in a PEM block of the given type.
func pemEncode(blockType string, b []byte) ([]byte, error) {
	var out bytes.Buffer
	err := pem.Encode(&out, &pem.Block{Type: blockType, Bytes: b})
	return out.Bytes(), err
}

// trusted reports whether the certificate verifies against the
// system's trust roots (empty VerifyOptions = system defaults).
func trusted(cert *x509.Certificate) bool {
	chains, err := cert.Verify(x509.VerifyOptions{})
	return err == nil && len(chains) > 0
}

// KeyPair represents a public-private key pair, where the
// public key is also called a certificate.
type KeyPair struct {
	// The certificate. By default, this should be the path to
	// a PEM file unless format is something else.
	Certificate string `json:"certificate,omitempty"`

	// The private key. By default, this should be the path to
	// a PEM file unless format is something else.
	PrivateKey string `json:"private_key,omitempty"` //nolint:gosec // false positive: yes it's exported, since it needs to encode/decode as JSON; and is often just a filepath

	// The format in which the certificate and private
	// key are provided. Default: pem_file
	Format string `json:"format,omitempty"`
}

// Load loads the certificate chain and (optional) private key from
// the corresponding files, using the configured format. If a
// private key is read, it will be verified to belong to the first
// certificate in the chain.
func (kp KeyPair) Load() ([]*x509.Certificate, crypto.Signer, error) {
	switch kp.Format {
	case "", "pem_file":
		certPEM, err := os.ReadFile(kp.Certificate)
		if err != nil {
			return nil, nil, err
		}
		chain, err := pemDecodeCertificateChain(certPEM)
		if err != nil {
			return nil, nil, err
		}

		var signer crypto.Signer
		if kp.PrivateKey != "" {
			keyPEM, err := os.ReadFile(kp.PrivateKey)
			if err != nil {
				return nil, nil, err
			}
			signer, err = certmagic.PEMDecodePrivateKey(keyPEM)
			if err != nil {
				return nil, nil, err
			}
			// the key must belong to the leaf (first) certificate
			if err := verifyKeysMatch(chain[0], signer); err != nil {
				return nil, nil, err
			}
		}
		return chain, signer, nil

	default:
		return nil, nil, fmt.Errorf("unsupported format: %s", kp.Format)
	}
}

// verifyKeysMatch verifies that the public key in the [x509.Certificate] matches
// the public key of the [crypto.Signer].
func verifyKeysMatch(crt *x509.Certificate, signer crypto.Signer) error { switch pub := crt.PublicKey.(type) { case *rsa.PublicKey: pk, ok := signer.Public().(*rsa.PublicKey) if !ok { return fmt.Errorf("private key type %T does not match issuer public key type %T", signer.Public(), pub) } if !pub.Equal(pk) { return errors.New("private key does not match issuer public key") } case *ecdsa.PublicKey: pk, ok := signer.Public().(*ecdsa.PublicKey) if !ok { return fmt.Errorf("private key type %T does not match issuer public key type %T", signer.Public(), pub) } if !pub.Equal(pk) { return errors.New("private key does not match issuer public key") } case ed25519.PublicKey: pk, ok := signer.Public().(ed25519.PublicKey) if !ok { return fmt.Errorf("private key type %T does not match issuer public key type %T", signer.Public(), pub) } if !pub.Equal(pk) { return errors.New("private key does not match issuer public key") } default: return fmt.Errorf("unsupported key type: %T", pub) } return nil } ================================================ FILE: modules/caddypki/crypto_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddypki

import (
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"os"
	"path/filepath"
	"testing"
	"time"

	"go.step.sm/crypto/keyutil"
	"go.step.sm/crypto/pemutil"
)

// TestKeyPair_Load exercises KeyPair.Load against a root and an
// intermediate certificate written to a temp dir, covering single-cert,
// chain, with/without key, and mismatched-key cases.
func TestKeyPair_Load(t *testing.T) {
	// build a self-signed root
	rootSigner, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Failed creating signer: %v", err)
	}
	tmpl := &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-root"},
		IsCA:       true,
		MaxPathLen: 3,
	}
	rootBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootSigner.Public(), rootSigner)
	if err != nil {
		t.Fatalf("Creating root certificate failed: %v", err)
	}
	root, err := x509.ParseCertificate(rootBytes)
	if err != nil {
		t.Fatalf("Parsing root certificate failed: %v", err)
	}

	// build an intermediate signed by the root
	intermediateSigner, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		// fixed typo: was "intermedaite"
		t.Fatalf("Creating intermediate signer failed: %v", err)
	}
	intermediateBytes, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-first-intermediate"},
		IsCA:       true,
		MaxPathLen: 2,
		NotAfter:   time.Now().Add(time.Hour),
	}, root, intermediateSigner.Public(), rootSigner)
	if err != nil {
		t.Fatalf("Creating intermediate certificate failed: %v", err)
	}
	intermediate, err := x509.ParseCertificate(intermediateBytes)
	if err != nil {
		t.Fatalf("Parsing intermediate certificate failed: %v", err)
	}

	// serialize the full chain (intermediate first, then root) into one PEM blob
	var chainContents []byte
	chain := []*x509.Certificate{intermediate, root}
	for _, cert := range chain {
		b, err := pemutil.Serialize(cert)
		if err != nil {
			// fixed message: this loop serializes the root too, not just the intermediate
			t.Fatalf("Failed serializing certificate: %v", err)
		}
		chainContents = append(chainContents, pem.EncodeToMemory(b)...)
	}

	// write all certificates and keys to disk
	dir := t.TempDir()
	rootCertFile := filepath.Join(dir, "root.pem")
	if _, err = pemutil.Serialize(root, pemutil.WithFilename(rootCertFile)); err != nil {
		t.Fatalf("Failed serializing root certificate: %v", err)
	}
	rootKeyFile := filepath.Join(dir, "root.key")
	if _, err = pemutil.Serialize(rootSigner, pemutil.WithFilename(rootKeyFile)); err != nil {
		t.Fatalf("Failed serializing root key: %v", err)
	}
	intermediateCertFile := filepath.Join(dir, "intermediate.pem")
	if _, err = pemutil.Serialize(intermediate, pemutil.WithFilename(intermediateCertFile)); err != nil {
		t.Fatalf("Failed serializing intermediate certificate: %v", err)
	}
	intermediateKeyFile := filepath.Join(dir, "intermediate.key")
	if _, err = pemutil.Serialize(intermediateSigner, pemutil.WithFilename(intermediateKeyFile)); err != nil {
		t.Fatalf("Failed serializing intermediate key: %v", err)
	}
	chainFile := filepath.Join(dir, "chain.pem")
	if err := os.WriteFile(chainFile, chainContents, 0o644); err != nil {
		t.Fatalf("Failed writing intermediate chain: %v", err)
	}

	t.Run("ok/single-certificate-without-signer", func(t *testing.T) {
		kp := KeyPair{
			Certificate: rootCertFile,
		}
		chain, signer, err := kp.Load()
		if err != nil {
			t.Fatalf("Failed loading KeyPair: %v", err)
		}
		if len(chain) != 1 {
			t.Errorf("Expected 1 certificate in chain; got %d", len(chain))
		}
		if signer != nil {
			t.Error("Expected no signer to be returned")
		}
	})
	t.Run("ok/single-certificate-with-signer", func(t *testing.T) {
		kp := KeyPair{
			Certificate: rootCertFile,
			PrivateKey:  rootKeyFile,
		}
		chain, signer, err := kp.Load()
		if err != nil {
			t.Fatalf("Failed loading KeyPair: %v", err)
		}
		if len(chain) != 1 {
			t.Errorf("Expected 1 certificate in chain; got %d", len(chain))
		}
		if signer == nil {
			t.Error("Expected signer to be returned")
		}
	})
	t.Run("ok/multiple-certificates-with-signer", func(t *testing.T) {
		kp := KeyPair{
			Certificate: chainFile,
			PrivateKey:  intermediateKeyFile,
		}
		chain, signer, err := kp.Load()
		if err != nil {
			t.Fatalf("Failed loading KeyPair: %v", err)
		}
		if len(chain) != 2 {
			t.Errorf("Expected 2 certificates in chain; got %d", len(chain))
		}
		if signer == nil {
			t.Error("Expected signer to be returned")
		}
	})
	t.Run("fail/non-matching-public-key", func(t *testing.T) {
		// intermediate cert paired with the root's key must be rejected
		kp := KeyPair{
			Certificate: intermediateCertFile,
			PrivateKey:  rootKeyFile,
		}
		chain, signer, err := kp.Load()
		if err == nil {
			t.Error("Expected loading KeyPair to return an error")
		}
		if chain != nil {
			t.Error("Expected no chain to be returned")
		}
		if signer != nil {
			t.Error("Expected no signer to be returned")
		}
	})
}

// Test_pemDecodeCertificate checks that decoding requires exactly one
// PEM-encoded CERTIFICATE block.
func Test_pemDecodeCertificate(t *testing.T) {
	signer, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Failed creating signer: %v", err)
	}
	tmpl := &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-cert"},
		IsCA:       true,
		MaxPathLen: 3,
	}
	derBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, signer.Public(), signer)
	if err != nil {
		t.Fatalf("Creating root certificate failed: %v", err)
	}
	cert, err := x509.ParseCertificate(derBytes)
	if err != nil {
		t.Fatalf("Parsing root certificate failed: %v", err)
	}
	pemBlock, err := pemutil.Serialize(cert)
	if err != nil {
		t.Fatalf("Failed serializing certificate: %v", err)
	}
	pemData := pem.EncodeToMemory(pemBlock)

	t.Run("ok", func(t *testing.T) {
		cert, err := pemDecodeCertificate(pemData)
		if err != nil {
			t.Fatalf("Failed decoding PEM data: %v", err)
		}
		if cert == nil {
			t.Errorf("Expected a certificate in PEM data")
		}
	})
	t.Run("fail/no-pem-data", func(t *testing.T) {
		cert, err := pemDecodeCertificate(nil)
		if err == nil {
			t.Fatalf("Expected pemDecodeCertificate to return an error")
		}
		if cert != nil {
			t.Errorf("Expected pemDecodeCertificate to return nil")
		}
	})
	t.Run("fail/multiple", func(t *testing.T) {
		// two concatenated certificates is an error for the single-cert decoder
		multiplePEMData := append(pemData, pemData...)
		cert, err := pemDecodeCertificate(multiplePEMData)
		if err == nil {
			t.Fatalf("Expected pemDecodeCertificate to return an error")
		}
		if cert != nil {
			t.Errorf("Expected pemDecodeCertificate to return nil")
		}
	})
	t.Run("fail/no-pem-certificate", func(t *testing.T) {
		pkData := pem.EncodeToMemory(&pem.Block{
			Type:  "PRIVATE KEY",
			Bytes: []byte("some-bogus-private-key"),
		})
		cert, err := pemDecodeCertificate(pkData)
		if err == nil {
			t.Fatalf("Expected pemDecodeCertificate to return an error")
		}
		if cert != nil {
			t.Errorf("Expected pemDecodeCertificate to return nil")
		}
	})
}

// Test_pemDecodeCertificateChain checks decoding of one or more
// PEM-encoded certificates, and the error paths for non-certificate data.
func Test_pemDecodeCertificateChain(t *testing.T) {
	signer, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Failed creating signer: %v", err)
	}
	tmpl := &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-cert"},
		IsCA:       true,
		MaxPathLen: 3,
	}
	derBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, signer.Public(), signer)
	if err != nil {
		t.Fatalf("Creating root certificate failed: %v", err)
	}
	cert, err := x509.ParseCertificate(derBytes)
	if err != nil {
		t.Fatalf("Parsing root certificate failed: %v", err)
	}
	pemBlock, err := pemutil.Serialize(cert)
	if err != nil {
		t.Fatalf("Failed serializing certificate: %v", err)
	}
	pemData := pem.EncodeToMemory(pemBlock)

	t.Run("ok/single", func(t *testing.T) {
		certs, err := pemDecodeCertificateChain(pemData)
		if err != nil {
			t.Fatalf("Failed decoding PEM data: %v", err)
		}
		if len(certs) != 1 {
			t.Errorf("Expected 1 certificate in PEM data; got %d", len(certs))
		}
	})
	t.Run("ok/multiple", func(t *testing.T) {
		multiplePEMData := append(pemData, pemData...)
		certs, err := pemDecodeCertificateChain(multiplePEMData)
		if err != nil {
			t.Fatalf("Failed decoding PEM data: %v", err)
		}
		if len(certs) != 2 {
			t.Errorf("Expected 2 certificates in PEM data; got %d", len(certs))
		}
	})
	t.Run("fail/no-pem-certificate", func(t *testing.T) {
		pkData := pem.EncodeToMemory(&pem.Block{
			Type:  "PRIVATE KEY",
			Bytes: []byte("some-bogus-private-key"),
		})
		certs, err := pemDecodeCertificateChain(pkData)
		if err == nil {
			t.Fatalf("Expected pemDecodeCertificateChain to return an error")
		}
		if len(certs) != 0 {
			t.Errorf("Expected 0 certificates in PEM data; got %d", len(certs))
		}
	})
	t.Run("fail/no-der-certificate", func(t *testing.T) {
		certs, err := pemDecodeCertificateChain([]byte("invalid-der-data"))
		if err == nil {
			t.Fatalf("Expected pemDecodeCertificateChain to return an error")
		}
		if len(certs) != 0 {
			t.Errorf("Expected 0 certificates in PEM data; got %d", len(certs))
		}
	})
}

================================================
FILE: modules/caddypki/maintain.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki

import (
	"crypto/x509"
	"fmt"
	"log"
	"runtime/debug"
	"time"

	"go.uber.org/zap"
)

// maintenanceForCA runs the renewal loop for a single CA on its own
// ticker until the PKI app's context is canceled.
func (p *PKI) maintenanceForCA(ca *CA) {
	// catch panics so one CA's maintenance goroutine cannot take down
	// the whole process; std log is used deliberately here as a
	// last-resort sink inside the panic handler
	defer func() {
		if err := recover(); err != nil {
			log.Printf("[PANIC] PKI maintenance for CA %s: %v\n%s", ca.ID, err, debug.Stack())
		}
	}()

	interval := time.Duration(ca.MaintenanceInterval)
	if interval <= 0 {
		interval = defaultMaintenanceInterval
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// FIX: previously the error was silently discarded (_ =);
			// log it the same way renewCerts does
			if err := p.renewCertsForCA(ca); err != nil {
				p.log.Error("renewing intermediate certificates",
					zap.Error(err),
					zap.String("ca", ca.ID))
			}
		case <-p.ctx.Done():
			return
		}
	}
}

// renewCerts performs a renewal pass over all configured CAs,
// logging (but not propagating) any per-CA failures.
func (p *PKI) renewCerts() {
	for _, ca := range p.CAs {
		err := p.renewCertsForCA(ca)
		if err != nil {
			p.log.Error("renewing intermediate certificates",
				zap.Error(err),
				zap.String("ca", ca.ID))
		}
	}
}

// renewCertsForCA renews the intermediate certificate of a single CA if it
// is within its renewal window. Root renewal is not implemented yet; an
// expiring root is only warned about. Certificates supplied manually in the
// config (ca.Root / ca.Intermediate non-nil) are never touched.
func (p *PKI) renewCertsForCA(ca *CA) error {
	ca.mu.Lock()
	defer ca.mu.Unlock()

	log := p.log.With(zap.String("ca", ca.ID))

	// only maintain the root if it's not manually provided in the config
	if ca.Root == nil {
		if ca.needsRenewal(ca.root) {
			// TODO: implement root renewal (use same key)
			log.Warn("root certificate expiring soon (FIXME: ROOT RENEWAL NOT YET IMPLEMENTED)",
				// FIX: report the root's remaining time, not the intermediate's
				zap.Duration("time_remaining", time.Until(ca.root.NotAfter)),
			)
		}
	}

	// only maintain the intermediate if it's not manually provided in the config
	if ca.Intermediate == nil {
		if ca.needsRenewal(ca.interChain[0]) {
			log.Info("intermediate expires soon; renewing",
				zap.Duration("time_remaining", time.Until(ca.interChain[0].NotAfter)),
			)

			rootCert, rootKey, err := ca.loadOrGenRoot()
			if err != nil {
				return fmt.Errorf("loading root key: %v", err)
			}
			interCert, interKey, err := ca.genIntermediate(rootCert, rootKey)
			if err != nil {
				return fmt.Errorf("generating new certificate: %v", err)
			}
			ca.interChain, ca.interKey = []*x509.Certificate{interCert}, interKey

			log.Info("renewed intermediate",
				zap.Time("new_expiration", ca.interChain[0].NotAfter),
			)
		}
	}

	return nil
}

// needsRenewal reports whether the certificate is within its renewal window
// (i.e. the fraction of lifetime remaining is less than or equal to RenewalWindowRatio).
func (ca *CA) needsRenewal(cert *x509.Certificate) bool {
	ratio := ca.RenewalWindowRatio
	// FIX: a ratio outside (0, 1] cannot describe a fraction of the
	// lifetime, so fall back to the default (previously only ratio <= 0
	// fell back, even though an invalid ratio is documented as using the
	// default)
	if ratio <= 0 || ratio > 1 {
		ratio = defaultRenewalWindowRatio
	}
	lifetime := cert.NotAfter.Sub(cert.NotBefore)
	renewalWindow := time.Duration(float64(lifetime) * ratio)
	renewalWindowStart := cert.NotAfter.Add(-renewalWindow)
	return time.Now().After(renewalWindowStart)
}

================================================
FILE: modules/caddypki/maintain_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki

import (
	"crypto/x509"
	"testing"
	"time"
)

// TestCA_needsRenewal verifies the renewal-window calculation for several
// RenewalWindowRatio values, including out-of-range ratios that are
// expected to fall back to the default.
func TestCA_needsRenewal(t *testing.T) {
	now := time.Now()
	// cert with 100 days lifetime; last 20% = 20 days before expiry
	// So renewal window starts at (NotAfter - 20 days)
	makeCert := func(daysUntilExpiry int, lifetimeDays int) *x509.Certificate {
		// NotAfter is measured from now; NotBefore is back-dated so the
		// certificate has exactly lifetimeDays of total validity
		notAfter := now.AddDate(0, 0, daysUntilExpiry)
		notBefore := notAfter.AddDate(0, 0, -lifetimeDays)
		return &x509.Certificate{NotBefore: notBefore, NotAfter: notAfter}
	}
	tests := []struct {
		name   string
		ca     *CA
		cert   *x509.Certificate
		expect bool
	}{
		{
			// 10 of 100 days left, window is last 20 days -> renew
			name:   "inside renewal window with ratio 0.2",
			ca:     &CA{RenewalWindowRatio: 0.2},
			cert:   makeCert(10, 100),
			expect: true,
		},
		{
			// 50 of 100 days left -> well outside the window
			name:   "outside renewal window with ratio 0.2",
			ca:     &CA{RenewalWindowRatio: 0.2},
			cert:   makeCert(50, 100),
			expect: false,
		},
		{
			// boundary check: 21 days left is just outside a 20-day window
			name:   "outside renewal window with 21 days left",
			ca:     &CA{RenewalWindowRatio: 0.2},
			cert:   makeCert(21, 100),
			expect: false,
		},
		{
			// 30 of 100 days left, window is last 50 days -> renew
			name:   "just inside renewal window with ratio 0.5",
			ca:     &CA{RenewalWindowRatio: 0.5},
			cert:   makeCert(30, 100),
			expect: true,
		},
		{
			name:   "zero ratio uses default",
			ca:     &CA{RenewalWindowRatio: 0},
			cert:   makeCert(10, 100),
			expect: true,
		},
		{
			name:   "invalid ratio uses default",
			ca:     &CA{RenewalWindowRatio: 1.5},
			cert:   makeCert(10, 100),
			expect: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.ca.needsRenewal(tt.cert)
			if got != tt.expect {
				t.Errorf("needsRenewal() = %v, want %v", got, tt.expect)
			}
		})
	}
}

================================================
FILE: modules/caddypki/pki.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddypki

import (
	"fmt"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(PKI{})
}

// PKI provides Public Key Infrastructure facilities for Caddy.
//
// This app can define certificate authorities (CAs) which are capable
// of signing certificates. Other modules can be configured to use
// the CAs defined by this app for issuing certificates or getting
// key information needed for establishing trust.
type PKI struct {
	// The certificate authorities to manage. Each CA is keyed by an
	// ID that is used to uniquely identify it from other CAs.
	// At runtime, the GetCA() method should be used instead to ensure
	// the default CA is provisioned if it hadn't already been.
	// The default CA ID is "local".
	CAs map[string]*CA `json:"certificate_authorities,omitempty"`

	// app context, set in Provision; its cancellation stops the
	// per-CA maintenance goroutines
	ctx caddy.Context

	// structured logger for the app, set in Provision
	log *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (PKI) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "pki",
		New: func() caddy.Module { return new(PKI) },
	}
}

// Provision sets up the configuration for the PKI app.
func (p *PKI) Provision(ctx caddy.Context) error { p.ctx = ctx p.log = ctx.Logger() for caID, ca := range p.CAs { err := ca.Provision(ctx, caID, p.log) if err != nil { return fmt.Errorf("provisioning CA '%s': %v", caID, err) } } // if this app is initialized at all, ensure there's at // least a default CA that can be used: the standard CA // which is used implicitly for signing local-use certs if len(p.CAs) == 0 { err := p.ProvisionDefaultCA(ctx) if err != nil { return fmt.Errorf("provisioning CA '%s': %v", DefaultCAID, err) } } return nil } // ProvisionDefaultCA sets up the default CA. func (p *PKI) ProvisionDefaultCA(ctx caddy.Context) error { if p.CAs == nil { p.CAs = make(map[string]*CA) } p.CAs[DefaultCAID] = new(CA) return p.CAs[DefaultCAID].Provision(ctx, DefaultCAID, p.log) } // Start starts the PKI app. func (p *PKI) Start() error { // install roots to trust store, if not disabled for _, ca := range p.CAs { if ca.InstallTrust != nil && !*ca.InstallTrust { ca.log.Info("root certificate trust store installation disabled; unconfigured clients may show warnings", zap.String("path", ca.rootCertPath)) continue } if err := ca.installRoot(); err != nil { // could be some system dependencies that are missing; // shouldn't totally prevent startup, but we should log it ca.log.Error("failed to install root certificate", zap.Error(err), zap.String("certificate_file", ca.rootCertPath)) } } // see if root/intermediates need renewal... p.renewCerts() // ...and keep them renewed (one goroutine per CA with its own interval) for _, ca := range p.CAs { go p.maintenanceForCA(ca) } return nil } // Stop stops the PKI app. func (p *PKI) Stop() error { return nil } // GetCA retrieves a CA by ID. If the ID is the default // CA ID, and it hasn't been provisioned yet, it will // be provisioned. 
func (p *PKI) GetCA(ctx caddy.Context, id string) (*CA, error) {
	ca, ok := p.CAs[id]
	if !ok {
		// for anything other than the default CA ID, error out if it wasn't configured
		if id != DefaultCAID {
			return nil, fmt.Errorf("no certificate authority configured with id: %s", id)
		}
		// for the default CA ID, provision it, because we want it to "just work"
		err := p.ProvisionDefaultCA(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to provision default CA: %s", err)
		}
		ca = p.CAs[id]
	}
	return ca, nil
}

// Interface guards
var (
	_ caddy.Provisioner = (*PKI)(nil)
	_ caddy.App         = (*PKI)(nil)
)

================================================
FILE: modules/caddytls/acmeissuer.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"context"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/caddyserver/zerossl"
	"github.com/mholt/acmez/v3/acme"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(ACMEIssuer{})
}

// ACMEIssuer manages certificates using the ACME protocol (RFC 8555).
type ACMEIssuer struct {
	// The URL to the CA's ACME directory endpoint. Default:
	// https://acme-v02.api.letsencrypt.org/directory
	CA string `json:"ca,omitempty"`

	// The URL to the test CA's ACME directory endpoint.
	// This endpoint is only used during retries if there
	// is a failure using the primary CA. Default:
	// https://acme-staging-v02.api.letsencrypt.org/directory
	TestCA string `json:"test_ca,omitempty"`

	// Your email address, so the CA can contact you if necessary.
	// Not required, but strongly recommended to provide one so
	// you can be reached if there is a problem. Your email is
	// not sent to any Caddy mothership or used for any purpose
	// other than ACME transactions.
	Email string `json:"email,omitempty"`

	// Optionally select an ACME profile to use for certificate
	// orders. Must be a profile name offered by the ACME server,
	// which are listed at its directory endpoint.
	//
	// EXPERIMENTAL: Subject to change.
	// See https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/
	Profile string `json:"profile,omitempty"`

	// If you have an existing account with the ACME server, put
	// the private key here in PEM format. The ACME client will
	// look up your account information with this key first before
	// trying to create a new one. You can use placeholders here,
	// for example if you have it in an environment variable.
	AccountKey string `json:"account_key,omitempty"`

	// If using an ACME CA that requires an external account
	// binding, specify the CA-provided credentials here.
	ExternalAccount *acme.EAB `json:"external_account,omitempty"`

	// Time to wait before timing out an ACME operation.
	// Default: 0 (no timeout)
	ACMETimeout caddy.Duration `json:"acme_timeout,omitempty"`

	// Configures the various ACME challenge types.
	Challenges *ChallengesConfig `json:"challenges,omitempty"`

	// An array of files of CA certificates to accept when connecting to the
	// ACME CA. Generally, you should only use this if the ACME CA endpoint
	// is internal or for development/testing purposes.
	TrustedRootsPEMFiles []string `json:"trusted_roots_pem_files,omitempty"`

	// Preferences for selecting alternate certificate chains, if offered
	// by the CA. By default, the first offered chain will be selected.
	// If configured, the chains may be sorted and the first matching chain
	// will be selected.
	PreferredChains *ChainPreference `json:"preferred_chains,omitempty"`

	// The validity period to ask the CA to issue a certificate for.
	// Default: 0 (CA chooses lifetime).
	// This value is used to compute the "notAfter" field of the ACME order;
	// therefore the system must have a reasonably synchronized clock.
	// NOTE: Not all CAs support this. Check with your CA's ACME
	// documentation to see if this is allowed and what values may
	// be used.
	//
	// EXPERIMENTAL: Subject to change.
	CertificateLifetime caddy.Duration `json:"certificate_lifetime,omitempty"`

	// Forward proxy module
	NetworkProxyRaw json.RawMessage `json:"network_proxy,omitempty" caddy:"namespace=caddy.network_proxy inline_key=from"`

	// pool of extra trusted roots built from TrustedRootsPEMFiles (set at Provision)
	rootPool *x509.CertPool

	// structured logger (set at Provision)
	logger *zap.Logger

	template certmagic.ACMEIssuer  // set at Provision
	magic    *certmagic.Config     // set at PreCheck
	issuer   *certmagic.ACMEIssuer // set at PreCheck; result of template + magic
}

// CaddyModule returns the Caddy module information.
func (ACMEIssuer) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.issuance.acme",
		New: func() caddy.Module { return new(ACMEIssuer) },
	}
}

// Provision sets up iss.
func (iss *ACMEIssuer) Provision(ctx caddy.Context) error { iss.logger = ctx.Logger() repl := caddy.NewReplacer() // expand email address, if non-empty if iss.Email != "" { email, err := repl.ReplaceOrErr(iss.Email, true, true) if err != nil { return fmt.Errorf("expanding email address '%s': %v", iss.Email, err) } iss.Email = email } // expand account key, if non-empty if iss.AccountKey != "" { accountKey, err := repl.ReplaceOrErr(iss.AccountKey, true, true) if err != nil { return fmt.Errorf("expanding account key PEM '%s': %v", iss.AccountKey, err) } iss.AccountKey = accountKey } // DNS challenge provider, if not already established if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.solver == nil { var prov certmagic.DNSProvider if iss.Challenges.DNS.ProviderRaw != nil { // a challenge provider has been locally configured - use it val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw") if err != nil { return fmt.Errorf("loading DNS provider module: %v", err) } prov = val.(certmagic.DNSProvider) } else if tlsAppIface, err := ctx.AppIfConfigured("tls"); err == nil { // no locally configured DNS challenge provider, but if there is // a global DNS module configured with the TLS app, use that tlsApp := tlsAppIface.(*TLS) if tlsApp.dns != nil { prov = tlsApp.dns.(certmagic.DNSProvider) } } if prov == nil { return fmt.Errorf("DNS challenge enabled, but no DNS provider configured") } iss.Challenges.DNS.solver = &certmagic.DNS01Solver{ DNSManager: certmagic.DNSManager{ DNSProvider: prov, TTL: time.Duration(iss.Challenges.DNS.TTL), PropagationDelay: time.Duration(iss.Challenges.DNS.PropagationDelay), PropagationTimeout: time.Duration(iss.Challenges.DNS.PropagationTimeout), Resolvers: iss.Challenges.DNS.Resolvers, OverrideDomain: iss.Challenges.DNS.OverrideDomain, Logger: iss.logger.Named("dns_manager"), }, } } // add any custom CAs to trust store if len(iss.TrustedRootsPEMFiles) > 0 { iss.rootPool = x509.NewCertPool() for _, pemFile := range 
iss.TrustedRootsPEMFiles { pemData, err := os.ReadFile(pemFile) if err != nil { return fmt.Errorf("loading trusted root CA's PEM file: %s: %v", pemFile, err) } if !iss.rootPool.AppendCertsFromPEM(pemData) { return fmt.Errorf("unable to add %s to trust pool: %v", pemFile, err) } } } var err error iss.template, err = iss.makeIssuerTemplate(ctx) if err != nil { return err } return nil } func (iss *ACMEIssuer) makeIssuerTemplate(ctx caddy.Context) (certmagic.ACMEIssuer, error) { template := certmagic.ACMEIssuer{ CA: iss.CA, TestCA: iss.TestCA, Email: iss.Email, Profile: iss.Profile, AccountKeyPEM: iss.AccountKey, CertObtainTimeout: time.Duration(iss.ACMETimeout), TrustedRoots: iss.rootPool, ExternalAccount: iss.ExternalAccount, NotAfter: time.Duration(iss.CertificateLifetime), Logger: iss.logger, } if len(iss.NetworkProxyRaw) != 0 { proxyMod, err := ctx.LoadModule(iss, "NetworkProxyRaw") if err != nil { return template, fmt.Errorf("failed to load network_proxy module: %v", err) } if m, ok := proxyMod.(caddy.ProxyFuncProducer); ok { template.HTTPProxy = m.ProxyFunc() } else { return template, fmt.Errorf("network_proxy module is not `(func(*http.Request) (*url.URL, error))``") } } if iss.Challenges != nil { if iss.Challenges.HTTP != nil { template.DisableHTTPChallenge = iss.Challenges.HTTP.Disabled template.AltHTTPPort = iss.Challenges.HTTP.AlternatePort } if iss.Challenges.TLSALPN != nil { template.DisableTLSALPNChallenge = iss.Challenges.TLSALPN.Disabled template.AltTLSALPNPort = iss.Challenges.TLSALPN.AlternatePort } if iss.Challenges.DNS != nil { template.DNS01Solver = iss.Challenges.DNS.solver } template.ListenHost = iss.Challenges.BindHost if iss.Challenges.Distributed != nil { template.DisableDistributedSolvers = !*iss.Challenges.Distributed } } if iss.PreferredChains != nil { template.PreferredChains = certmagic.ChainPreference{ Smallest: iss.PreferredChains.Smallest, AnyCommonName: iss.PreferredChains.AnyCommonName, RootCommonName: 
iss.PreferredChains.RootCommonName, } } // ZeroSSL requires EAB, but we can generate that automatically (requires an email address be configured) if strings.HasPrefix(iss.CA, "https://acme.zerossl.com/") { template.NewAccountFunc = func(ctx context.Context, acmeIss *certmagic.ACMEIssuer, acct acme.Account) (acme.Account, error) { if acmeIss.ExternalAccount != nil { return acct, nil } var err error acmeIss.ExternalAccount, acct, err = iss.generateZeroSSLEABCredentials(ctx, acct) return acct, err } } return template, nil } // SetConfig sets the associated certmagic config for this issuer. // This is required because ACME needs values from the config in // order to solve the challenges during issuance. This implements // the ConfigSetter interface. func (iss *ACMEIssuer) SetConfig(cfg *certmagic.Config) { iss.magic = cfg iss.issuer = certmagic.NewACMEIssuer(cfg, iss.template) } // PreCheck implements the certmagic.PreChecker interface. func (iss *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error { return iss.issuer.PreCheck(ctx, names, interactive) } // Issue obtains a certificate for the given csr. func (iss *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) { return iss.issuer.Issue(ctx, csr) } // IssuerKey returns the unique issuer key for the configured CA endpoint. func (iss *ACMEIssuer) IssuerKey() string { return iss.issuer.IssuerKey() } // Revoke revokes the given certificate. func (iss *ACMEIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error { return iss.issuer.Revoke(ctx, cert, reason) } // GetACMEIssuer returns iss. This is useful when other types embed ACMEIssuer, because // type-asserting them to *ACMEIssuer will fail, but type-asserting them to an interface // with only this method will succeed, and will still allow the embedded ACMEIssuer // to be accessed and manipulated. 
func (iss *ACMEIssuer) GetACMEIssuer() *ACMEIssuer { return iss } // GetRenewalInfo wraps the underlying GetRenewalInfo method and satisfies // the CertMagic interface for ARI support. func (iss *ACMEIssuer) GetRenewalInfo(ctx context.Context, cert certmagic.Certificate) (acme.RenewalInfo, error) { return iss.issuer.GetRenewalInfo(ctx, cert) } // generateZeroSSLEABCredentials generates ZeroSSL EAB credentials for the primary contact email // on the issuer. It should only be usedif the CA endpoint is ZeroSSL. An email address is required. func (iss *ACMEIssuer) generateZeroSSLEABCredentials(ctx context.Context, acct acme.Account) (*acme.EAB, acme.Account, error) { if strings.TrimSpace(iss.Email) == "" { return nil, acme.Account{}, fmt.Errorf("your email address is required to use ZeroSSL's ACME endpoint") } if len(acct.Contact) == 0 { // we borrow the email from config or the default email, so ensure it's saved with the account acct.Contact = []string{"mailto:" + iss.Email} } endpoint := zerossl.BaseURL + "/acme/eab-credentials-email" form := url.Values{"email": []string{iss.Email}} body := strings.NewReader(form.Encode()) req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body) if err != nil { return nil, acct, fmt.Errorf("forming request: %v", err) } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") req.Header.Set("User-Agent", certmagic.UserAgent) resp, err := http.DefaultClient.Do(req) //nolint:gosec // no SSRF since URL is from trusted config if err != nil { return nil, acct, fmt.Errorf("performing EAB credentials request: %v", err) } defer resp.Body.Close() var result struct { Success bool `json:"success"` Error struct { Code int `json:"code"` Type string `json:"type"` } `json:"error"` EABKID string `json:"eab_kid"` EABHMACKey string `json:"eab_hmac_key"` } err = json.NewDecoder(resp.Body).Decode(&result) if err != nil { return nil, acct, fmt.Errorf("decoding API response: %v", err) } if result.Error.Code != 0 { // do this 
		// check first because ZeroSSL's API returns 200 on errors
		return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d: %s (code %d)", resp.StatusCode, result.Error.Type, result.Error.Code)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d", resp.StatusCode)
	}

	if c := iss.logger.Check(zapcore.InfoLevel, "generated EAB credentials"); c != nil {
		c.Write(zap.String("key_id", result.EABKID))
	}

	return &acme.EAB{
		KeyID:  result.EABKID,
		MACKey: result.EABHMACKey,
	}, acct, nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
//	... acme [<directory_url>] {
//	    dir <directory_url>
//	    test_dir <test_directory_url>
//	    email <email>
//	    profile <profile_name>
//	    lifetime <duration>
//	    timeout <duration>
//	    disable_http_challenge
//	    disable_tlsalpn_challenge
//	    distributed false
//	    alt_http_port <port>
//	    alt_tlsalpn_port <port>
//	    eab <key_id> <mac_key>
//	    trusted_roots <pem_files...>
//	    dns [<provider_name> [<options>]]
//	    propagation_delay <duration>
//	    propagation_timeout <duration>
//	    resolvers <dns_servers...>
//	    dns_ttl <duration>
//	    dns_challenge_override_domain <domain>
//	    preferred_chains [smallest] {
//	        root_common_name <common_names...>
//	        any_common_name <common_names...>
//	    }
//	}
func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume issuer name

	// optional CA directory URL directly after the issuer name
	if d.NextArg() {
		iss.CA = d.Val()
		if d.NextArg() {
			return d.ArgErr()
		}
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "lifetime":
			var lifetimeStr string
			if !d.AllArgs(&lifetimeStr) {
				return d.ArgErr()
			}
			lifetime, err := caddy.ParseDuration(lifetimeStr)
			if err != nil {
				return d.Errf("invalid lifetime %s: %v", lifetimeStr, err)
			}
			if lifetime < 0 {
				return d.Errf("lifetime must be >= 0: %s", lifetime)
			}
			iss.CertificateLifetime = caddy.Duration(lifetime)

		case "dir":
			// the directory can also be given as the first positional argument
			if iss.CA != "" {
				return d.Errf("directory is already specified: %s", iss.CA)
			}
			if !d.AllArgs(&iss.CA) {
				return d.ArgErr()
			}

		case "test_dir":
			if !d.AllArgs(&iss.TestCA) {
				return d.ArgErr()
			}

		case "email":
			if !d.AllArgs(&iss.Email) {
				return d.ArgErr()
			}

		case "profile":
			if !d.AllArgs(&iss.Profile) {
				return d.ArgErr()
			}

		case "timeout":
			var timeoutStr string
			if !d.AllArgs(&timeoutStr) {
				return d.ArgErr()
			}
			timeout, err := caddy.ParseDuration(timeoutStr)
			if err != nil {
				return d.Errf("invalid timeout duration %s: %v", timeoutStr, err)
			}
			iss.ACMETimeout = caddy.Duration(timeout)

		case "disable_http_challenge":
			if d.NextArg() {
				return d.ArgErr()
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.HTTP == nil {
				iss.Challenges.HTTP = new(HTTPChallengeConfig)
			}
			iss.Challenges.HTTP.Disabled = true

		case "disable_tlsalpn_challenge":
			if d.NextArg() {
				return d.ArgErr()
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.TLSALPN == nil {
				iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
			}
			iss.Challenges.TLSALPN.Disabled = true

		case "distributed":
			// only "false" is accepted; a non-nil *bool pointing at false
			// disables distributed solving
			if !d.NextArg() {
				return d.ArgErr()
			}
			if d.Val() != "false" {
				return d.Errf("only accepted value is 'false'")
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.Distributed == nil {
				iss.Challenges.Distributed = new(bool)
			}

		case "alt_http_port":
			if !d.NextArg() {
				return d.ArgErr()
			}
			port, err := strconv.Atoi(d.Val())
			if err != nil {
				return d.Errf("invalid port %s: %v", d.Val(), err)
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.HTTP == nil {
				iss.Challenges.HTTP = new(HTTPChallengeConfig)
			}
			iss.Challenges.HTTP.AlternatePort = port

		case "alt_tlsalpn_port":
			if !d.NextArg() {
				return d.ArgErr()
			}
			port, err := strconv.Atoi(d.Val())
			if err != nil {
				return d.Errf("invalid port %s: %v", d.Val(), err)
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.TLSALPN == nil {
				iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
			}
			iss.Challenges.TLSALPN.AlternatePort = port

		case "eab":
			iss.ExternalAccount = new(acme.EAB)
			if !d.AllArgs(&iss.ExternalAccount.KeyID, &iss.ExternalAccount.MACKey) {
				return d.ArgErr()
			}

		case "trusted_roots":
			iss.TrustedRootsPEMFiles = d.RemainingArgs()

		case "dns":
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			// provider name is optional here; it may be configured globally instead
			if d.NextArg() {
				provName := d.Val()
				unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName)
				if err != nil {
					return err
				}
				iss.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil)
			}

		case "propagation_delay":
			if !d.NextArg() {
				return d.ArgErr()
			}
			delayStr := d.Val()
			delay, err := caddy.ParseDuration(delayStr)
			if err != nil {
				return d.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			iss.Challenges.DNS.PropagationDelay = caddy.Duration(delay)

		case "propagation_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			timeoutStr := d.Val()
			var timeout time.Duration
			if timeoutStr == "-1" {
				// special sentinel: disable propagation checks entirely
				timeout = time.Duration(-1)
			} else {
				var err error
				timeout, err = caddy.ParseDuration(timeoutStr)
				if err != nil {
					return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
				}
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			iss.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)

		case "resolvers":
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			iss.Challenges.DNS.Resolvers = d.RemainingArgs()
			if len(iss.Challenges.DNS.Resolvers) == 0 {
				return d.ArgErr()
			}

		case "dns_ttl":
			if !d.NextArg() {
				return d.ArgErr()
			}
			ttlStr := d.Val()
			ttl, err := caddy.ParseDuration(ttlStr)
			if err != nil {
				return d.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			iss.Challenges.DNS.TTL = caddy.Duration(ttl)

		case "dns_challenge_override_domain":
			arg := d.RemainingArgs()
			if len(arg) != 1 {
				return d.ArgErr()
			}
			if iss.Challenges == nil {
				iss.Challenges = new(ChallengesConfig)
			}
			if iss.Challenges.DNS == nil {
				iss.Challenges.DNS = new(DNSChallengeConfig)
			}
			iss.Challenges.DNS.OverrideDomain = arg[0]

		case "preferred_chains":
			chainPref, err := ParseCaddyfilePreferredChainsOptions(d)
			if err != nil {
				return err
			}
			iss.PreferredChains = chainPref

		default:
			return d.Errf("unrecognized ACME issuer property: %s", d.Val())
		}
	}

	return nil
}

// ParseCaddyfilePreferredChainsOptions parses the preferred_chains directive,
// either as the single "smallest" argument or as a block of common-name options.
func ParseCaddyfilePreferredChainsOptions(d *caddyfile.Dispenser) (*ChainPreference, error) {
	chainPref := new(ChainPreference)
	if d.NextArg() {
		smallestOpt := d.Val()
		if smallestOpt == "smallest" {
			trueBool := true
			chainPref.Smallest = &trueBool
			if d.NextArg() { // Only one argument allowed
				return nil, d.ArgErr()
			}
			if d.NextBlock(d.Nesting()) { // Don't allow other options when smallest == true
				return nil, d.Err("No more options are accepted when using the 'smallest' option")
			}
		} else { // Smallest option should always be 'smallest' or unset
			return nil, d.Errf("Invalid argument '%s'", smallestOpt)
		}
	}
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		switch d.Val() {
		case "root_common_name":
			rootCommonNameOpt := d.RemainingArgs()
			chainPref.RootCommonName = append(chainPref.RootCommonName, rootCommonNameOpt...)
			if rootCommonNameOpt == nil {
				return nil, d.ArgErr()
			}
			if chainPref.AnyCommonName != nil {
				return nil, d.Err("Can't set root_common_name when any_common_name is already set")
			}

		case "any_common_name":
			anyCommonNameOpt := d.RemainingArgs()
			chainPref.AnyCommonName = append(chainPref.AnyCommonName, anyCommonNameOpt...)
if anyCommonNameOpt == nil { return nil, d.ArgErr() } if chainPref.RootCommonName != nil { return nil, d.Err("Can't set any_common_name when root_common_name is already set") } default: return nil, d.Errf("Received unrecognized parameter '%s'", d.Val()) } } if chainPref.Smallest == nil && chainPref.RootCommonName == nil && chainPref.AnyCommonName == nil { return nil, d.Err("No options for preferred_chains received") } return chainPref, nil } // ChainPreference describes the client's preferred certificate chain, // useful if the CA offers alternate chains. The first matching chain // will be selected. type ChainPreference struct { // Prefer chains with the fewest number of bytes. Smallest *bool `json:"smallest,omitempty"` // Select first chain having a root with one of // these common names. RootCommonName []string `json:"root_common_name,omitempty"` // Select first chain that has any issuer with one // of these common names. AnyCommonName []string `json:"any_common_name,omitempty"` } // Interface guards var ( _ certmagic.PreChecker = (*ACMEIssuer)(nil) _ certmagic.Issuer = (*ACMEIssuer)(nil) _ certmagic.Revoker = (*ACMEIssuer)(nil) _ certmagic.RenewalInfoGetter = (*ACMEIssuer)(nil) _ caddy.Provisioner = (*ACMEIssuer)(nil) _ ConfigSetter = (*ACMEIssuer)(nil) _ caddyfile.Unmarshaler = (*ACMEIssuer)(nil) ) ================================================ FILE: modules/caddytls/automation.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "context" "crypto/tls" "encoding/json" "errors" "fmt" "net" "slices" "strings" "github.com/caddyserver/certmagic" "github.com/mholt/acmez/v3" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/net/idna" "github.com/caddyserver/caddy/v2" ) // AutomationConfig governs the automated management of TLS certificates. type AutomationConfig struct { // The list of automation policies. The first policy matching // a certificate or subject name will be applied. Policies []*AutomationPolicy `json:"policies,omitempty"` // On-Demand TLS defers certificate operations to the // moment they are needed, e.g. during a TLS handshake. // Useful when you don't know all the hostnames at // config-time, or when you are not in control of the // domain names you are managing certificates for. // In 2015, Caddy became the first web server to // implement this experimental technology. // // Note that this field does not enable on-demand TLS; // it only configures it for when it is used. To enable // it, create an automation policy with `on_demand`. OnDemand *OnDemandConfig `json:"on_demand,omitempty"` // Caddy staples OCSP (and caches the response) for all // qualifying certificates by default. This setting // changes how often it scans responses for freshness, // and updates them if they are getting stale. Default: 1h OCSPCheckInterval caddy.Duration `json:"ocsp_interval,omitempty"` // Every so often, Caddy will scan all loaded, managed // certificates for expiration. This setting changes how // frequently the scan for expiring certificates is // performed. Default: 10m RenewCheckInterval caddy.Duration `json:"renew_interval,omitempty"` // How often to scan storage units for old or expired // assets and remove them. These scans exert lots of // reads (and list operations) on the storage module, so // choose a longer interval for large deployments. 
// Default: 24h // // Storage will always be cleaned when the process first // starts. Then, a new cleaning will be started this // duration after the previous cleaning started if the // previous cleaning finished in less than half the time // of this interval (otherwise next start will be skipped). StorageCleanInterval caddy.Duration `json:"storage_clean_interval,omitempty"` defaultPublicAutomationPolicy *AutomationPolicy defaultInternalAutomationPolicy *AutomationPolicy // only initialized if necessary } // AutomationPolicy designates the policy for automating the // management (obtaining, renewal, and revocation) of managed // TLS certificates. // // An AutomationPolicy value is not valid until it has been // provisioned; use the `AddAutomationPolicy()` method on the // TLS app to properly provision a new policy. type AutomationPolicy struct { // Which subjects (hostnames or IP addresses) this policy applies to. // // This list is a filter, not a command. In other words, it is used // only to filter whether this policy should apply to a subject that // needs a certificate; it does NOT command the TLS app to manage a // certificate for that subject. To have Caddy automate a certificate // or specific subjects, use the "automate" certificate loader module // of the TLS app. SubjectsRaw []string `json:"subjects,omitempty"` // The modules that may issue certificates. Default: internal if all // subjects do not qualify for public certificates; otherwise acme and // zerossl. IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"` // Modules that can get a custom certificate to use for any // given TLS handshake at handshake-time. Custom certificates // can be useful if another entity is managing certificates // and Caddy need only get it and serve it. Specifying a Manager // enables on-demand TLS, i.e. it has the side-effect of setting // the on_demand parameter to `true`. // // TODO: This is an EXPERIMENTAL feature. 
Subject to change or removal. ManagersRaw []json.RawMessage `json:"get_certificate,omitempty" caddy:"namespace=tls.get_certificate inline_key=via"` // If true, certificates will be requested with MustStaple. Not all // CAs support this, and there are potentially serious consequences // of enabling this feature without proper threat modeling. MustStaple bool `json:"must_staple,omitempty"` // How long before a certificate's expiration to try renewing it, // as a function of its total lifetime. As a general and conservative // rule, it is a good idea to renew a certificate when it has about // 1/3 of its total lifetime remaining. This utilizes the majority // of the certificate's lifetime while still saving time to // troubleshoot problems. However, for extremely short-lived certs, // you may wish to increase the ratio to ~1/2. RenewalWindowRatio float64 `json:"renewal_window_ratio,omitempty"` // The type of key to generate for certificates. // Supported values: `ed25519`, `p256`, `p384`, `rsa2048`, `rsa4096`. KeyType string `json:"key_type,omitempty"` // Optionally configure a separate storage module associated with this // manager, instead of using Caddy's global/default-configured storage. StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` // If true, certificates will be managed "on demand"; that is, during // TLS handshakes or when needed, as opposed to at startup or config // load. This enables On-Demand TLS for this policy. OnDemand bool `json:"on_demand,omitempty"` // If true, private keys already existing in storage // will be reused. Otherwise, a new key will be // created for every new certificate to mitigate // pinning and reduce the scope of key compromise. // TEMPORARY: Key pinning is against industry best practices. // This property will likely be removed in the future. // Do not rely on it forever; watch the release notes. 
ReusePrivateKeys bool `json:"reuse_private_keys,omitempty"` // Disables OCSP stapling. Disabling OCSP stapling puts clients at // greater risk, reduces their privacy, and usually lowers client // performance. It is NOT recommended to disable this unless you // are able to justify the costs. // EXPERIMENTAL. Subject to change. DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"` // Overrides the URLs of OCSP responders embedded in certificates. // Each key is a OCSP server URL to override, and its value is the // replacement. An empty value will disable querying of that server. // EXPERIMENTAL. Subject to change. OCSPOverrides map[string]string `json:"ocsp_overrides,omitempty"` // Issuers and Managers store the decoded issuer and manager modules; // they are only used to populate an underlying certmagic.Config's // fields during provisioning so that the modules can survive a // re-provisioning. Issuers []certmagic.Issuer `json:"-"` Managers []certmagic.Manager `json:"-"` subjects []string magic *certmagic.Config storage certmagic.Storage // Whether this policy had explicit managers configured directly on it. hadExplicitManagers bool } // Provision sets up ap and builds its underlying CertMagic config. 
func (ap *AutomationPolicy) Provision(tlsApp *TLS) error { // replace placeholders in subjects to allow environment variables repl := caddy.NewReplacer() subjects := make([]string, len(ap.SubjectsRaw)) for i, sub := range ap.SubjectsRaw { sub = repl.ReplaceAll(sub, "") subASCII, err := idna.ToASCII(sub) if err != nil { return fmt.Errorf("could not convert automation policy subject '%s' to punycode: %v", sub, err) } subjects[i] = subASCII } ap.subjects = subjects // policy-specific storage implementation if ap.StorageRaw != nil { val, err := tlsApp.ctx.LoadModule(ap, "StorageRaw") if err != nil { return fmt.Errorf("loading TLS storage module: %v", err) } cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage() if err != nil { return fmt.Errorf("creating TLS storage configuration: %v", err) } ap.storage = cmStorage } // we don't store loaded modules directly in the certmagic config since // policy provisioning may happen more than once (during auto-HTTPS) and // loading a module clears its config bytes; thus, load the module and // store them on the policy before putting it on the config // load and provision any cert manager modules if ap.ManagersRaw != nil { ap.hadExplicitManagers = true vals, err := tlsApp.ctx.LoadModule(ap, "ManagersRaw") if err != nil { return fmt.Errorf("loading external certificate manager modules: %v", err) } for _, getCertVal := range vals.([]any) { ap.Managers = append(ap.Managers, getCertVal.(certmagic.Manager)) } } // load and provision any explicitly-configured issuer modules if ap.IssuersRaw != nil { val, err := tlsApp.ctx.LoadModule(ap, "IssuersRaw") if err != nil { return fmt.Errorf("loading TLS automation management module: %s", err) } for _, issVal := range val.([]any) { ap.Issuers = append(ap.Issuers, issVal.(certmagic.Issuer)) } } issuers := ap.Issuers if len(issuers) == 0 { var err error issuers, err = DefaultIssuersProvisioned(tlsApp.ctx) if err != nil { return err } } // build certmagic.Config and attach it to the 
policy storage := ap.storage if storage == nil { storage = tlsApp.ctx.Storage() } cfg, err := ap.makeCertMagicConfig(tlsApp, issuers, storage) if err != nil { return err } certCacheMu.RLock() ap.magic = certmagic.New(certCache, cfg) certCacheMu.RUnlock() // give issuers a chance to see the config pointer for _, issuer := range ap.magic.Issuers { if annoying, ok := issuer.(ConfigSetter); ok { annoying.SetConfig(ap.magic) } } return nil } // makeCertMagicConfig constructs a certmagic.Config for this policy using the // provided issuers and storage. It encapsulates common logic shared between // Provision and RebuildCertMagic so we don't duplicate code. func (ap *AutomationPolicy) makeCertMagicConfig(tlsApp *TLS, issuers []certmagic.Issuer, storage certmagic.Storage) (certmagic.Config, error) { // key source keyType := ap.KeyType if keyType != "" { var err error keyType, err = caddy.NewReplacer().ReplaceOrErr(ap.KeyType, true, true) if err != nil { return certmagic.Config{}, fmt.Errorf("invalid key type %s: %s", ap.KeyType, err) } if _, ok := supportedCertKeyTypes[keyType]; !ok { return certmagic.Config{}, fmt.Errorf("unrecognized key type: %s", keyType) } } keySource := certmagic.StandardKeyGenerator{ KeyType: supportedCertKeyTypes[keyType], } if storage == nil { storage = tlsApp.ctx.Storage() } // on-demand TLS var ond *certmagic.OnDemandConfig if ap.OnDemand || len(ap.Managers) > 0 { // permission module is now required after a number of negligence cases that allowed abuse; // but it may still be optional for explicit subjects (bounded, non-wildcard), for the // internal issuer since it doesn't cause public PKI pressure on ACME servers; subtly, it // is useful to allow on-demand TLS to be enabled so Managers can be used, but to still // prevent issuance from Issuers (when Managers don't provide a certificate) if there's no // permission module configured noProtections := ap.isWildcardOrDefault() && !ap.onlyInternalIssuer() && (tlsApp.Automation == nil || 
tlsApp.Automation.OnDemand == nil || tlsApp.Automation.OnDemand.permission == nil) failClosed := noProtections && !ap.hadExplicitManagers // don't allow on-demand issuance (other than implicit managers) if no managers have been explicitly configured if noProtections { if !ap.hadExplicitManagers { // no managers, no explicitly-configured permission module, this is a config error return certmagic.Config{}, fmt.Errorf("on-demand TLS cannot be enabled without a permission module to prevent abuse; please refer to documentation for details") } // allow on-demand to be enabled but only for the purpose of the Managers; issuance won't be allowed from Issuers tlsApp.logger.Warn("on-demand TLS can only get certificates from the configured external manager(s) because no ask endpoint / permission module is specified") } ond = &certmagic.OnDemandConfig{ DecisionFunc: func(ctx context.Context, name string) error { if failClosed { return fmt.Errorf("no permission module configured; certificates not allowed except from external Managers") } if tlsApp.Automation == nil || tlsApp.Automation.OnDemand == nil { return nil } // logging the remote IP can be useful for servers that want to count // attempts from clients to detect patterns of abuse -- it should NOT be // used solely for decision making, however var remoteIP string if hello, ok := ctx.Value(certmagic.ClientHelloInfoCtxKey).(*tls.ClientHelloInfo); ok && hello != nil { if remote := hello.Conn.RemoteAddr(); remote != nil { remoteIP, _, _ = net.SplitHostPort(remote.String()) } } if c := tlsApp.logger.Check(zapcore.DebugLevel, "asking for permission for on-demand certificate"); c != nil { c.Write( zap.String("remote_ip", remoteIP), zap.String("domain", name), ) } // ask the permission module if this cert is allowed if err := tlsApp.Automation.OnDemand.permission.CertificateAllowed(ctx, name); err != nil { // distinguish true errors from denials, because it's important to elevate actual errors if errors.Is(err, 
ErrPermissionDenied) { if c := tlsApp.logger.Check(zapcore.DebugLevel, "on-demand certificate issuance denied"); c != nil { c.Write( zap.String("domain", name), zap.Error(err), ) } } else { if c := tlsApp.logger.Check(zapcore.ErrorLevel, "failed to get permission for on-demand certificate"); c != nil { c.Write( zap.String("domain", name), zap.Error(err), ) } } return err } return nil }, Managers: ap.Managers, } } cfg := certmagic.Config{ MustStaple: ap.MustStaple, RenewalWindowRatio: ap.RenewalWindowRatio, KeySource: keySource, OnEvent: tlsApp.onEvent, OnDemand: ond, ReusePrivateKeys: ap.ReusePrivateKeys, OCSP: certmagic.OCSPConfig{ DisableStapling: ap.DisableOCSPStapling, ResponderOverrides: ap.OCSPOverrides, }, Storage: storage, Issuers: issuers, Logger: tlsApp.logger, } return cfg, nil } // IsProvisioned reports whether the automation policy has been // provisioned. A provisioned policy has an initialized CertMagic // instance (i.e. ap.magic != nil). func (ap *AutomationPolicy) IsProvisioned() bool { return ap.magic != nil } // RebuildCertMagic rebuilds the policy's CertMagic configuration from the // policy's already-populated fields (Issuers, Managers, storage, etc.) and // replaces the internal CertMagic instance. This is a lightweight // alternative to calling Provision because it does not re-provision // modules or re-run module Provision; instead, it constructs a new // certmagic.Config and calls SetConfig on issuers so they receive updated // templates (for example, alternate HTTP/TLS ports supplied by the HTTP // app). RebuildCertMagic should only be called when the policy's required // fields are already populated. 
func (ap *AutomationPolicy) RebuildCertMagic(tlsApp *TLS) error { cfg, err := ap.makeCertMagicConfig(tlsApp, ap.Issuers, ap.storage) if err != nil { return err } certCacheMu.RLock() ap.magic = certmagic.New(certCache, cfg) certCacheMu.RUnlock() // sometimes issuers may need the parent certmagic.Config in // order to function properly (for example, ACMEIssuer needs // access to the correct storage and cache so it can solve // ACME challenges -- it's an annoying, inelegant circular // dependency that I don't know how to resolve nicely!) for _, issuer := range ap.magic.Issuers { if annoying, ok := issuer.(ConfigSetter); ok { annoying.SetConfig(ap.magic) } } return nil } // Subjects returns the list of subjects with all placeholders replaced. func (ap *AutomationPolicy) Subjects() []string { return ap.subjects } // AllInternalSubjects returns true if all the subjects on this policy are internal. func (ap *AutomationPolicy) AllInternalSubjects() bool { return !slices.ContainsFunc(ap.subjects, func(s string) bool { return !certmagic.SubjectIsInternal(s) }) } func (ap *AutomationPolicy) onlyInternalIssuer() bool { if len(ap.Issuers) != 1 { return false } _, ok := ap.Issuers[0].(*InternalIssuer) return ok } // isWildcardOrDefault determines if the subjects include any wildcard domains, // or is the "default" policy (i.e. no subjects) which is unbounded. func (ap *AutomationPolicy) isWildcardOrDefault() bool { isWildcardOrDefault := len(ap.subjects) == 0 for _, sub := range ap.subjects { if strings.HasPrefix(sub, "*") { isWildcardOrDefault = true break } } return isWildcardOrDefault } // DefaultIssuers returns empty Issuers (not provisioned) to be used as defaults. // This function is experimental and has no compatibility promises. 
func DefaultIssuers(userEmail string) []certmagic.Issuer { issuers := []certmagic.Issuer{new(ACMEIssuer)} if strings.TrimSpace(userEmail) != "" { issuers = append(issuers, &ACMEIssuer{ CA: certmagic.ZeroSSLProductionCA, Email: userEmail, }) } return issuers } // DefaultIssuersProvisioned returns empty but provisioned default Issuers from // DefaultIssuers(). This function is experimental and has no compatibility promises. func DefaultIssuersProvisioned(ctx caddy.Context) ([]certmagic.Issuer, error) { issuers := DefaultIssuers("") for i, iss := range issuers { if prov, ok := iss.(caddy.Provisioner); ok { err := prov.Provision(ctx) if err != nil { return nil, fmt.Errorf("provisioning default issuer %d: %T: %v", i, iss, err) } } } return issuers, nil } // ChallengesConfig configures the ACME challenges. type ChallengesConfig struct { // HTTP configures the ACME HTTP challenge. This // challenge is enabled and used automatically // and by default. HTTP *HTTPChallengeConfig `json:"http,omitempty"` // TLSALPN configures the ACME TLS-ALPN challenge. // This challenge is enabled and used automatically // and by default. TLSALPN *TLSALPNChallengeConfig `json:"tls-alpn,omitempty"` // Configures the ACME DNS challenge. Because this // challenge typically requires credentials for // interfacing with a DNS provider, this challenge is // not enabled by default. This is the only challenge // type which does not require a direct connection // to Caddy from an external server. // // NOTE: DNS providers are currently being upgraded, // and this API is subject to change, but should be // stabilized soon. DNS *DNSChallengeConfig `json:"dns,omitempty"` // Optionally customize the host to which a listener // is bound if required for solving a challenge. BindHost string `json:"bind_host,omitempty"` // Whether distributed solving is enabled. 
This is // enabled by default, so this is only used to // disable it, which should only need to be done if // you cannot reliably or affordably use storage // backend for writing/distributing challenge info. // (Applies to HTTP and TLS-ALPN challenges.) // If set to false, challenges can only be solved // from the Caddy instance that initiated the // challenge, with the exception of HTTP challenges // initiated with the same ACME account that this // config uses. (Caddy can still solve those challenges // without explicitly writing the info to storage.) // // Default: true Distributed *bool `json:"distributed,omitempty"` } // HTTPChallengeConfig configures the ACME HTTP challenge. type HTTPChallengeConfig struct { // If true, the HTTP challenge will be disabled. Disabled bool `json:"disabled,omitempty"` // An alternate port on which to service this // challenge. Note that the HTTP challenge port is // hard-coded into the spec and cannot be changed, // so you would have to forward packets from the // standard HTTP challenge port to this one. AlternatePort int `json:"alternate_port,omitempty"` } // TLSALPNChallengeConfig configures the ACME TLS-ALPN challenge. type TLSALPNChallengeConfig struct { // If true, the TLS-ALPN challenge will be disabled. Disabled bool `json:"disabled,omitempty"` // An alternate port on which to service this // challenge. Note that the TLS-ALPN challenge port // is hard-coded into the spec and cannot be changed, // so you would have to forward packets from the // standard TLS-ALPN challenge port to this one. AlternatePort int `json:"alternate_port,omitempty"` } // DNSChallengeConfig configures the ACME DNS challenge. // // NOTE: This API is still experimental and is subject to change. type DNSChallengeConfig struct { // The DNS provider module to use which will manage // the DNS records relevant to the ACME challenge. // Required. 
ProviderRaw json.RawMessage `json:"provider,omitempty" caddy:"namespace=dns.providers inline_key=name"` // The TTL of the TXT record used for the DNS challenge. TTL caddy.Duration `json:"ttl,omitempty"` // How long to wait before starting propagation checks. // Default: 0 (no wait). PropagationDelay caddy.Duration `json:"propagation_delay,omitempty"` // Maximum time to wait for temporary DNS record to appear. // Set to -1 to disable propagation checks. // Default: 2 minutes. PropagationTimeout caddy.Duration `json:"propagation_timeout,omitempty"` // Custom DNS resolvers to prefer over system/built-in defaults. // Often necessary to configure when using split-horizon DNS. Resolvers []string `json:"resolvers,omitempty"` // Override the domain to use for the DNS challenge. This // is to delegate the challenge to a different domain, // e.g. one that updates faster or one with a provider API. OverrideDomain string `json:"override_domain,omitempty"` solver acmez.Solver } // ConfigSetter is implemented by certmagic.Issuers that // need access to a parent certmagic.Config as part of // their provisioning phase. For example, the ACMEIssuer // requires a config so it can access storage and the // cache to solve ACME challenges. 
type ConfigSetter interface {
	SetConfig(cfg *certmagic.Config)
}

================================================
FILE: modules/caddytls/capools.go
================================================

package caddytls

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"reflect"

	"github.com/caddyserver/certmagic"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
)

func init() {
	caddy.RegisterModule(InlineCAPool{})
	caddy.RegisterModule(FileCAPool{})
	caddy.RegisterModule(PKIRootCAPool{})
	caddy.RegisterModule(PKIIntermediateCAPool{})
	caddy.RegisterModule(StoragePool{})
	caddy.RegisterModule(HTTPCertPool{})
}

// The interface to be implemented by all guest modules part of
// the namespace 'tls.ca_pool.source.'
type CA interface {
	CertPool() *x509.CertPool
}

// InlineCAPool is a certificate authority pool provider coming from
// a DER-encoded certificates in the config
type InlineCAPool struct {
	// A list of base64 DER-encoded CA certificates
	// against which to validate client certificates.
	// Client certs which are not signed by any of
	// these CAs will be rejected.
	TrustedCACerts []string `json:"trusted_ca_certs,omitempty"`

	// pool is built from TrustedCACerts during Provision.
	pool *x509.CertPool
}

// CaddyModule implements caddy.Module.
func (icp InlineCAPool) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.ca_pool.source.inline",
		New: func() caddy.Module { return new(InlineCAPool) },
	}
}

// Provision implements caddy.Provisioner. It decodes each configured
// base64 DER certificate and collects them into the pool.
func (icp *InlineCAPool) Provision(ctx caddy.Context) error {
	caPool := x509.NewCertPool()
	for i, clientCAString := range icp.TrustedCACerts {
		// decodeBase64DERCert is a helper defined elsewhere in this package
		clientCA, err := decodeBase64DERCert(clientCAString)
		if err != nil {
			return fmt.Errorf("parsing certificate at index %d: %v", i, err)
		}
		caPool.AddCert(clientCA)
	}
	icp.pool = caPool
	return nil
}

// Syntax:
//
//	trust_pool inline {
//		trust_der ...
//	}
//
// The 'trust_der' directive can be specified multiple times.
func (icp *InlineCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume module name
	// no inline arguments are accepted; certificates must be
	// listed with 'trust_der' inside the block
	if d.CountRemainingArgs() > 0 {
		return d.ArgErr()
	}
	// NOTE(review): the sibling pool modules iterate with
	// d.NextBlock(d.Nesting()); NextBlock(0) here assumes the
	// dispenser is at top-level nesting — confirm intended.
	for d.NextBlock(0) {
		switch d.Val() {
		case "trust_der":
			icp.TrustedCACerts = append(icp.TrustedCACerts, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	if len(icp.TrustedCACerts) == 0 {
		return d.Err("no certificates specified")
	}
	return nil
}

// CertPool implements CA.
func (icp InlineCAPool) CertPool() *x509.CertPool {
	return icp.pool
}

// FileCAPool generates trusted root certificates pool from the designated DER and PEM file
type FileCAPool struct {
	// TrustedCACertPEMFiles is a list of PEM file names
	// from which to load certificates of trusted CAs.
	// Client certificates which are not signed by any of
	// these CA certificates will be rejected.
	TrustedCACertPEMFiles []string `json:"pem_files,omitempty"`

	// pool is built from the PEM files during Provision.
	pool *x509.CertPool
}

// CaddyModule implements caddy.Module.
func (FileCAPool) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.ca_pool.source.file",
		New: func() caddy.Module { return new(FileCAPool) },
	}
}

// Loads and decodes the DER and pem files to generate the certificate pool
func (f *FileCAPool) Provision(ctx caddy.Context) error {
	caPool := x509.NewCertPool()
	for _, pemFile := range f.TrustedCACertPEMFiles {
		pemContents, err := os.ReadFile(pemFile)
		if err != nil {
			return fmt.Errorf("reading %s: %v", pemFile, err)
		}
		// NOTE(review): AppendCertsFromPEM's boolean result is ignored,
		// so a file with no parsable certificates is accepted silently —
		// confirm this is intended (StoragePool treats it as an error).
		caPool.AppendCertsFromPEM(pemContents)
	}
	f.pool = caPool
	return nil
}

// Syntax:
//
//	trust_pool file [...] {
//		pem_file ...
//	}
//
// The 'pem_file' directive can be specified multiple times.
func (fcap *FileCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume module name
	// PEM file names may also be given inline after the module name
	fcap.TrustedCACertPEMFiles = append(fcap.TrustedCACertPEMFiles, d.RemainingArgs()...)
	for d.NextBlock(0) {
		switch d.Val() {
		case "pem_file":
			fcap.TrustedCACertPEMFiles = append(fcap.TrustedCACertPEMFiles, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	if len(fcap.TrustedCACertPEMFiles) == 0 {
		return d.Err("no certificates specified")
	}
	return nil
}

// CertPool implements CA.
func (f FileCAPool) CertPool() *x509.CertPool {
	return f.pool
}

// PKIRootCAPool extracts the trusted root certificates from Caddy's native 'pki' app
type PKIRootCAPool struct {
	// List of the Authority names that are configured in the `pki` app whose root certificates are trusted
	Authority []string `json:"authority,omitempty"`

	// resolved CA handles and the resulting pool, set during Provision
	ca   []*caddypki.CA
	pool *x509.CertPool
}

// CaddyModule implements caddy.Module.
func (PKIRootCAPool) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.ca_pool.source.pki_root",
		New: func() caddy.Module { return new(PKIRootCAPool) },
	}
}

// Loads the PKI app and load the root certificates into the certificate pool
func (p *PKIRootCAPool) Provision(ctx caddy.Context) error {
	pkiApp, err := ctx.AppIfConfigured("pki")
	if err != nil {
		return fmt.Errorf("pki_root CA pool requires that a PKI app is configured: %v", err)
	}
	pki := pkiApp.(*caddypki.PKI)
	for _, caID := range p.Authority {
		c, err := pki.GetCA(ctx, caID)
		if err != nil || c == nil {
			return fmt.Errorf("getting CA %s: %v", caID, err)
		}
		p.ca = append(p.ca, c)
	}

	caPool := x509.NewCertPool()
	for _, ca := range p.ca {
		caPool.AddCert(ca.RootCertificate())
	}
	p.pool = caPool
	return nil
}

// Syntax:
//
//	trust_pool pki_root [...] {
//		authority ...
//	}
//
// The 'authority' directive can be specified multiple times.
func (pkir *PKIRootCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume module name
	// authority names may also be given inline after the module name
	pkir.Authority = append(pkir.Authority, d.RemainingArgs()...)
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		switch d.Val() {
		case "authority":
			pkir.Authority = append(pkir.Authority, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	if len(pkir.Authority) == 0 {
		return d.Err("no authorities specified")
	}
	return nil
}

// return the certificate pool generated with root certificates from the PKI app
func (p PKIRootCAPool) CertPool() *x509.CertPool {
	return p.pool
}

// PKIIntermediateCAPool extracts the trusted intermediate certificates from Caddy's native 'pki' app
type PKIIntermediateCAPool struct {
	// List of the Authority names that are configured in the `pki` app whose intermediate certificates are trusted
	Authority []string `json:"authority,omitempty"`

	// resolved CA handles and the resulting pool, set during Provision
	ca   []*caddypki.CA
	pool *x509.CertPool
}

// CaddyModule implements caddy.Module.
func (PKIIntermediateCAPool) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.ca_pool.source.pki_intermediate",
		New: func() caddy.Module { return new(PKIIntermediateCAPool) },
	}
}

// Loads the PKI app and loads the intermediate certificates into the certificate pool
func (p *PKIIntermediateCAPool) Provision(ctx caddy.Context) error {
	pkiApp, err := ctx.AppIfConfigured("pki")
	if err != nil {
		return fmt.Errorf("pki_intermediate CA pool requires that a PKI app is configured: %v", err)
	}
	pki := pkiApp.(*caddypki.PKI)
	for _, caID := range p.Authority {
		c, err := pki.GetCA(ctx, caID)
		if err != nil || c == nil {
			return fmt.Errorf("getting CA %s: %v", caID, err)
		}
		p.ca = append(p.ca, c)
	}

	caPool := x509.NewCertPool()
	for _, ca := range p.ca {
		// each authority may contribute a whole chain of intermediates
		for _, c := range ca.IntermediateCertificateChain() {
			caPool.AddCert(c)
		}
	}
	p.pool = caPool
	return nil
}

// Syntax:
//
//	trust_pool pki_intermediate [...] {
//		authority ...
//	}
//
// The 'authority' directive can be specified multiple times.
func (pic *PKIIntermediateCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume module name
	// authority names may also be given inline after the module name
	pic.Authority = append(pic.Authority, d.RemainingArgs()...)
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		switch d.Val() {
		case "authority":
			pic.Authority = append(pic.Authority, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	if len(pic.Authority) == 0 {
		return d.Err("no authorities specified")
	}
	return nil
}

// return the certificate pool generated with intermediate certificates from the PKI app
func (p PKIIntermediateCAPool) CertPool() *x509.CertPool {
	return p.pool
}

// StoragePool extracts the trusted certificates root from Caddy storage
type StoragePool struct {
	// The storage module where the trusted root certificates are stored. Absent
	// explicit storage implies the use of Caddy default storage.
	StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`

	// The storage key/index to the location of the certificates
	PEMKeys []string `json:"pem_keys,omitempty"`

	// resolved storage backend and the resulting pool, set during Provision
	storage certmagic.Storage
	pool    *x509.CertPool
}

// CaddyModule implements caddy.Module.
func (StoragePool) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.ca_pool.source.storage",
		New: func() caddy.Module { return new(StoragePool) },
	}
}

// Provision implements caddy.Provisioner.
// Provision resolves the storage backend (explicit module or the
// instance default), then loads each configured PEM key from storage
// into the certificate pool.
func (ca *StoragePool) Provision(ctx caddy.Context) error {
	// prefer an explicitly-configured storage module
	if ca.StorageRaw != nil {
		val, err := ctx.LoadModule(ca, "StorageRaw")
		if err != nil {
			return fmt.Errorf("loading storage module: %v", err)
		}
		cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
		if err != nil {
			return fmt.Errorf("creating storage configuration: %v", err)
		}
		ca.storage = cmStorage
	}
	// fall back to the instance-wide default storage
	if ca.storage == nil {
		ca.storage = ctx.Storage()
	}
	if len(ca.PEMKeys) == 0 {
		return fmt.Errorf("no PEM keys specified")
	}

	caPool := x509.NewCertPool()
	for _, caID := range ca.PEMKeys {
		bs, err := ca.storage.Load(ctx, caID)
		if err != nil {
			return fmt.Errorf("error loading cert '%s' from storage: %s", caID, err)
		}
		// unlike FileCAPool, a key holding no parsable PEM certs is an error
		if !caPool.AppendCertsFromPEM(bs) {
			return fmt.Errorf("failed to add certificate '%s' to pool", caID)
		}
	}
	ca.pool = caPool
	return nil
}

// Syntax:
//
//	trust_pool storage [...] {
//		storage ...
//		keys ...
//	}
//
// The 'keys' directive can be specified multiple times.
// The 'storage' directive is optional and defaults to the default storage module.
func (sp *StoragePool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume module name
	// storage keys may also be given inline after the module name
	sp.PEMKeys = append(sp.PEMKeys, d.RemainingArgs()...)
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		switch d.Val() {
		case "storage":
			if sp.StorageRaw != nil {
				return d.Err("storage module already set")
			}
			if !d.NextArg() {
				return d.ArgErr()
			}
			modStem := d.Val()
			modID := "caddy.storage." + modStem
			unm, err := caddyfile.UnmarshalModule(d, modID)
			if err != nil {
				return err
			}
			storage, ok := unm.(caddy.StorageConverter)
			if !ok {
				return d.Errf("module %s is not a caddy.StorageConverter", modID)
			}
			sp.StorageRaw = caddyconfig.JSONModuleObject(storage, "module", modStem, nil)
		case "keys":
			sp.PEMKeys = append(sp.PEMKeys, d.RemainingArgs()...)
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	return nil
}

// CertPool implements CA.
func (p StoragePool) CertPool() *x509.CertPool {
	return p.pool
}

// TLSConfig holds configuration related to the TLS configuration for the
// transport/client.
// copied from with minor modifications: modules/caddyhttp/reverseproxy/httptransport.go
type TLSConfig struct {
	// Provides the guest module that provides the trusted certificate authority (CA) certificates
	CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"`

	// If true, TLS verification of server certificates will be disabled.
	// This is insecure and may be removed in the future. Do not use this
	// option except in testing or local development environments.
	InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`

	// The duration to allow a TLS handshake to a server. Default: No timeout.
	HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"`

	// The server name used when verifying the certificate received in the TLS
	// handshake. By default, this will use the upstream address' host part.
	// You only need to override this if your upstream address does not match the
	// certificate the upstream is likely to use. For example if the upstream
	// address is an IP address, then you would need to configure this to the
	// hostname being served by the upstream server. Currently, this does not
	// support placeholders because the TLS config is not provisioned on each
	// connection, so a static value must be used.
	ServerName string `json:"server_name,omitempty"`

	// TLS renegotiation level. TLS renegotiation is the act of performing
	// subsequent handshakes on a connection after the first.
	// The level can be:
	//   - "never": (the default) disables renegotiation.
	//   - "once": allows a remote server to request renegotiation once per connection.
	//   - "freely": allows a remote server to repeatedly request renegotiation.
Renegotiation string `json:"renegotiation,omitempty"` } func (t *TLSConfig) unmarshalCaddyfile(d *caddyfile.Dispenser) error { for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "ca": if !d.NextArg() { return d.ArgErr() } modStem := d.Val() modID := "tls.ca_pool.source." + modStem unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } ca, ok := unm.(CA) if !ok { return d.Errf("module %s is not a caddytls.CA", modID) } t.CARaw = caddyconfig.JSONModuleObject(ca, "provider", modStem, nil) case "insecure_skip_verify": t.InsecureSkipVerify = true case "handshake_timeout": if !d.NextArg() { return d.ArgErr() } dur, err := caddy.ParseDuration(d.Val()) if err != nil { return d.Errf("bad timeout value '%s': %v", d.Val(), err) } t.HandshakeTimeout = caddy.Duration(dur) case "server_name": if !d.Args(&t.ServerName) { return d.ArgErr() } case "renegotiation": if !d.Args(&t.Renegotiation) { return d.ArgErr() } switch t.Renegotiation { case "never", "once", "freely": continue default: t.Renegotiation = "" return d.Errf("unrecognized renegotiation level: %s", t.Renegotiation) } default: return d.Errf("unrecognized directive: %s", d.Val()) } } return nil } // MakeTLSClientConfig returns a tls.Config usable by a client to a backend. // If there is no custom TLS configuration, a nil config may be returned. 
// copied from with minor modifications: modules/caddyhttp/reverseproxy/httptransport.go func (t *TLSConfig) makeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) { repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok || repl == nil { repl = caddy.NewReplacer() } cfg := new(tls.Config) if t.CARaw != nil { caRaw, err := ctx.LoadModule(t, "CARaw") if err != nil { return nil, err } ca := caRaw.(CA) cfg.RootCAs = ca.CertPool() } // Renegotiation switch t.Renegotiation { case "never", "": cfg.Renegotiation = tls.RenegotiateNever case "once": cfg.Renegotiation = tls.RenegotiateOnceAsClient case "freely": cfg.Renegotiation = tls.RenegotiateFreelyAsClient default: return nil, fmt.Errorf("invalid TLS renegotiation level: %v", t.Renegotiation) } // override for the server name used verify the TLS handshake cfg.ServerName = repl.ReplaceKnown(cfg.ServerName, "") // throw all security out the window cfg.InsecureSkipVerify = t.InsecureSkipVerify // only return a config if it's not empty if reflect.DeepEqual(cfg, new(tls.Config)) { return nil, nil } return cfg, nil } // The HTTPCertPool fetches the trusted root certificates from HTTP(S) // endpoints. The TLS connection properties can be customized, including custom // trusted root certificate. One example usage of this module is to get the trusted // certificates from another Caddy instance that is running the PKI app and ACME server. type HTTPCertPool struct { // the list of URLs that respond with PEM-encoded certificates to trust. Endpoints []string `json:"endpoints,omitempty"` // Customize the TLS connection knobs to used during the HTTP call TLS *TLSConfig `json:"tls,omitempty"` pool *x509.CertPool } // CaddyModule implements caddy.Module. func (HTTPCertPool) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.ca_pool.source.http", New: func() caddy.Module { return new(HTTPCertPool) }, } } // Provision implements caddy.Provisioner. 
func (hcp *HTTPCertPool) Provision(ctx caddy.Context) error { caPool := x509.NewCertPool() customTransport := http.DefaultTransport.(*http.Transport).Clone() if hcp.TLS != nil { tlsConfig, err := hcp.TLS.makeTLSClientConfig(ctx) if err != nil { return err } customTransport.TLSClientConfig = tlsConfig } httpClient := *http.DefaultClient httpClient.Transport = customTransport for _, uri := range hcp.Endpoints { req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) if err != nil { return err } res, err := httpClient.Do(req) //nolint:gosec // SSRF false positive... uri comes from config if err != nil { return err } pembs, err := io.ReadAll(res.Body) res.Body.Close() if err != nil { return err } if !caPool.AppendCertsFromPEM(pembs) { return fmt.Errorf("failed to add certs from URL: %s", uri) } } hcp.pool = caPool return nil } // Syntax: // // trust_pool http [] { // endpoints // tls // } // // tls_config: // // ca // insecure_skip_verify // handshake_timeout // server_name // renegotiation // // is the name of the CA module to source the trust // // certificate pool and follows the syntax of the named CA module. func (hcp *HTTPCertPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume module name hcp.Endpoints = append(hcp.Endpoints, d.RemainingArgs()...) for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "endpoints": if d.CountRemainingArgs() == 0 { return d.ArgErr() } hcp.Endpoints = append(hcp.Endpoints, d.RemainingArgs()...) 
		case "tls":
			// only one tls customization block is allowed
			if hcp.TLS != nil {
				return d.Err("tls block already defined")
			}
			hcp.TLS = new(TLSConfig)
			if err := hcp.TLS.unmarshalCaddyfile(d); err != nil {
				return err
			}
		default:
			return d.Errf("unrecognized directive: %s", d.Val())
		}
	}
	return nil
}

// report error if the endpoints are not valid URLs
func (hcp HTTPCertPool) Validate() (err error) {
	for _, u := range hcp.Endpoints {
		_, e := url.Parse(u)
		if e != nil {
			err = errors.Join(err, e)
		}
	}
	return err
}

// CertPool return the certificate pool generated from the HTTP responses
func (hcp HTTPCertPool) CertPool() *x509.CertPool {
	return hcp.pool
}

// Interface guards
var (
	_ caddy.Module          = (*InlineCAPool)(nil)
	_ caddy.Provisioner     = (*InlineCAPool)(nil)
	_ CA                    = (*InlineCAPool)(nil)
	_ caddyfile.Unmarshaler = (*InlineCAPool)(nil)
	_ caddy.Module          = (*FileCAPool)(nil)
	_ caddy.Provisioner     = (*FileCAPool)(nil)
	_ CA                    = (*FileCAPool)(nil)
	_ caddyfile.Unmarshaler = (*FileCAPool)(nil)
	_ caddy.Module          = (*PKIRootCAPool)(nil)
	_ caddy.Provisioner     = (*PKIRootCAPool)(nil)
	_ CA                    = (*PKIRootCAPool)(nil)
	_ caddyfile.Unmarshaler = (*PKIRootCAPool)(nil)
	_ caddy.Module          = (*PKIIntermediateCAPool)(nil)
	_ caddy.Provisioner     = (*PKIIntermediateCAPool)(nil)
	_ CA                    = (*PKIIntermediateCAPool)(nil)
	_ caddyfile.Unmarshaler = (*PKIIntermediateCAPool)(nil)
	_ caddy.Module          = (*StoragePool)(nil)
	_ caddy.Provisioner     = (*StoragePool)(nil)
	_ CA                    = (*StoragePool)(nil)
	_ caddyfile.Unmarshaler = (*StoragePool)(nil)
	_ caddy.Module          = (*HTTPCertPool)(nil)
	_ caddy.Provisioner     = (*HTTPCertPool)(nil)
	_ caddy.Validator       = (*HTTPCertPool)(nil)
	_ CA                    = (*HTTPCertPool)(nil)
	_ caddyfile.Unmarshaler = (*HTTPCertPool)(nil)
)

================================================
FILE: modules/caddytls/capools_test.go
================================================

package caddytls

import (
	"encoding/json"
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	_ "github.com/caddyserver/caddy/v2/modules/filestorage"
)

// shared test fixtures: a base64 DER-encoded CA certificate and a PEM
// file path used across the unmarshaling tests below
const (
	test_der_1 =
`MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==` test_cert_file_1 = "../../caddytest/caddy.ca.cer" ) func TestInlineCAPoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string args args expected InlineCAPool wantErr bool }{ { name: "configuring no certificatest produces an error", args: args{ d: caddyfile.NewTestDispenser(` inline { } `), }, wantErr: true, }, { name: "configuring certificates as arguments in-line produces an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` inline %s `, test_der_1)), }, wantErr: true, }, { name: "single cert", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` inline { trust_der %s } `, test_der_1)), }, expected: InlineCAPool{ TrustedCACerts: []string{test_der_1}, }, wantErr: false, }, { name: "multiple certs in one line", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` inline { 
trust_der %s %s } `, test_der_1, test_der_1), ), }, expected: InlineCAPool{ TrustedCACerts: []string{test_der_1, test_der_1}, }, }, { name: "multiple certs in multiple lines", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` inline { trust_der %s trust_der %s } `, test_der_1, test_der_1)), }, expected: InlineCAPool{ TrustedCACerts: []string{test_der_1, test_der_1}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { icp := &InlineCAPool{} if err := icp.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("InlineCAPool.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, icp) { t.Errorf("InlineCAPool.UnmarshalCaddyfile() = %v, want %v", icp, tt.expected) } }) } } func TestFileCAPoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string expected FileCAPool args args wantErr bool }{ { name: "configuring no certificatest produces an error", args: args{ d: caddyfile.NewTestDispenser(` file { } `), }, wantErr: true, }, { name: "configuring certificates as arguments in-line produces an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` file %s `, test_cert_file_1)), }, expected: FileCAPool{ TrustedCACertPEMFiles: []string{test_cert_file_1}, }, }, { name: "single cert", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` file { pem_file %s } `, test_cert_file_1)), }, expected: FileCAPool{ TrustedCACertPEMFiles: []string{test_cert_file_1}, }, wantErr: false, }, { name: "multiple certs inline and in-block are merged", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` file %s { pem_file %s } `, test_cert_file_1, test_cert_file_1)), }, expected: FileCAPool{ TrustedCACertPEMFiles: []string{test_cert_file_1, test_cert_file_1}, }, wantErr: false, }, { name: "multiple certs in one line", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` file { pem_file %s %s } `, test_der_1, test_der_1), ), }, 
expected: FileCAPool{ TrustedCACertPEMFiles: []string{test_der_1, test_der_1}, }, }, { name: "multiple certs in multiple lines", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` file { pem_file %s pem_file %s } `, test_cert_file_1, test_cert_file_1)), }, expected: FileCAPool{ TrustedCACertPEMFiles: []string{test_cert_file_1, test_cert_file_1}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { fcap := &FileCAPool{} if err := fcap.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("FileCAPool.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, fcap) { t.Errorf("FileCAPool.UnmarshalCaddyfile() = %v, want %v", fcap, tt.expected) } }) } } func TestPKIRootCAPoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string expected PKIRootCAPool args args wantErr bool }{ { name: "configuring no certificatest produces an error", args: args{ d: caddyfile.NewTestDispenser(` pki_root { } `), }, wantErr: true, }, { name: "single authority as arguments in-line", args: args{ d: caddyfile.NewTestDispenser(` pki_root ca_1 `), }, expected: PKIRootCAPool{ Authority: []string{"ca_1"}, }, }, { name: "multiple authorities as arguments in-line", args: args{ d: caddyfile.NewTestDispenser(` pki_root ca_1 ca_2 `), }, expected: PKIRootCAPool{ Authority: []string{"ca_1", "ca_2"}, }, }, { name: "single authority in block", args: args{ d: caddyfile.NewTestDispenser(` pki_root { authority ca_1 }`), }, expected: PKIRootCAPool{ Authority: []string{"ca_1"}, }, wantErr: false, }, { name: "multiple authorities in one line", args: args{ d: caddyfile.NewTestDispenser(` pki_root { authority ca_1 ca_2 }`), }, expected: PKIRootCAPool{ Authority: []string{"ca_1", "ca_2"}, }, }, { name: "multiple authorities in multiple lines", args: args{ d: caddyfile.NewTestDispenser(` pki_root { authority ca_1 authority ca_2 }`), }, expected: PKIRootCAPool{ 
Authority: []string{"ca_1", "ca_2"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { pkir := &PKIRootCAPool{} if err := pkir.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("PKIRootCAPool.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, pkir) { t.Errorf("PKIRootCAPool.UnmarshalCaddyfile() = %v, want %v", pkir, tt.expected) } }) } } func TestPKIIntermediateCAPoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string expected PKIIntermediateCAPool args args wantErr bool }{ { name: "configuring no certificatest produces an error", args: args{ d: caddyfile.NewTestDispenser(` pki_intermediate { }`), }, wantErr: true, }, { name: "single authority as arguments in-line", args: args{ d: caddyfile.NewTestDispenser(`pki_intermediate ca_1`), }, expected: PKIIntermediateCAPool{ Authority: []string{"ca_1"}, }, }, { name: "multiple authorities as arguments in-line", args: args{ d: caddyfile.NewTestDispenser(`pki_intermediate ca_1 ca_2`), }, expected: PKIIntermediateCAPool{ Authority: []string{"ca_1", "ca_2"}, }, }, { name: "single authority in block", args: args{ d: caddyfile.NewTestDispenser(` pki_intermediate { authority ca_1 }`), }, expected: PKIIntermediateCAPool{ Authority: []string{"ca_1"}, }, wantErr: false, }, { name: "multiple authorities in one line", args: args{ d: caddyfile.NewTestDispenser(` pki_intermediate { authority ca_1 ca_2 }`), }, expected: PKIIntermediateCAPool{ Authority: []string{"ca_1", "ca_2"}, }, }, { name: "multiple authorities in multiple lines", args: args{ d: caddyfile.NewTestDispenser(` pki_intermediate { authority ca_1 authority ca_2 }`), }, expected: PKIIntermediateCAPool{ Authority: []string{"ca_1", "ca_2"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { pic := &PKIIntermediateCAPool{} if err := pic.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { 
t.Errorf("PKIIntermediateCAPool.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, pic) { t.Errorf("PKIIntermediateCAPool.UnmarshalCaddyfile() = %v, want %v", pic, tt.expected) } }) } } func TestStoragePoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string args args expected StoragePool wantErr bool }{ { name: "empty block", args: args{ d: caddyfile.NewTestDispenser(`storage { }`), }, expected: StoragePool{}, wantErr: false, }, { name: "providing single storage key inline", args: args{ d: caddyfile.NewTestDispenser(`storage key-1`), }, expected: StoragePool{ PEMKeys: []string{"key-1"}, }, wantErr: false, }, { name: "providing multiple storage keys inline", args: args{ d: caddyfile.NewTestDispenser(`storage key-1 key-2`), }, expected: StoragePool{ PEMKeys: []string{"key-1", "key-2"}, }, wantErr: false, }, { name: "providing keys inside block without specifying storage type", args: args{ d: caddyfile.NewTestDispenser(` storage { keys key-1 key-2 } `), }, expected: StoragePool{ PEMKeys: []string{"key-1", "key-2"}, }, wantErr: false, }, { name: "providing keys in-line and inside block merges them", args: args{ d: caddyfile.NewTestDispenser(`storage key-1 key-2 key-3 { keys key-4 key-5 }`), }, expected: StoragePool{ PEMKeys: []string{"key-1", "key-2", "key-3", "key-4", "key-5"}, }, wantErr: false, }, { name: "specifying storage type in block", args: args{ d: caddyfile.NewTestDispenser(`storage { storage file_system /var/caddy/storage }`), }, expected: StoragePool{ StorageRaw: json.RawMessage(`{"module":"file_system","root":"/var/caddy/storage"}`), }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sp := &StoragePool{} if err := sp.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("StoragePool.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && 
!reflect.DeepEqual(&tt.expected, sp) { t.Errorf("StoragePool.UnmarshalCaddyfile() = %s, want %s", sp.StorageRaw, tt.expected.StorageRaw) } }) } } func TestTLSConfig_unmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string args args expected TLSConfig wantErr bool }{ { name: "no arguments is valid", args: args{ d: caddyfile.NewTestDispenser(` { }`), }, expected: TLSConfig{}, }, { name: "setting 'renegotiation' to 'never' is valid", args: args{ d: caddyfile.NewTestDispenser(` { renegotiation never }`), }, expected: TLSConfig{ Renegotiation: "never", }, }, { name: "setting 'renegotiation' to 'once' is valid", args: args{ d: caddyfile.NewTestDispenser(` { renegotiation once }`), }, expected: TLSConfig{ Renegotiation: "once", }, }, { name: "setting 'renegotiation' to 'freely' is valid", args: args{ d: caddyfile.NewTestDispenser(` { renegotiation freely }`), }, expected: TLSConfig{ Renegotiation: "freely", }, }, { name: "setting 'renegotiation' to other than 'none', 'once, or 'freely' is invalid", args: args{ d: caddyfile.NewTestDispenser(` { renegotiation foo }`), }, wantErr: true, }, { name: "setting 'renegotiation' without argument is invalid", args: args{ d: caddyfile.NewTestDispenser(` { renegotiation }`), }, wantErr: true, }, { name: "setting 'ca' without argument is an error", args: args{ d: caddyfile.NewTestDispenser(`{ ca }`), }, wantErr: true, }, { name: "setting 'ca' to 'file' with in-line cert is valid", args: args{ d: caddyfile.NewTestDispenser(`{ ca file /var/caddy/ca.pem }`), }, expected: TLSConfig{ CARaw: []byte(`{"pem_files":["/var/caddy/ca.pem"],"provider":"file"}`), }, }, { name: "setting 'ca' to 'file' with appropriate block is valid", args: args{ d: caddyfile.NewTestDispenser(`{ ca file /var/caddy/ca.pem { pem_file /var/caddy/ca.pem } }`), }, expected: TLSConfig{ CARaw: []byte(`{"pem_files":["/var/caddy/ca.pem","/var/caddy/ca.pem"],"provider":"file"}`), }, }, { name: "setting 'ca' multiple times 
is an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(`{ ca file /var/caddy/ca.pem { pem_file /var/caddy/ca.pem } ca inline %s }`, test_der_1)), }, wantErr: true, }, { name: "setting 'handshake_timeout' without value is an error", args: args{ d: caddyfile.NewTestDispenser(`{ handshake_timeout }`), }, wantErr: true, }, { name: "setting 'handshake_timeout' properly is successful", args: args{ d: caddyfile.NewTestDispenser(`{ handshake_timeout 42m }`), }, expected: TLSConfig{ HandshakeTimeout: caddy.Duration(42 * time.Minute), }, }, { name: "setting 'server_name' without value is an error", args: args{ d: caddyfile.NewTestDispenser(`{ server_name }`), }, wantErr: true, }, { name: "setting 'server_name' properly is successful", args: args{ d: caddyfile.NewTestDispenser(`{ server_name example.com }`), }, expected: TLSConfig{ ServerName: "example.com", }, }, { name: "unsupported directives are errors", args: args{ d: caddyfile.NewTestDispenser(`{ foo }`), }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tr := &TLSConfig{} if err := tr.unmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("TLSConfig.unmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, tr) { t.Errorf("TLSConfig.UnmarshalCaddyfile() = %v, want %v", tr, tt.expected) } }) } } func TestHTTPCertPoolUnmarshalCaddyfile(t *testing.T) { type args struct { d *caddyfile.Dispenser } tests := []struct { name string args args expected HTTPCertPool wantErr bool }{ { name: "no block, inline http endpoint", args: args{ d: caddyfile.NewTestDispenser(`http http://localhost/ca-certs`), }, expected: HTTPCertPool{ Endpoints: []string{"http://localhost/ca-certs"}, }, wantErr: false, }, { name: "no block, inline https endpoint", args: args{ d: caddyfile.NewTestDispenser(`http https://localhost/ca-certs`), }, expected: HTTPCertPool{ Endpoints: []string{"https://localhost/ca-certs"}, }, wantErr: false, }, 
{ name: "no block, mixed http and https endpoints inline", args: args{ d: caddyfile.NewTestDispenser(`http http://localhost/ca-certs https://localhost/ca-certs`), }, expected: HTTPCertPool{ Endpoints: []string{"http://localhost/ca-certs", "https://localhost/ca-certs"}, }, wantErr: false, }, { name: "multiple endpoints in separate lines in block", args: args{ d: caddyfile.NewTestDispenser(` http { endpoints http://localhost/ca-certs endpoints http://remotehost/ca-certs } `), }, expected: HTTPCertPool{ Endpoints: []string{"http://localhost/ca-certs", "http://remotehost/ca-certs"}, }, wantErr: false, }, { name: "endpoints defined inline and in block are merged", args: args{ d: caddyfile.NewTestDispenser(`http http://localhost/ca-certs { endpoints http://remotehost/ca-certs }`), }, expected: HTTPCertPool{ Endpoints: []string{"http://localhost/ca-certs", "http://remotehost/ca-certs"}, }, wantErr: false, }, { name: "multiple endpoints defined in block on the same line", args: args{ d: caddyfile.NewTestDispenser(`http { endpoints http://remotehost/ca-certs http://localhost/ca-certs }`), }, expected: HTTPCertPool{ Endpoints: []string{"http://remotehost/ca-certs", "http://localhost/ca-certs"}, }, wantErr: false, }, { name: "declaring 'endpoints' in block without argument is an error", args: args{ d: caddyfile.NewTestDispenser(`http { endpoints }`), }, wantErr: true, }, { name: "multiple endpoints in separate lines in block", args: args{ d: caddyfile.NewTestDispenser(` http { endpoints http://localhost/ca-certs endpoints http://remotehost/ca-certs tls { renegotiation freely } } `), }, expected: HTTPCertPool{ Endpoints: []string{"http://localhost/ca-certs", "http://remotehost/ca-certs"}, TLS: &TLSConfig{ Renegotiation: "freely", }, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { hcp := &HTTPCertPool{} if err := hcp.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("HTTPCertPool.UnmarshalCaddyfile() error = %v, wantErr 
%v", err, tt.wantErr) } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, hcp) { t.Errorf("HTTPCertPool.UnmarshalCaddyfile() = %v, want %v", hcp, tt.expected) } }) } } ================================================ FILE: modules/caddytls/certmanagers.go ================================================ package caddytls import ( "context" "crypto/tls" "fmt" "io" "net" "net/http" "net/url" "strings" "github.com/caddyserver/certmagic" "github.com/tailscale/tscert" "go.uber.org/zap" "go.uber.org/zap/zapcore" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(Tailscale{}) caddy.RegisterModule(HTTPCertGetter{}) } // Tailscale is a module that can get certificates from the local Tailscale process. type Tailscale struct { logger *zap.Logger } // CaddyModule returns the Caddy module information. func (Tailscale) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.get_certificate.tailscale", New: func() caddy.Module { return new(Tailscale) }, } } func (ts *Tailscale) Provision(ctx caddy.Context) error { ts.logger = ctx.Logger() return nil } func (ts Tailscale) GetCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (*tls.Certificate, error) { canGetCert, err := ts.canHazCertificate(ctx, hello) if err == nil && !canGetCert { return nil, nil // pass-thru: Tailscale can't offer a cert for this name } if err != nil { if c := ts.logger.Check(zapcore.WarnLevel, "could not get status; will try to get certificate anyway"); c != nil { c.Write(zap.Error(err)) } } return tscert.GetCertificateWithContext(ctx, hello) } // canHazCertificate returns true if Tailscale reports it can get a certificate for the given ClientHello. 
func (ts Tailscale) canHazCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (bool, error) { if !strings.HasSuffix(strings.ToLower(hello.ServerName), tailscaleDomainAliasEnding) { return false, nil } status, err := tscert.GetStatus(ctx) if err != nil { return false, err } for _, domain := range status.CertDomains { if certmagic.MatchWildcard(hello.ServerName, domain) { return true, nil } } return false, nil } // UnmarshalCaddyfile deserializes Caddyfile tokens into ts. // // ... tailscale func (Tailscale) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume cert manager name if d.NextArg() { return d.ArgErr() } return nil } // tailscaleDomainAliasEnding is the ending for all Tailscale custom domains. const tailscaleDomainAliasEnding = ".ts.net" // HTTPCertGetter can get a certificate via HTTP(S) request. type HTTPCertGetter struct { // The URL from which to download the certificate. Required. // // The URL will be augmented with query string parameters taken // from the TLS handshake: // // - server_name: The SNI value // - signature_schemes: Comma-separated list of hex IDs of signatures // - cipher_suites: Comma-separated list of hex IDs of cipher suites // // To be valid, the response must be HTTP 200 with a PEM body // consisting of blocks for the certificate chain and the private // key. // // To indicate that this manager is not managing a certificate for // the described handshake, the endpoint should return HTTP 204 // (No Content). Error statuses will indicate that the manager is // capable of providing a certificate but was unable to. URL string `json:"url,omitempty"` ctx context.Context } // CaddyModule returns the Caddy module information. 
func (hcg HTTPCertGetter) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.get_certificate.http", New: func() caddy.Module { return new(HTTPCertGetter) }, } } func (hcg *HTTPCertGetter) Provision(ctx caddy.Context) error { hcg.ctx = ctx if hcg.URL == "" { return fmt.Errorf("URL is required") } return nil } func (hcg HTTPCertGetter) GetCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (*tls.Certificate, error) { sigs := make([]string, len(hello.SignatureSchemes)) for i, sig := range hello.SignatureSchemes { sigs[i] = fmt.Sprintf("%x", uint16(sig)) // you won't believe what %x uses if the val is a Stringer } suites := make([]string, len(hello.CipherSuites)) for i, cs := range hello.CipherSuites { suites[i] = fmt.Sprintf("%x", cs) } parsed, err := url.Parse(hcg.URL) if err != nil { return nil, err } qs := parsed.Query() qs.Set("server_name", hello.ServerName) qs.Set("signature_schemes", strings.Join(sigs, ",")) qs.Set("cipher_suites", strings.Join(suites, ",")) localIP, _, err := net.SplitHostPort(hello.Conn.LocalAddr().String()) if err == nil && localIP != "" { qs.Set("local_ip", localIP) } parsed.RawQuery = qs.Encode() req, err := http.NewRequestWithContext(hcg.ctx, http.MethodGet, parsed.String(), nil) if err != nil { return nil, err } resp, err := http.DefaultClient.Do(req) //nolint:gosec // SSRF false positive... request URI comes from config if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNoContent { // endpoint is not managing certs for this handshake return nil, nil } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("got HTTP %d", resp.StatusCode) } bodyBytes, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error reading response body: %v", err) } cert, err := tlsCertFromCertAndKeyPEMBundle(bodyBytes) if err != nil { return nil, err } return &cert, nil } // UnmarshalCaddyfile deserializes Caddyfile tokens into ts. // // ... 
//	http <url>
func (hcg *HTTPCertGetter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume cert manager name
	if !d.NextArg() {
		return d.ArgErr() // the URL is required
	}
	hcg.URL = d.Val()
	if d.NextArg() {
		return d.ArgErr() // only a single URL is accepted
	}
	if d.NextBlock(0) {
		return d.Err("block not allowed here")
	}
	return nil
}

// Interface guards
var (
	_ certmagic.Manager     = (*Tailscale)(nil)
	_ caddy.Provisioner     = (*Tailscale)(nil)
	_ caddyfile.Unmarshaler = (*Tailscale)(nil)

	_ certmagic.Manager     = (*HTTPCertGetter)(nil)
	_ caddy.Provisioner     = (*HTTPCertGetter)(nil)
	_ caddyfile.Unmarshaler = (*HTTPCertGetter)(nil)
)

================================================
FILE: modules/caddytls/certselection.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"math/big"
	"slices"

	"github.com/caddyserver/certmagic"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// CustomCertSelectionPolicy represents a policy for selecting the certificate
// used to complete a handshake when there may be multiple options. All fields
// specified must match the candidate certificate for it to be chosen.
// This was needed to solve https://github.com/caddyserver/caddy/issues/2588.
type CustomCertSelectionPolicy struct {
	// The certificate must have one of these serial numbers.
	SerialNumber []bigInt `json:"serial_number,omitempty"`

	// The certificate must have one of these organization names.
	SubjectOrganization []string `json:"subject_organization,omitempty"`

	// The certificate must use this public key algorithm.
	PublicKeyAlgorithm PublicKeyAlgorithm `json:"public_key_algorithm,omitempty"`

	// The certificate must have at least one of the tags in the list.
	AnyTag []string `json:"any_tag,omitempty"`

	// The certificate must have all of the tags in the list.
	AllTags []string `json:"all_tags,omitempty"`
}

// SelectCertificate implements certmagic.CertificateSelector. It
// only chooses a certificate that at least meets the criteria in
// p. It then chooses the first non-expired certificate that is
// compatible with the client. If none are valid, it chooses the
// first viable candidate anyway.
func (p CustomCertSelectionPolicy) SelectCertificate(hello *tls.ClientHelloInfo, choices []certmagic.Certificate) (certmagic.Certificate, error) {
	viable := make([]certmagic.Certificate, 0, len(choices))

nextChoice:
	for _, cert := range choices {
		// each configured criterion must be satisfied for the
		// candidate to remain in the running
		if len(p.SerialNumber) > 0 {
			var found bool
			for _, sn := range p.SerialNumber {
				snInt := sn.Int // avoid taking address of iteration variable (gosec warning)
				if cert.Leaf.SerialNumber.Cmp(&snInt) == 0 {
					found = true
					break
				}
			}
			if !found {
				continue
			}
		}

		if len(p.SubjectOrganization) > 0 {
			found := slices.ContainsFunc(p.SubjectOrganization, func(s string) bool {
				return slices.Contains(cert.Leaf.Subject.Organization, s)
			})
			if !found {
				continue
			}
		}

		if p.PublicKeyAlgorithm != PublicKeyAlgorithm(x509.UnknownPublicKeyAlgorithm) &&
			PublicKeyAlgorithm(cert.Leaf.PublicKeyAlgorithm) != p.PublicKeyAlgorithm {
			continue
		}

		if len(p.AnyTag) > 0 {
			found := slices.ContainsFunc(p.AnyTag, cert.HasTag)
			if !found {
				continue
			}
		}

		if len(p.AllTags) > 0 {
			for _, tag := range p.AllTags {
				if !cert.HasTag(tag) {
					continue nextChoice
				}
			}
		}

		// this certificate at least meets the policy's requirements,
		// but we still have to check expiration and compatibility
		viable = append(viable, cert)
	}

	if len(viable) == 0 {
		return certmagic.Certificate{}, fmt.Errorf("no certificates matched custom selection policy")
	}

	return certmagic.DefaultCertificateSelector(hello, viable)
}

// UnmarshalCaddyfile sets up the CustomCertSelectionPolicy from Caddyfile tokens. Syntax:
//
//	cert_selection {
//		all_tags <values...>
//		any_tag <values...>
//		public_key_algorithm <algorithm>
//		serial_number <big_integers...>
//		subject_organization <values...>
//	}
func (p *CustomCertSelectionPolicy) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	_, wrapper := d.Next(), d.Val() // consume wrapper name

	// No same-line options are supported
	if d.CountRemainingArgs() > 0 {
		return d.ArgErr()
	}

	var hasPublicKeyAlgorithm bool
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		optionName := d.Val()
		switch optionName {
		case "all_tags":
			if d.CountRemainingArgs() == 0 {
				return d.ArgErr()
			}
			p.AllTags = append(p.AllTags, d.RemainingArgs()...)
		case "any_tag":
			if d.CountRemainingArgs() == 0 {
				return d.ArgErr()
			}
			p.AnyTag = append(p.AnyTag, d.RemainingArgs()...)
		case "public_key_algorithm":
			// only one algorithm may be specified
			if hasPublicKeyAlgorithm {
				return d.Errf("duplicate %s option '%s'", wrapper, optionName)
			}
			if d.CountRemainingArgs() != 1 {
				return d.ArgErr()
			}
			d.NextArg()
			if err := p.PublicKeyAlgorithm.UnmarshalJSON([]byte(d.Val())); err != nil {
				return d.Errf("parsing %s option '%s': %v", wrapper, optionName, err)
			}
			hasPublicKeyAlgorithm = true
		case "serial_number":
			if d.CountRemainingArgs() == 0 {
				return d.ArgErr()
			}
			for d.NextArg() {
				// serial numbers are parsed as base-10 big integers
				val, bi := d.Val(), bigInt{}
				_, ok := bi.SetString(val, 10)
				if !ok {
					return d.Errf("parsing %s option '%s': invalid big.int value %s", wrapper, optionName, val)
				}
				p.SerialNumber = append(p.SerialNumber, bi)
			}
		case "subject_organization":
			if d.CountRemainingArgs() == 0 {
				return d.ArgErr()
			}
			p.SubjectOrganization = append(p.SubjectOrganization, d.RemainingArgs()...)
		default:
			return d.ArgErr()
		}

		// No nested blocks are supported
		if d.NextBlock(nesting + 1) {
			return d.Errf("malformed %s option '%s': blocks are not supported", wrapper, optionName)
		}
	}

	return nil
}

// bigInt is a big.Int type that interops with JSON encodings as a string.
type bigInt struct{ big.Int }

// MarshalJSON encodes the big integer as a decimal JSON string.
func (bi bigInt) MarshalJSON() ([]byte, error) {
	return json.Marshal(bi.String())
}

// UnmarshalJSON decodes a decimal JSON string into the big integer.
// A JSON null leaves the value untouched.
func (bi *bigInt) UnmarshalJSON(p []byte) error {
	if string(p) == "null" {
		return nil
	}
	var stringRep string
	err := json.Unmarshal(p, &stringRep)
	if err != nil {
		return err
	}
	_, ok := bi.SetString(stringRep, 10)
	if !ok {
		return fmt.Errorf("not a valid big integer: %s", p)
	}
	return nil
}

// Interface guard
var _ caddyfile.Unmarshaler = (*CustomCertSelectionPolicy)(nil)

================================================
FILE: modules/caddytls/connpolicy.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io"
	"os"
	"reflect"
	"slices"
	"strings"

	"github.com/mholt/acmez/v3"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(LeafCertClientAuth{})
}

// ConnectionPolicies govern the establishment of TLS connections.
// It is
// an ordered group of connection policies; the first matching policy will
// be used to configure TLS connections at handshake-time.
type ConnectionPolicies []*ConnectionPolicy

// Provision sets up each connection policy. It should be called
// during the Validate() phase, after the TLS app (if any) is
// already set up.
func (cp ConnectionPolicies) Provision(ctx caddy.Context) error {
	for i, pol := range cp {
		// matchers
		mods, err := ctx.LoadModule(pol, "MatchersRaw")
		if err != nil {
			return fmt.Errorf("loading handshake matchers: %v", err)
		}
		for _, modIface := range mods.(map[string]any) {
			cp[i].matchers = append(cp[i].matchers, modIface.(ConnectionMatcher))
		}

		// enable HTTP/2 by default
		if pol.ALPN == nil {
			pol.ALPN = append(pol.ALPN, defaultALPN...)
		}

		// pre-build standard TLS config so we don't have to at handshake-time
		err = pol.buildStandardTLSConfig(ctx)
		if err != nil {
			return fmt.Errorf("connection policy %d: building standard TLS config: %s", i, err)
		}

		// load any client certificate verifier modules
		if pol.ClientAuthentication != nil && len(pol.ClientAuthentication.VerifiersRaw) > 0 {
			clientCertValidations, err := ctx.LoadModule(pol.ClientAuthentication, "VerifiersRaw")
			if err != nil {
				return fmt.Errorf("loading client cert verifiers: %v", err)
			}
			for _, validator := range clientCertValidations.([]any) {
				cp[i].ClientAuthentication.verifiers = append(cp[i].ClientAuthentication.verifiers, validator.(ClientCertificateVerifier))
			}
		}

		// load the optional handshake context module
		if len(pol.HandshakeContextRaw) > 0 {
			modIface, err := ctx.LoadModule(pol, "HandshakeContextRaw")
			if err != nil {
				return fmt.Errorf("loading handshake context module: %v", err)
			}
			cp[i].handshakeContext = modIface.(HandshakeContext)
		}
	}

	return nil
}

// TLSConfig returns a standard-lib-compatible TLS configuration which
// selects the first matching policy based on the ClientHello.
func (cp ConnectionPolicies) TLSConfig(ctx caddy.Context) *tls.Config {
	// using ServerName to match policies is extremely common, especially in configs
	// with lots and lots of different policies; we can fast-track those by indexing
	// them by SNI, so we don't have to iterate potentially thousands of policies
	// (TODO: this map does not account for wildcards, see if this is a problem in practice? look for reports of high connection latency with wildcard certs but low latency for non-wildcards in multi-thousand-cert deployments)
	indexedBySNI := make(map[string]ConnectionPolicies)
	if len(cp) > 30 {
		for _, p := range cp {
			for _, m := range p.matchers {
				if sni, ok := m.(MatchServerName); ok {
					for _, sniName := range sni {
						// index for fast lookups during handshakes
						indexedBySNI[sniName] = append(indexedBySNI[sniName], p)
					}
				}
			}
		}
	}

	getConfigForClient := func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
		// filter policies by SNI first, if possible, to speed things up
		// when there may be lots of policies
		possiblePolicies := cp
		if indexedPolicies, ok := indexedBySNI[hello.ServerName]; ok {
			possiblePolicies = indexedPolicies
		}

		// first policy whose matchers all match wins
	policyLoop:
		for _, pol := range possiblePolicies {
			for _, matcher := range pol.matchers {
				if !matcher.Match(hello) {
					continue policyLoop
				}
			}
			if pol.Drop {
				return nil, fmt.Errorf("dropping connection")
			}
			return pol.TLSConfig, nil
		}

		return nil, fmt.Errorf("no server TLS configuration available for ClientHello: %+v", hello)
	}

	tlsCfg := &tls.Config{
		MinVersion:         tls.VersionTLS12,
		GetConfigForClient: getConfigForClient,
	}

	// enable ECH, if configured
	if tlsAppIface, err := ctx.AppIfConfigured("tls"); err == nil {
		tlsApp := tlsAppIface.(*TLS)
		if tlsApp.EncryptedClientHello != nil && len(tlsApp.EncryptedClientHello.configs) > 0 {
			// if no publication was configured, we apply ECH to all server names by default,
			// but the TLS app needs to know what they are in this case, since they don't appear
			// in its config (remember, TLS connection policies are used by *other* apps to
			// run TLS servers) -- we skip names with placeholders
			if tlsApp.EncryptedClientHello.Publication == nil {
				var echNames []string
				repl := caddy.NewReplacer()
				for _, p := range cp {
					for _, m := range p.matchers {
						if sni, ok := m.(MatchServerName); ok {
							for _, name := range sni {
								finalName := strings.ToLower(repl.ReplaceAll(name, ""))
								echNames = append(echNames, finalName)
							}
						}
					}
				}
				tlsApp.RegisterServerNames(echNames)
			}

			// serve the prepared ECH keys under a read lock, since they can
			// be refreshed while the server is running
			tlsCfg.GetEncryptedClientHelloKeys = func(chi *tls.ClientHelloInfo) ([]tls.EncryptedClientHelloKey, error) {
				tlsApp.EncryptedClientHello.configsMu.RLock()
				defer tlsApp.EncryptedClientHello.configsMu.RUnlock()
				return tlsApp.EncryptedClientHello.stdlibReady, nil
			}
		}
	}

	return tlsCfg
}

// ConnectionPolicy specifies the logic for handling a TLS handshake.
// An empty policy is valid; safe and sensible defaults will be used.
type ConnectionPolicy struct {
	// How to match this policy with a TLS ClientHello. If
	// this policy is the first to match, it will be used.
	MatchersRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=tls.handshake_match"`
	matchers    []ConnectionMatcher

	// How to choose a certificate if more than one matched
	// the given ServerName (SNI) value.
	CertSelection *CustomCertSelectionPolicy `json:"certificate_selection,omitempty"`

	// The list of cipher suites to support. Caddy's
	// defaults are modern and secure.
	CipherSuites []string `json:"cipher_suites,omitempty"`

	// The list of elliptic curves to support. Caddy's
	// defaults are modern and secure.
	Curves []string `json:"curves,omitempty"`

	// Protocols to use for Application-Layer Protocol
	// Negotiation (ALPN) during the handshake.
	ALPN []string `json:"alpn,omitempty"`

	// Minimum TLS protocol version to allow. Default: `tls1.2`
	ProtocolMin string `json:"protocol_min,omitempty"`

	// Maximum TLS protocol version to allow. Default: `tls1.3`
	ProtocolMax string `json:"protocol_max,omitempty"`

	// Reject TLS connections. EXPERIMENTAL: May change.
	Drop bool `json:"drop,omitempty"`

	// Enables and configures TLS client authentication.
	ClientAuthentication *ClientAuthentication `json:"client_authentication,omitempty"`

	// DefaultSNI becomes the ServerName in a ClientHello if there
	// is no policy configured for the empty SNI value.
	DefaultSNI string `json:"default_sni,omitempty"`

	// FallbackSNI becomes the ServerName in a ClientHello if
	// the original ServerName doesn't match any certificates
	// in the cache. The use cases for this are very niche;
	// typically if a client is a CDN and passes through the
	// ServerName of the downstream handshake but can accept
	// a certificate with the origin's hostname instead, then
	// you would set this to your origin's hostname. Note that
	// Caddy must be managing a certificate for this name.
	//
	// This feature is EXPERIMENTAL and subject to change or removal.
	FallbackSNI string `json:"fallback_sni,omitempty"`

	// Also known as "SSLKEYLOGFILE", TLS secrets will be written to
	// this file in NSS key log format which can then be parsed by
	// Wireshark and other tools. This is INSECURE as it allows other
	// programs or tools to decrypt TLS connections. However, this
	// capability can be useful for debugging and troubleshooting.
	// **ENABLING THIS LOG COMPROMISES SECURITY!**
	//
	// This feature is EXPERIMENTAL and subject to change or removal.
	InsecureSecretsLog string `json:"insecure_secrets_log,omitempty"`

	// A module that can manipulate the context passed into CertMagic's
	// certificate management functions during TLS handshakes.
	// EXPERIMENTAL - subject to change or removal.
	HandshakeContextRaw json.RawMessage `json:"handshake_context,omitempty" caddy:"namespace=tls.context inline_key=module"`
	handshakeContext    HandshakeContext // decoded from HandshakeContextRaw during Provision

	// TLSConfig is the fully-formed, standard lib TLS config
	// used to serve TLS connections. Provision all
	// ConnectionPolicies to populate this.
	// It is exported only
	// so it can be minimally adjusted after provisioning
	// if necessary (like to adjust NextProtos to disable HTTP/2),
	// and may be unexported in the future.
	TLSConfig *tls.Config `json:"-"`
}

// HandshakeContext is implemented by modules that can supply the
// context used for certificate management during TLS handshakes.
type HandshakeContext interface {
	// HandshakeContext returns a context to pass into CertMagic's
	// GetCertificate function used to serve, load, and manage certs
	// during TLS handshakes. Generally you'll start with the context
	// from the ClientHelloInfo, but you may use other information
	// from it as well. Return an error to abort the handshake.
	HandshakeContext(*tls.ClientHelloInfo) (context.Context, error)
}

// buildStandardTLSConfig assembles p.TLSConfig, the standard-library TLS
// config used at handshake-time, from this policy and the TLS app's state.
func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
	tlsAppIface, err := ctx.App("tls")
	if err != nil {
		return fmt.Errorf("getting tls app: %v", err)
	}
	tlsApp := tlsAppIface.(*TLS)

	// fill in some "easy" default values, but for other values
	// (such as slices), we should ensure that they start empty
	// so the user-provided config can fill them in; then we will
	// fill in a default config at the end if they are still unset
	cfg := &tls.Config{
		NextProtos: p.ALPN,
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			// TODO: I don't love how this works: we pre-build certmagic configs
			// so that handshakes are faster. Unfortunately, certmagic configs are
			// comprised of settings from both a TLS connection policy and a TLS
			// automation policy. The only two fields (as of March 2020; v2 beta 17)
			// of a certmagic config that come from the TLS connection policy are
			// CertSelection and DefaultServerName, so an automation policy is what
			// builds the base certmagic config. Since the pre-built config is
			// shared, I don't think we can change any of its fields per-handshake,
			// hence the awkward shallow copy (dereference) here and the subsequent
			// changing of some of its fields. I'm worried this dereference allocates
			// more at handshake-time, but I don't know how to practically pre-build
			// a certmagic config for each combination of conn policy + automation policy...
			cfg := *tlsApp.getConfigForName(hello.ServerName)
			if p.CertSelection != nil {
				// you would think we could just set this whether or not
				// p.CertSelection is nil, but that leads to panics if
				// it is, because cfg.CertSelection is an interface,
				// so it will have a non-nil value even if the actual
				// value underlying it is nil (sigh)
				cfg.CertSelection = p.CertSelection
			}
			cfg.DefaultServerName = p.DefaultSNI
			cfg.FallbackServerName = p.FallbackSNI

			// TODO: experimental: if a handshake context module is configured, allow it
			// to modify the context before passing it into CertMagic's GetCertificate
			ctx := hello.Context()
			if p.handshakeContext != nil {
				ctx, err = p.handshakeContext.HandshakeContext(hello)
				if err != nil {
					return nil, fmt.Errorf("handshake context: %v", err)
				}
			}

			return cfg.GetCertificateWithContext(ctx, hello)
		},
		MinVersion: tls.VersionTLS12,
		MaxVersion: tls.VersionTLS13,
	}

	// session tickets support
	if tlsApp.SessionTickets != nil {
		cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled

		// session ticket key rotation
		tlsApp.SessionTickets.register(cfg)
		ctx.OnCancel(func() {
			// do cleanup when the context is canceled because,
			// though unlikely, it is possible that a context
			// needing a TLS server config could exist for less
			// than the lifetime of the whole app
			tlsApp.SessionTickets.unregister(cfg)
		})
	}

	// TODO: Clean up session ticket active locks in storage if app (or process) is being closed!

	// add all the cipher suites in order, without duplicates
	cipherSuitesAdded := make(map[uint16]struct{})
	for _, csName := range p.CipherSuites {
		csID := CipherSuiteID(csName)
		if csID == 0 {
			return fmt.Errorf("unsupported cipher suite: %s", csName)
		}
		if _, ok := cipherSuitesAdded[csID]; !ok {
			cipherSuitesAdded[csID] = struct{}{}
			cfg.CipherSuites = append(cfg.CipherSuites, csID)
		}
	}

	// add all the curve preferences in order, without duplicates
	curvesAdded := make(map[tls.CurveID]struct{})
	for _, curveName := range p.Curves {
		curveID := SupportedCurves[curveName]
		if _, ok := curvesAdded[curveID]; !ok {
			curvesAdded[curveID] = struct{}{}
			cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)
		}
	}

	// ensure ALPN includes the ACME TLS-ALPN protocol
	// (skipped only when NextProtos is a non-nil empty slice,
	// i.e. ALPN was explicitly disabled)
	alpnFound := slices.Contains(p.ALPN, acmez.ACMETLS1Protocol)
	if !alpnFound && (cfg.NextProtos == nil || len(cfg.NextProtos) > 0) {
		cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol)
	}

	// min and max protocol versions
	if (p.ProtocolMin != "" && p.ProtocolMax != "") && p.ProtocolMin > p.ProtocolMax {
		return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax)
	}
	if p.ProtocolMin != "" {
		cfg.MinVersion = SupportedProtocols[p.ProtocolMin]
	}
	if p.ProtocolMax != "" {
		cfg.MaxVersion = SupportedProtocols[p.ProtocolMax]
	}

	// client authentication
	if p.ClientAuthentication != nil {
		if err := p.ClientAuthentication.provision(ctx); err != nil {
			return fmt.Errorf("provisioning client CA: %v", err)
		}
		if err := p.ClientAuthentication.ConfigureTLSConfig(cfg); err != nil {
			return fmt.Errorf("configuring TLS client authentication: %v", err)
		}

		// Prevent privilege escalation in case multiple vhosts are configured for
		// this TLS server; we could potentially figure out if that's the case, but
		// that might be complex to get right every time. Actually, two proper
		// solutions could leave tickets enabled, but I am not sure how to do them
		// properly without significant time investment; there may be new Go
		// APIs that allow this (Wrap/UnwrapSession?) but I do not know how to use
		// them at this time. TODO: one of these is a possible future enhancement:
		// A) Prevent resumptions across server identities (certificates): binding the ticket to the
		// certificate we would serve in a full handshake, or even bind a ticket to the exact SNI
		// it was issued under (though there are proposals for session resumption across hostnames).
		// B) Prevent resumptions falsely authenticating a client: include the realm in the ticket,
		// so that it can be validated upon resumption.
		cfg.SessionTicketsDisabled = true
	}

	if p.InsecureSecretsLog != "" {
		filename, err := caddy.NewReplacer().ReplaceOrErr(p.InsecureSecretsLog, true, true)
		if err != nil {
			return err
		}
		filename, err = caddy.FastAbs(filename)
		if err != nil {
			return err
		}
		// the log file is pooled so multiple policies (and reloads) share one handle
		logFile, _, err := secretsLogPool.LoadOrNew(filename, func() (caddy.Destructor, error) {
			w, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600)
			return destructableWriter{w}, err
		})
		if err != nil {
			return err
		}
		ctx.OnCancel(func() { _, _ = secretsLogPool.Delete(filename) })
		cfg.KeyLogWriter = logFile.(io.Writer)

		if c := tlsApp.logger.Check(zapcore.WarnLevel, "TLS SECURITY COMPROMISED: secrets logging is enabled!"); c != nil {
			c.Write(zap.String("log_filename", filename))
		}
	}

	setDefaultTLSParams(cfg)

	p.TLSConfig = cfg

	return nil
}

// SettingsEmpty returns true if p's settings (fields
// except the matchers) are all empty/unset.
func (p ConnectionPolicy) SettingsEmpty() bool { return p.CertSelection == nil && p.CipherSuites == nil && p.Curves == nil && p.ALPN == nil && p.ProtocolMin == "" && p.ProtocolMax == "" && p.ClientAuthentication == nil && p.DefaultSNI == "" && p.FallbackSNI == "" && p.InsecureSecretsLog == "" } // SettingsEqual returns true if p's settings (fields // except the matchers) are the same as q. func (p ConnectionPolicy) SettingsEqual(q ConnectionPolicy) bool { p.MatchersRaw = nil q.MatchersRaw = nil return reflect.DeepEqual(p, q) } // UnmarshalCaddyfile sets up the ConnectionPolicy from Caddyfile tokens. Syntax: // // connection_policy { // alpn // cert_selection { // ... // } // ciphers // client_auth { // ... // } // curves // default_sni // match { // ... // } // protocols [] // # EXPERIMENTAL: // drop // fallback_sni // insecure_secrets_log // } func (cp *ConnectionPolicy) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { _, wrapper := d.Next(), d.Val() // No same-line options are supported if d.CountRemainingArgs() > 0 { return d.ArgErr() } var hasCertSelection, hasClientAuth, hasDefaultSNI, hasDrop, hasFallbackSNI, hasInsecureSecretsLog, hasMatch, hasProtocols bool for nesting := d.Nesting(); d.NextBlock(nesting); { optionName := d.Val() switch optionName { case "alpn": if d.CountRemainingArgs() == 0 { return d.ArgErr() } cp.ALPN = append(cp.ALPN, d.RemainingArgs()...) 
case "cert_selection": if hasCertSelection { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } p := &CustomCertSelectionPolicy{} if err := p.UnmarshalCaddyfile(d.NewFromNextSegment()); err != nil { return err } cp.CertSelection, hasCertSelection = p, true case "client_auth": if hasClientAuth { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } ca := &ClientAuthentication{} if err := ca.UnmarshalCaddyfile(d.NewFromNextSegment()); err != nil { return err } cp.ClientAuthentication, hasClientAuth = ca, true case "ciphers": if d.CountRemainingArgs() == 0 { return d.ArgErr() } cp.CipherSuites = append(cp.CipherSuites, d.RemainingArgs()...) case "curves": if d.CountRemainingArgs() == 0 { return d.ArgErr() } cp.Curves = append(cp.Curves, d.RemainingArgs()...) case "default_sni": if hasDefaultSNI { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } if d.CountRemainingArgs() != 1 { return d.ArgErr() } _, cp.DefaultSNI, hasDefaultSNI = d.NextArg(), d.Val(), true case "drop": // EXPERIMENTAL if hasDrop { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } cp.Drop, hasDrop = true, true case "fallback_sni": // EXPERIMENTAL if hasFallbackSNI { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } if d.CountRemainingArgs() != 1 { return d.ArgErr() } _, cp.FallbackSNI, hasFallbackSNI = d.NextArg(), d.Val(), true case "insecure_secrets_log": // EXPERIMENTAL if hasInsecureSecretsLog { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } if d.CountRemainingArgs() != 1 { return d.ArgErr() } _, cp.InsecureSecretsLog, hasInsecureSecretsLog = d.NextArg(), d.Val(), true case "match": if hasMatch { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } matcherSet, err := ParseCaddyfileNestedMatcherSet(d) if err != nil { return err } cp.MatchersRaw, hasMatch = matcherSet, true case "protocols": if hasProtocols { return d.Errf("duplicate %s option '%s'", wrapper, optionName) } if d.CountRemainingArgs() == 
0 || d.CountRemainingArgs() > 2 { return d.ArgErr() } _, cp.ProtocolMin, hasProtocols = d.NextArg(), d.Val(), true if d.NextArg() { cp.ProtocolMax = d.Val() } default: return d.ArgErr() } // No nested blocks are supported if d.NextBlock(nesting + 1) { return d.Errf("malformed %s option '%s': blocks are not supported", wrapper, optionName) } } return nil } // ClientAuthentication configures TLS client auth. type ClientAuthentication struct { // Certificate authority module which provides the certificate pool of trusted certificates CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"` ca CA // Deprecated: Use the `ca` field with the `tls.ca_pool.source.inline` module instead. // A list of base64 DER-encoded CA certificates // against which to validate client certificates. // Client certs which are not signed by any of // these CAs will be rejected. TrustedCACerts []string `json:"trusted_ca_certs,omitempty"` // Deprecated: Use the `ca` field with the `tls.ca_pool.source.file` module instead. // TrustedCACertPEMFiles is a list of PEM file names // from which to load certificates of trusted CAs. // Client certificates which are not signed by any of // these CA certificates will be rejected. TrustedCACertPEMFiles []string `json:"trusted_ca_certs_pem_files,omitempty"` // Deprecated: This field is deprecated and will be removed in // a future version. Please use the `validators` field instead // with the tls.client_auth.verifier.leaf module instead. // // A list of base64 DER-encoded client leaf certs // to accept. If this list is not empty, client certs // which are not in this list will be rejected. TrustedLeafCerts []string `json:"trusted_leaf_certs,omitempty"` // Client certificate verification modules. These can perform // custom client authentication checks, such as ensuring the // certificate is not revoked. 
VerifiersRaw []json.RawMessage `json:"verifiers,omitempty" caddy:"namespace=tls.client_auth.verifier inline_key=verifier"` verifiers []ClientCertificateVerifier // The mode for authenticating the client. Allowed values are: // // Mode | Description // -----|--------------- // `request` | Ask clients for a certificate, but allow even if there isn't one; do not verify it // `require` | Require clients to present a certificate, but do not verify it // `verify_if_given` | Ask clients for a certificate; allow even if there isn't one, but verify it if there is // `require_and_verify` | Require clients to present a valid certificate that is verified // // The default mode is `require_and_verify` if any // TrustedCACerts or TrustedCACertPEMFiles or TrustedLeafCerts // are provided; otherwise, the default mode is `require`. Mode string `json:"mode,omitempty"` existingVerifyPeerCert func([][]byte, [][]*x509.Certificate) error } // UnmarshalCaddyfile parses the Caddyfile segment to set up the client authentication. Syntax: // // client_auth { // mode [request|require|verify_if_given|require_and_verify] // trust_pool { // ... // } // verifier // } // // If `mode` is not provided, it defaults to `require_and_verify` if `trust_pool` is provided. // Otherwise, it defaults to `require`. func (ca *ClientAuthentication) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { for d.NextArg() { // consume any tokens on the same line, if any. } for nesting := d.Nesting(); d.NextBlock(nesting); { subdir := d.Val() switch subdir { case "mode": if d.CountRemainingArgs() > 1 { return d.ArgErr() } if !d.Args(&ca.Mode) { return d.ArgErr() } case "trusted_ca_cert": caddy.Log().Warn("The 'trusted_ca_cert' field is deprecated. 
Use the 'trust_pool' field instead.") if len(ca.CARaw) != 0 { return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'") } if !d.NextArg() { return d.ArgErr() } ca.TrustedCACerts = append(ca.TrustedCACerts, d.Val()) case "trusted_leaf_cert": if !d.NextArg() { return d.ArgErr() } ca.TrustedLeafCerts = append(ca.TrustedLeafCerts, d.Val()) case "trusted_ca_cert_file": caddy.Log().Warn("The 'trusted_ca_cert_file' field is deprecated. Use the 'trust_pool' field instead.") if len(ca.CARaw) != 0 { return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'") } if !d.NextArg() { return d.ArgErr() } filename := d.Val() ders, err := convertPEMFilesToDER(filename) if err != nil { return d.WrapErr(err) } ca.TrustedCACerts = append(ca.TrustedCACerts, ders...) case "trusted_leaf_cert_file": if !d.NextArg() { return d.ArgErr() } filename := d.Val() ders, err := convertPEMFilesToDER(filename) if err != nil { return d.WrapErr(err) } ca.TrustedLeafCerts = append(ca.TrustedLeafCerts, ders...) case "trust_pool": if len(ca.TrustedCACerts) != 0 { return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'") } if !d.NextArg() { return d.ArgErr() } modName := d.Val() mod, err := caddyfile.UnmarshalModule(d, "tls.ca_pool.source."+modName) if err != nil { return d.WrapErr(err) } caMod, ok := mod.(CA) if !ok { return fmt.Errorf("trust_pool module '%s' is not a certificate pool provider", caMod) } ca.CARaw = caddyconfig.JSONModuleObject(caMod, "provider", modName, nil) case "verifier": if !d.NextArg() { return d.ArgErr() } vType := d.Val() modID := "tls.client_auth.verifier." 
+ vType unm, err := caddyfile.UnmarshalModule(d, modID) if err != nil { return err } _, ok := unm.(ClientCertificateVerifier) if !ok { return d.Errf("module '%s' is not a caddytls.ClientCertificateVerifier", modID) } ca.VerifiersRaw = append(ca.VerifiersRaw, caddyconfig.JSONModuleObject(unm, "verifier", vType, nil)) default: return d.Errf("unknown subdirective for client_auth: %s", subdir) } } // only trust_ca_cert or trust_ca_cert_file was specified if len(ca.TrustedCACerts) > 0 { fileMod := &InlineCAPool{} fileMod.TrustedCACerts = append(fileMod.TrustedCACerts, ca.TrustedCACerts...) ca.CARaw = caddyconfig.JSONModuleObject(fileMod, "provider", "inline", nil) ca.TrustedCACertPEMFiles, ca.TrustedCACerts = nil, nil } return nil } func convertPEMFilesToDER(filename string) ([]string, error) { certDataPEM, err := os.ReadFile(filename) if err != nil { return nil, err } var ders []string // while block is not nil, we have more certificates in the file for block, rest := pem.Decode(certDataPEM); block != nil; block, rest = pem.Decode(rest) { if block.Type != "CERTIFICATE" { return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename) } ders = append( ders, base64.StdEncoding.EncodeToString(block.Bytes), ) } // if we decoded nothing, return an error if len(ders) == 0 { return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename) } return ders, nil } func (clientauth *ClientAuthentication) provision(ctx caddy.Context) error { if len(clientauth.CARaw) > 0 && (len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0) { return fmt.Errorf("conflicting config for client authentication trust CA") } // convert all named file paths to inline if len(clientauth.TrustedCACertPEMFiles) > 0 { for _, fpath := range clientauth.TrustedCACertPEMFiles { ders, err := convertPEMFilesToDER(fpath) if err != nil { return err } clientauth.TrustedCACerts = append(clientauth.TrustedCACerts, ders...) 
} } // if we have TrustedCACerts explicitly set, create an 'inline' CA and return if len(clientauth.TrustedCACerts) > 0 { caPool := InlineCAPool{ TrustedCACerts: clientauth.TrustedCACerts, } err := caPool.Provision(ctx) if err != nil { return err } clientauth.ca = caPool } // if we don't have any CARaw set, there's not much work to do if clientauth.CARaw == nil { return nil } caRaw, err := ctx.LoadModule(clientauth, "CARaw") if err != nil { return err } ca, ok := caRaw.(CA) if !ok { return fmt.Errorf("'ca' module '%s' is not a certificate pool provider", ca) } clientauth.ca = ca return nil } // Active returns true if clientauth has an actionable configuration. func (clientauth ClientAuthentication) Active() bool { return len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0 || len(clientauth.TrustedLeafCerts) > 0 || // TODO: DEPRECATED len(clientauth.VerifiersRaw) > 0 || len(clientauth.Mode) > 0 || clientauth.CARaw != nil || clientauth.ca != nil } // ConfigureTLSConfig sets up cfg to enforce clientauth's configuration. 
func (clientauth *ClientAuthentication) ConfigureTLSConfig(cfg *tls.Config) error { // if there's no actionable client auth, simply disable it if !clientauth.Active() { cfg.ClientAuth = tls.NoClientCert return nil } // enforce desired mode of client authentication if len(clientauth.Mode) > 0 { switch clientauth.Mode { case "request": cfg.ClientAuth = tls.RequestClientCert case "require": cfg.ClientAuth = tls.RequireAnyClientCert case "verify_if_given": cfg.ClientAuth = tls.VerifyClientCertIfGiven case "require_and_verify": cfg.ClientAuth = tls.RequireAndVerifyClientCert default: return fmt.Errorf("client auth mode not recognized: %s", clientauth.Mode) } } else { // otherwise, set a safe default mode if len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0 || len(clientauth.TrustedLeafCerts) > 0 || clientauth.CARaw != nil || clientauth.ca != nil { cfg.ClientAuth = tls.RequireAndVerifyClientCert } else { cfg.ClientAuth = tls.RequireAnyClientCert } } // enforce CA verification by adding CA certs to the ClientCAs pool if clientauth.ca != nil { cfg.ClientCAs = clientauth.ca.CertPool() } // TODO: DEPRECATED: Only here for backwards compatibility. 
// If leaf cert is specified, enforce by adding a client auth module if len(clientauth.TrustedLeafCerts) > 0 { caddy.Log().Named("tls.connection_policy").Warn("trusted_leaf_certs is deprecated; use leaf verifier module instead") var trustedLeafCerts []*x509.Certificate for _, clientCertString := range clientauth.TrustedLeafCerts { clientCert, err := decodeBase64DERCert(clientCertString) if err != nil { return fmt.Errorf("parsing certificate: %v", err) } trustedLeafCerts = append(trustedLeafCerts, clientCert) } clientauth.verifiers = append(clientauth.verifiers, LeafCertClientAuth{trustedLeafCerts: trustedLeafCerts}) } // if a custom verification function already exists, wrap it clientauth.existingVerifyPeerCert = cfg.VerifyPeerCertificate cfg.VerifyConnection = clientauth.verifyConnection return nil } // verifyConnection is for use as a tls.Config.VerifyConnection callback // to do custom client certificate verification. It is intended for // installation only by clientauth.ConfigureTLSConfig(). // // Unlike VerifyPeerCertificate, VerifyConnection is called on every // connection including resumed sessions, preventing session-resumption bypass. func (clientauth *ClientAuthentication) verifyConnection(cs tls.ConnectionState) error { // first use any pre-existing custom verification function if clientauth.existingVerifyPeerCert != nil { rawCerts := make([][]byte, len(cs.PeerCertificates)) for i, cert := range cs.PeerCertificates { rawCerts[i] = cert.Raw } if err := clientauth.existingVerifyPeerCert(rawCerts, cs.VerifiedChains); err != nil { return err } } for _, verifier := range clientauth.verifiers { if err := verifier.VerifyClientCertificate(nil, cs.VerifiedChains); err != nil { return err } } return nil } // decodeBase64DERCert base64-decodes, then DER-decodes, certStr. 
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { derBytes, err := base64.StdEncoding.DecodeString(certStr) if err != nil { return nil, err } return x509.ParseCertificate(derBytes) } // setDefaultTLSParams sets the default TLS cipher suites, protocol versions, // and server preferences of cfg if they are not already set; it does not // overwrite values, only fills in missing values. func setDefaultTLSParams(cfg *tls.Config) { if len(cfg.CipherSuites) == 0 { cfg.CipherSuites = getOptimalDefaultCipherSuites() } // Not a cipher suite, but still important for mitigating protocol downgrade attacks // (prepend since having it at end breaks http2 due to non-h2-approved suites before it) cfg.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, cfg.CipherSuites...) if len(cfg.CurvePreferences) == 0 { cfg.CurvePreferences = defaultCurves } // crypto/tls docs: // "If EncryptedClientHelloKeys is set, MinVersion, if set, must be VersionTLS13." if cfg.EncryptedClientHelloKeys != nil && cfg.MinVersion != 0 && cfg.MinVersion < tls.VersionTLS13 { cfg.MinVersion = tls.VersionTLS13 } } // LeafCertClientAuth verifies the client's leaf certificate. type LeafCertClientAuth struct { LeafCertificateLoadersRaw []json.RawMessage `json:"leaf_certs_loaders,omitempty" caddy:"namespace=tls.leaf_cert_loader inline_key=loader"` trustedLeafCerts []*x509.Certificate } // CaddyModule returns the Caddy module information. 
func (LeafCertClientAuth) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.client_auth.verifier.leaf", New: func() caddy.Module { return new(LeafCertClientAuth) }, } } func (l *LeafCertClientAuth) Provision(ctx caddy.Context) error { if l.LeafCertificateLoadersRaw == nil { return nil } val, err := ctx.LoadModule(l, "LeafCertificateLoadersRaw") if err != nil { return fmt.Errorf("could not parse leaf certificates loaders: %s", err.Error()) } trustedLeafCertloaders := []LeafCertificateLoader{} for _, loader := range val.([]any) { trustedLeafCertloaders = append(trustedLeafCertloaders, loader.(LeafCertificateLoader)) } trustedLeafCertificates := []*x509.Certificate{} for _, loader := range trustedLeafCertloaders { certs, err := loader.LoadLeafCertificates() if err != nil { return fmt.Errorf("could not load leaf certificates: %s", err.Error()) } trustedLeafCertificates = append(trustedLeafCertificates, certs...) } l.trustedLeafCerts = trustedLeafCertificates return nil } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. 
func (l *LeafCertClientAuth) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.NextArg() // accommodate the use of one-liners if d.CountRemainingArgs() > 1 { d.NextArg() modName := d.Val() mod, err := caddyfile.UnmarshalModule(d, "tls.leaf_cert_loader."+modName) if err != nil { return d.WrapErr(err) } vMod, ok := mod.(LeafCertificateLoader) if !ok { return fmt.Errorf("leaf module '%s' is not a leaf certificate loader", vMod) } l.LeafCertificateLoadersRaw = append( l.LeafCertificateLoadersRaw, caddyconfig.JSONModuleObject(vMod, "loader", modName, nil), ) return nil } // accommodate the use of nested blocks for nesting := d.Nesting(); d.NextBlock(nesting); { modName := d.Val() mod, err := caddyfile.UnmarshalModule(d, "tls.leaf_cert_loader."+modName) if err != nil { return d.WrapErr(err) } vMod, ok := mod.(LeafCertificateLoader) if !ok { return fmt.Errorf("leaf module '%s' is not a leaf certificate loader", vMod) } l.LeafCertificateLoadersRaw = append( l.LeafCertificateLoadersRaw, caddyconfig.JSONModuleObject(vMod, "loader", modName, nil), ) } return nil } func (l LeafCertClientAuth) VerifyClientCertificate(rawCerts [][]byte, _ [][]*x509.Certificate) error { if len(rawCerts) == 0 { return fmt.Errorf("no client certificate provided") } remoteLeafCert, err := x509.ParseCertificate(rawCerts[0]) if err != nil { return fmt.Errorf("can't parse the given certificate: %s", err.Error()) } if slices.ContainsFunc(l.trustedLeafCerts, remoteLeafCert.Equal) { return nil } return fmt.Errorf("client leaf certificate failed validation") } // PublicKeyAlgorithm is a JSON-unmarshalable wrapper type. type PublicKeyAlgorithm x509.PublicKeyAlgorithm // UnmarshalJSON satisfies json.Unmarshaler. 
func (a *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error {
	// b is the raw JSON value, e.g. `"rsa"`; strip the quotes and normalize case
	algoStr := strings.ToLower(strings.Trim(string(b), `"`))
	algo, ok := publicKeyAlgorithms[algoStr]
	if !ok {
		return fmt.Errorf("unrecognized public key algorithm: %s (expected one of %v)", algoStr, publicKeyAlgorithms)
	}
	*a = PublicKeyAlgorithm(algo)
	return nil
}

// ConnectionMatcher is a type which matches TLS handshakes.
type ConnectionMatcher interface {
	Match(*tls.ClientHelloInfo) bool
}

// LeafCertificateLoader is a type that loads the trusted leaf certificates
// for the tls.leaf_cert_loader modules
type LeafCertificateLoader interface {
	LoadLeafCertificates() ([]*x509.Certificate, error)
}

// ClientCertificateVerifier is a type which verifies client certificates.
// It is called during verifyPeerCertificate in the TLS handshake.
type ClientCertificateVerifier interface {
	VerifyClientCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
}

// defaultALPN lists the ALPN protocols advertised when none are configured.
var defaultALPN = []string{"h2", "http/1.1"}

// destructableWriter adapts *os.File so it can live in a caddy.UsagePool,
// closing the file when the pool destructs it.
type destructableWriter struct{ *os.File }

func (d destructableWriter) Destruct() error { return d.Close() }

// secretsLogPool shares opened secrets-log files (keyed by filename)
// across connection policies so each file is opened once.
var secretsLogPool = caddy.NewUsagePool()

// Interface guards
var (
	_ caddyfile.Unmarshaler = (*ClientAuthentication)(nil)
	_ caddyfile.Unmarshaler = (*ConnectionPolicy)(nil)
	_ caddyfile.Unmarshaler = (*LeafCertClientAuth)(nil)
)

// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
// matcher set, and returns its raw module map value.
func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
	matcherMap := make(map[string]ConnectionMatcher)

	// group the tokens by matcher name so repeated mentions of the
	// same matcher are fed to a single module unmarshal
	tokensByMatcherName := make(map[string][]caddyfile.Token)
	for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
		matcherName := d.Val()
		tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
	}

	// unmarshal each matcher module from its collected tokens
	for matcherName, tokens := range tokensByMatcherName {
		dd := caddyfile.NewDispenser(tokens)
		dd.Next() // consume wrapper name
		unm, err := caddyfile.UnmarshalModule(dd, "tls.handshake_match."+matcherName)
		if err != nil {
			return nil, err
		}
		cm, ok := unm.(ConnectionMatcher)
		if !ok {
			return nil, fmt.Errorf("matcher module '%s' is not a connection matcher", matcherName)
		}
		matcherMap[matcherName] = cm
	}

	// serialize the matchers back to JSON for the raw module map
	matcherSet := make(caddy.ModuleMap)
	for name, matcher := range matcherMap {
		jsonBytes, err := json.Marshal(matcher)
		if err != nil {
			return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
		}
		matcherSet[name] = jsonBytes
	}

	return matcherSet, nil
}



================================================
FILE: modules/caddytls/connpolicy_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls import ( "encoding/json" "fmt" "reflect" "testing" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func TestClientAuthenticationUnmarshalCaddyfileWithDirectiveName(t *testing.T) { const test_der_1 = `MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==` const test_cert_file_1 = "../../caddytest/caddy.ca.cer" type args struct { d *caddyfile.Dispenser } tests := []struct { name string args args expected ClientAuthentication wantErr bool }{ { name: "empty client_auth block does not error", args: args{ d: caddyfile.NewTestDispenser( `client_auth { }`, ), }, wantErr: false, }, { name: "providing both 'trust_pool' and 'trusted_ca_cert' returns an error", args: args{ d: caddyfile.NewTestDispenser( `client_auth { trust_pool inline 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== trusted_ca_cert 
MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ== }`), }, wantErr: true, }, { name: "trust_pool without a module argument returns an error", args: args{ d: caddyfile.NewTestDispenser( `client_auth { trust_pool }`), }, wantErr: true, }, { name: "providing more than 1 mode produces an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { mode require request } `), }, wantErr: true, }, { name: "not providing 'mode' argument produces an error", args: args{d: caddyfile.NewTestDispenser(` client_auth { mode } `)}, wantErr: true, }, { name: "providing a single 'mode' argument sets the mode", args: args{ d: caddyfile.NewTestDispenser(` client_auth { mode require } `), }, expected: ClientAuthentication{ Mode: "require", }, wantErr: false, }, { name: "not providing an argument to 'trusted_ca_cert' produces an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { trusted_ca_cert } `), }, 
wantErr: true, }, { name: "not providing an argument to 'trusted_leaf_cert' produces an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { trusted_leaf_cert } `), }, wantErr: true, }, { name: "not providing an argument to 'trusted_ca_cert_file' produces an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { trusted_ca_cert_file } `), }, wantErr: true, }, { name: "not providing an argument to 'trusted_leaf_cert_file' produces an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { trusted_leaf_cert_file } `), }, wantErr: true, }, { name: "using 'trusted_ca_cert' adapts successfully", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trusted_ca_cert %s }`, test_der_1)), }, expected: ClientAuthentication{ CARaw: json.RawMessage(fmt.Sprintf(`{"provider":"inline","trusted_ca_certs":["%s"]}`, test_der_1)), }, }, { name: "using 'inline' trust_pool loads the module successfully", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trust_pool inline { trust_der %s } } `, test_der_1)), }, expected: ClientAuthentication{ CARaw: json.RawMessage(fmt.Sprintf(`{"provider":"inline","trusted_ca_certs":["%s"]}`, test_der_1)), }, }, { name: "setting 'trusted_ca_cert' and 'trust_pool' produces an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trusted_ca_cert %s trust_pool inline { trust_der %s } }`, test_der_1, test_der_1)), }, wantErr: true, }, { name: "setting 'trust_pool' and 'trusted_ca_cert' produces an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trust_pool inline { trust_der %s } trusted_ca_cert %s }`, test_der_1, test_der_1)), }, wantErr: true, }, { name: "setting 'trust_pool' and 'trusted_ca_cert' produces an error", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trust_pool inline { trust_der %s } trusted_ca_cert_file %s }`, test_der_1, test_cert_file_1)), }, wantErr: true, }, { name: "configuring 
'trusted_ca_cert_file' without an argument is an error", args: args{ d: caddyfile.NewTestDispenser(` client_auth { trusted_ca_cert_file } `), }, wantErr: true, }, { name: "configuring 'trusted_ca_cert_file' produces config with 'inline' provider", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trusted_ca_cert_file %s }`, test_cert_file_1), ), }, expected: ClientAuthentication{ CARaw: json.RawMessage(fmt.Sprintf(`{"provider":"inline","trusted_ca_certs":["%s"]}`, test_der_1)), }, wantErr: false, }, { name: "configuring leaf certs does not conflict with 'trust_pool'", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trust_pool inline { trust_der %s } trusted_leaf_cert %s }`, test_der_1, test_der_1)), }, expected: ClientAuthentication{ CARaw: json.RawMessage(fmt.Sprintf(`{"provider":"inline","trusted_ca_certs":["%s"]}`, test_der_1)), TrustedLeafCerts: []string{test_der_1}, }, }, { name: "providing trusted leaf certificate file loads the cert successfully", args: args{ d: caddyfile.NewTestDispenser(fmt.Sprintf(` client_auth { trusted_leaf_cert_file %s }`, test_cert_file_1)), }, expected: ClientAuthentication{ TrustedLeafCerts: []string{test_der_1}, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ca := &ClientAuthentication{} if err := ca.UnmarshalCaddyfile(tt.args.d); (err != nil) != tt.wantErr { t.Errorf("ClientAuthentication.UnmarshalCaddyfile() error = %v, wantErr %v", err, tt.wantErr) return } if !tt.wantErr && !reflect.DeepEqual(&tt.expected, ca) { t.Errorf("ClientAuthentication.UnmarshalCaddyfile() = %v, want %v", ca, tt.expected) } }) } } func TestClientAuthenticationProvision(t *testing.T) { tests := []struct { name string ca ClientAuthentication wantErr bool }{ { name: "specifying both 'CARaw' and 'TrustedCACerts' produces an error", ca: ClientAuthentication{ CARaw: json.RawMessage(`{"provider":"inline","trusted_ca_certs":["foo"]}`), TrustedCACerts: []string{"foo"}, }, wantErr: 
true, }, { name: "specifying both 'CARaw' and 'TrustedCACertPEMFiles' produces an error", ca: ClientAuthentication{ CARaw: json.RawMessage(`{"provider":"inline","trusted_ca_certs":["foo"]}`), TrustedCACertPEMFiles: []string{"foo"}, }, wantErr: true, }, { name: "setting 'TrustedCACerts' provisions the cert pool", ca: ClientAuthentication{ TrustedCACerts: []string{test_der_1}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.ca.provision(caddy.Context{}) if (err != nil) != tt.wantErr { t.Errorf("ClientAuthentication.provision() error = %v, wantErr %v", err, tt.wantErr) return } if !tt.wantErr { if tt.ca.ca.CertPool() == nil { t.Error("CertPool is nil, expected non-nil value") } } }) } } ================================================ FILE: modules/caddytls/distributedstek/distributedstek.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package distributedstek provides TLS session ticket ephemeral // keys (STEKs) in a distributed fashion by utilizing configured // storage for locking and key sharing. This allows a cluster of // machines to optimally resume TLS sessions in a load-balanced // environment without any hassle. 
This is similar to what // Twitter does, but without needing to rely on SSH, as it is // built into the web server this way: // https://blog.twitter.com/engineering/en_us/a/2013/forward-secrecy-at-twitter.html package distributedstek import ( "bytes" "encoding/gob" "encoding/json" "errors" "fmt" "io/fs" "log" "runtime/debug" "time" "github.com/caddyserver/certmagic" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { caddy.RegisterModule(Provider{}) } // Provider implements a distributed STEK provider. This // module will obtain STEKs from a storage module instead // of generating STEKs internally. This allows STEKs to be // coordinated, improving TLS session resumption in a cluster. type Provider struct { // The storage module wherein to store and obtain session // ticket keys. If unset, Caddy's default/global-configured // storage module will be used. Storage json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` storage certmagic.Storage stekConfig *caddytls.SessionTicketService timer *time.Timer ctx caddy.Context } // CaddyModule returns the Caddy module information. func (Provider) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.stek.distributed", New: func() caddy.Module { return new(Provider) }, } } // Provision provisions s. func (s *Provider) Provision(ctx caddy.Context) error { s.ctx = ctx // unpack the storage module to use, if different from the default if s.Storage != nil { val, err := ctx.LoadModule(s, "Storage") if err != nil { return fmt.Errorf("loading TLS storage module: %s", err) } cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage() if err != nil { return fmt.Errorf("creating TLS storage configuration: %v", err) } s.storage = cmStorage } // otherwise, use default storage if s.storage == nil { s.storage = ctx.Storage() } return nil } // Initialize sets the configuration for s and returns the starting keys. 
func (s *Provider) Initialize(config *caddytls.SessionTicketService) ([][32]byte, error) { // keep a reference to the config; we'll need it when rotating keys s.stekConfig = config dstek, err := s.getSTEK() if err != nil { return nil, err } // create timer for the remaining time on the interval; // this timer is cleaned up only when rotate() returns s.timer = time.NewTimer(time.Until(dstek.NextRotation)) return dstek.Keys, nil } // Next returns a channel which transmits the latest session ticket keys. func (s *Provider) Next(doneChan <-chan struct{}) <-chan [][32]byte { keysChan := make(chan [][32]byte) go s.rotate(doneChan, keysChan) return keysChan } func (s *Provider) loadSTEK() (distributedSTEK, error) { var sg distributedSTEK gobBytes, err := s.storage.Load(s.ctx, stekFileName) if err != nil { return sg, err // don't wrap, in case error is certmagic.ErrNotExist } dec := gob.NewDecoder(bytes.NewReader(gobBytes)) err = dec.Decode(&sg) if err != nil { return sg, fmt.Errorf("STEK gob corrupted: %v", err) } return sg, nil } func (s *Provider) storeSTEK(dstek distributedSTEK) error { var buf bytes.Buffer err := gob.NewEncoder(&buf).Encode(dstek) if err != nil { return fmt.Errorf("encoding STEK gob: %v", err) } err = s.storage.Store(s.ctx, stekFileName, buf.Bytes()) if err != nil { return fmt.Errorf("storing STEK gob: %v", err) } return nil } // getSTEK locks and loads the current STEK from storage. If none // currently exists, a new STEK is created and persisted. If the // current STEK is outdated (NextRotation time is in the past), // then it is rotated and persisted. The resulting STEK is returned. 
func (s *Provider) getSTEK() (distributedSTEK, error) {
	// the storage lock synchronizes creation/rotation across the whole cluster
	err := s.storage.Lock(s.ctx, stekLockName)
	if err != nil {
		return distributedSTEK{}, fmt.Errorf("failed to acquire storage lock: %v", err)
	}

	//nolint:errcheck
	defer s.storage.Unlock(s.ctx, stekLockName)

	// load the current STEKs from storage
	dstek, err := s.loadSTEK()
	if errors.Is(err, fs.ErrNotExist) {
		// if there is none, then make some right away
		dstek, err = s.rotateKeys(dstek)
		if err != nil {
			return dstek, fmt.Errorf("creating new STEK: %v", err)
		}
	} else if err != nil {
		// some other error, that's a problem
		return dstek, fmt.Errorf("loading STEK: %v", err)
	} else if time.Now().After(dstek.NextRotation) {
		// if current STEKs are outdated, rotate them
		dstek, err = s.rotateKeys(dstek)
		if err != nil {
			return dstek, fmt.Errorf("rotating keys: %v", err)
		}
	}

	return dstek, nil
}

// rotateKeys rotates the keys of oldSTEK and returns the new distributedSTEK
// with updated keys and timestamps. It stores the returned STEK in storage,
// so this function must only be called in a storage-provided lock.
func (s *Provider) rotateKeys(oldSTEK distributedSTEK) (distributedSTEK, error) {
	var newSTEK distributedSTEK
	var err error

	newSTEK.Keys, err = s.stekConfig.RotateSTEKs(oldSTEK.Keys)
	if err != nil {
		return newSTEK, err
	}

	now := time.Now()
	newSTEK.LastRotation = now
	newSTEK.NextRotation = now.Add(time.Duration(s.stekConfig.RotationInterval))

	// persist so other instances in the cluster pick up the same keys
	err = s.storeSTEK(newSTEK)
	if err != nil {
		return newSTEK, err
	}

	return newSTEK, nil
}

// rotate rotates keys on a regular basis, sending each updated set of
// keys down keysChan, until doneChan is closed.
func (s *Provider) rotate(doneChan <-chan struct{}, keysChan chan<- [][32]byte) {
	// this runs in its own goroutine; recover so a panic here
	// cannot take down the whole process
	defer func() {
		if err := recover(); err != nil {
			log.Printf("[PANIC] distributed STEK rotation: %v\n%s", err, debug.Stack())
		}
	}()
	for {
		select {
		case <-s.timer.C:
			dstek, err := s.getSTEK()
			if err != nil {
				// TODO: improve this handling
				log.Printf("[ERROR] Loading STEK: %v", err)
				continue
			}

			// send the updated keys to the service
			keysChan <- dstek.Keys

			// timer channel is already drained, so reset directly (see godoc)
			s.timer.Reset(time.Until(dstek.NextRotation))

		case <-doneChan:
			// again, see godocs for why timer is stopped this way
			if !s.timer.Stop() {
				<-s.timer.C
			}
			return
		}
	}
}

// distributedSTEK is the gob-encoded payload shared through storage:
// the current key set plus the rotation schedule.
type distributedSTEK struct {
	Keys                       [][32]byte
	LastRotation, NextRotation time.Time
}

const (
	stekLockName = "stek_check"
	stekFileName = "stek/stek.bin"
)

// Interface guard
var _ caddytls.STEKProvider = (*Provider)(nil)



================================================
FILE: modules/caddytls/ech.go
================================================
package caddytls

import (
	"context"
	"crypto/tls"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io/fs"
	weakrand "math/rand/v2"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/cloudflare/circl/hpke"
	"github.com/cloudflare/circl/kem"
	"github.com/libdns/libdns"
	"go.uber.org/zap"
	"golang.org/x/crypto/cryptobyte"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(ECHDNSPublisher{})
}

// ECH enables Encrypted ClientHello (ECH) and configures its management.
//
// ECH helps protect site names (also called "server names" or "domain names"
// or "SNI"), which are normally sent over plaintext when establishing a TLS
// connection. With ECH, the true ClientHello is encrypted and wrapped by an
// "outer" ClientHello that uses a more generic, shared server name that is
// publicly known.
//
// Clients need to know which public name (and other parameters) to use when
// connecting to a site with ECH, and the methods for this vary; however,
// major browsers support reading ECH configurations from DNS records (which
// is typically only secure when DNS-over-HTTPS or DNS-over-TLS is enabled in
// the client). Caddy has the ability to automatically publish ECH configs to
// DNS records if a DNS provider is configured either in the TLS app or with
// each individual publication config object. (Requires a custom build with a
// DNS provider module.)
//
// ECH requires at least TLS 1.3, so any TLS connection policies with ECH
// applied will automatically upgrade the minimum TLS version to 1.3, even if
// configured to a lower version.
//
// EXPERIMENTAL: Subject to change.
type ECH struct {
	// The list of ECH configurations for which to automatically generate
	// and rotate keys. At least one is required to enable ECH.
	//
	// It is strongly recommended to use as few ECH configs as possible
	// to maximize the size of your anonymity set (see the ECH specification
	// for a definition). Typically, each server should have only one public
	// name, i.e. one config in this list.
	Configs []ECHConfiguration `json:"configs,omitempty"`

	// Publication describes ways to publish ECH configs for clients to
	// discover and use. Without publication, most clients will not use
	// ECH at all, and those that do will suffer degraded performance.
	//
	// Most major browsers support ECH by way of publication to HTTPS
	// DNS RRs. (This also typically requires that they use DoH or DoT.)
	Publication []*ECHPublication `json:"publication,omitempty"`

	configsMu   *sync.RWMutex                   // protects both configs and the list of configs/keys the standard library uses
	configs     map[string][]echConfig          // map of public_name to list of configs
	stdlibReady []tls.EncryptedClientHelloKey   // ECH configs+keys in a format the standard library can use
}

// Provision loads or creates ECH configs and returns outer names (for certificate
// management), but does not publish any ECH configs. The DNS module is used as
// a default for later publishing if needed.
func (ech *ECH) Provision(ctx caddy.Context) ([]string, error) {
	ech.configsMu = new(sync.RWMutex)
	logger := ctx.Logger().Named("ech")

	// set up publication modules before we need to obtain a lock in storage,
	// since this is strictly internal and doesn't require synchronization
	for i, pub := range ech.Publication {
		mods, err := ctx.LoadModule(pub, "PublishersRaw")
		if err != nil {
			return nil, fmt.Errorf("loading ECH publication modules: %v", err)
		}
		for _, modIface := range mods.(map[string]any) {
			ech.Publication[i].publishers = append(ech.Publication[i].publishers, modIface.(ECHPublisher))
		}
	}

	// the rest of provisioning needs an exclusive lock so that instances aren't
	// stepping on each other when setting up ECH configs
	storage := ctx.Storage()
	if err := storage.Lock(ctx, echStorageLockName); err != nil {
		return nil, err
	}
	defer func() {
		if err := storage.Unlock(ctx, echStorageLockName); err != nil {
			logger.Error("unable to unlock ECH provisioning in storage", zap.Error(err))
		}
	}()

	ech.configsMu.Lock()
	defer ech.configsMu.Unlock()

	outerNames, err := ech.setConfigsFromStorage(ctx, logger)
	if err != nil {
		return nil, fmt.Errorf("loading configs from storage: %w", err)
	}

	// see if we need to make any new ones based on the input configuration
	for _, cfg := range ech.Configs {
		// public names are normalized to lowercase for use as map keys
		publicName := strings.ToLower(strings.TrimSpace(cfg.PublicName))

		if list, ok := ech.configs[publicName]; !ok || len(list) == 0 {
			// no config with this public name was loaded, so create one
			echCfg, err := generateAndStoreECHConfig(ctx, publicName)
			if err != nil {
				return nil, err
			}
			logger.Debug("generated new ECH config",
				zap.String("public_name", echCfg.RawPublicName),
				zap.Uint8("id", echCfg.ConfigID))
			ech.configs[publicName] = append(ech.configs[publicName], echCfg)
			outerNames = append(outerNames, publicName)
		}
	}

	// convert the configs into a structure ready for the std lib to use
	ech.updateKeyList()

	// ensure any old keys are rotated out
	if err = ech.rotateECHKeys(ctx, logger, true); err != nil {
		return nil, fmt.Errorf("rotating ECH configs: %w", err)
	}

	return outerNames, nil
}

// setConfigsFromStorage sets the ECH configs in memory to those in storage.
// It must be called in a write lock on ech.configsMu.
func (ech *ECH) setConfigsFromStorage(ctx caddy.Context, logger *zap.Logger) ([]string, error) {
	storage := ctx.Storage()

	ech.configs = make(map[string][]echConfig)
	var outerNames []string

	// start by loading all the existing configs (even the older ones on the way out,
	// since some clients may still be using them if they haven't yet picked up on the
	// new configs)
	cfgKeys, err := storage.List(ctx, echConfigsKey, false)
	if err != nil && !errors.Is(err, fs.ErrNotExist) { // OK if dir doesn't exist; it will be created
		return nil, err
	}
	for _, cfgKey := range cfgKeys {
		cfg, err := loadECHConfig(ctx, path.Base(cfgKey))
		if err != nil {
			return nil, err
		}

		// if any part of the config's folder was corrupted, the load function will
		// clean it up and not return an error, since configs are immutable and
		// fairly ephemeral... so just check that we actually got a populated config
		if cfg.configBin == nil || cfg.privKeyBin == nil {
			continue
		}

		logger.Debug("loaded ECH config",
			zap.String("public_name", cfg.RawPublicName),
			zap.Uint8("id", cfg.ConfigID))

		if _, seen := ech.configs[cfg.RawPublicName]; !seen {
			outerNames = append(outerNames, cfg.RawPublicName)
		}
		ech.configs[cfg.RawPublicName] = append(ech.configs[cfg.RawPublicName], cfg)
	}

	return outerNames, nil
}

// rotateECHKeys updates the ECH keys/configs that are outdated if rotation is needed.
// It should be called in a write lock on ech.configsMu. If a lock is already obtained
// in storage, then pass true for storageSynced.
//
// This function sets/updates the stdlib-ready key list only if a rotation occurs.
func (ech *ECH) rotateECHKeys(ctx caddy.Context, logger *zap.Logger, storageSynced bool) error {
	storage := ctx.Storage()

	// all existing configs are now loaded; rotate keys "regularly" as recommended by the spec
	// (also: "Rotating too frequently limits the client anonymity set." - but the more server
	// names, the more frequently rotation can be done safely)
	const (
		rotationInterval = 24 * time.Hour * 30
		deleteAfter      = 24 * time.Hour * 90
	)

	if !ech.rotationNeeded(rotationInterval, deleteAfter) {
		return nil
	}

	// sync this operation across cluster if not already
	if !storageSynced {
		if err := storage.Lock(ctx, echStorageLockName); err != nil {
			return err
		}
		defer func() {
			if err := storage.Unlock(ctx, echStorageLockName); err != nil {
				logger.Error("unable to unlock ECH rotation in storage", zap.Error(err))
			}
		}()
	}

	// update what storage has, in case another instance already updated things
	if _, err := ech.setConfigsFromStorage(ctx, logger); err != nil {
		return fmt.Errorf("updating ECH keys from storage: %v", err)
	}

	// iterate the updated list and do any updates as needed
	for publicName := range ech.configs {
		for i := 0; i < len(ech.configs[publicName]); i++ {
			cfg := ech.configs[publicName][i]
			if time.Since(cfg.meta.Created) >= rotationInterval && cfg.meta.Replaced.IsZero() {
				// key is due for rotation and it hasn't been replaced yet; do that now
				logger.Debug("ECH config is due for rotation",
					zap.String("public_name", cfg.RawPublicName),
					zap.Uint8("id", cfg.ConfigID),
					zap.Time("created", cfg.meta.Created),
					zap.Duration("age", time.Since(cfg.meta.Created)),
					zap.Duration("rotation_interval", rotationInterval))

				// start by generating and storing the replacement ECH config
				newCfg, err := generateAndStoreECHConfig(ctx, publicName)
				if err != nil {
					return fmt.Errorf("generating and storing new replacement ECH config: %w", err)
				}

				// mark the key as replaced so we don't rotate it again, and instead delete it later
				ech.configs[publicName][i].meta.Replaced = time.Now()

				// persist the updated metadata
				metaBytes, err := json.Marshal(ech.configs[publicName][i].meta)
				if err != nil {
					return fmt.Errorf("marshaling updated ECH config metadata: %v", err)
				}
				if err := storage.Store(ctx, echMetaKey(cfg.ConfigID), metaBytes); err != nil {
					return fmt.Errorf("storing updated ECH config metadata: %v", err)
				}

				// note: appending extends the slice being iterated, but the new
				// config's Created time is now, so it won't match either branch
				ech.configs[publicName] = append(ech.configs[publicName], newCfg)

				logger.Debug("rotated ECH key",
					zap.String("public_name", cfg.RawPublicName),
					zap.Uint8("old_id", cfg.ConfigID),
					zap.Uint8("new_id", newCfg.ConfigID))
			} else if time.Since(cfg.meta.Created) >= deleteAfter && !cfg.meta.Replaced.IsZero() {
				// key has expired and is no longer supported; delete it from storage and memory
				cfgIDKey := path.Join(echConfigsKey, strconv.Itoa(int(cfg.ConfigID)))
				if err := storage.Delete(ctx, cfgIDKey); err != nil {
					return fmt.Errorf("deleting expired ECH config: %v", err)
				}
				ech.configs[publicName] = append(ech.configs[publicName][:i], ech.configs[publicName][i+1:]...)
				i-- // compensate for the element just removed so none is skipped

				logger.Debug("deleted expired ECH key",
					zap.String("public_name", cfg.RawPublicName),
					zap.Uint8("id", cfg.ConfigID),
					zap.Duration("age", time.Since(cfg.meta.Created)))
			}
		}
	}

	ech.updateKeyList()

	return nil
}

// rotationNeeded returns true if any ECH key needs to be replaced, or deleted.
// It must be called inside a read or write lock of ech.configsMu (probably a
// write lock, so that the rotation can occur correctly in the same lock).
func (ech *ECH) rotationNeeded(rotationInterval, deleteAfter time.Duration) bool {
	for publicName := range ech.configs {
		for i := 0; i < len(ech.configs[publicName]); i++ {
			cfg := ech.configs[publicName][i]
			if (time.Since(cfg.meta.Created) >= rotationInterval && cfg.meta.Replaced.IsZero()) ||
				(time.Since(cfg.meta.Created) >= deleteAfter && !cfg.meta.Replaced.IsZero()) {
				return true
			}
		}
	}
	return false
}

// updateKeyList updates the list of ECH keys the std lib uses to serve ECH.
// It must be called inside a write lock on ech.configsMu.
func (ech *ECH) updateKeyList() {
	ech.stdlibReady = []tls.EncryptedClientHelloKey{}
	for _, cfgs := range ech.configs {
		for _, cfg := range cfgs {
			ech.stdlibReady = append(ech.stdlibReady, tls.EncryptedClientHelloKey{
				Config:      cfg.configBin,
				PrivateKey:  cfg.privKeyBin,
				SendAsRetry: cfg.meta.Replaced.IsZero(), // only send during retries if key has not been rotated out
			})
		}
	}
}

// publishECHConfigs publishes any configs that are configured for publication and which haven't been published already.
func (t *TLS) publishECHConfigs(logger *zap.Logger) error {
	// make publication exclusive, since we don't need to repeat this unnecessarily
	storage := t.ctx.Storage()
	const echLockName = "ech_publish"
	if err := storage.Lock(t.ctx, echLockName); err != nil {
		return err
	}
	defer func() {
		if err := storage.Unlock(t.ctx, echLockName); err != nil {
			logger.Error("unable to unlock ECH provisioning in storage", zap.Error(err))
		}
	}()

	// get the publication config, or use a default if not specified
	// (the default publication config should be to publish all ECH
	// configs to the app-global DNS provider; if no DNS provider is
	// configured, then this whole function is basically a no-op)
	publicationList := t.EncryptedClientHello.Publication
	if publicationList == nil {
		if dnsProv, ok := t.dns.(ECHDNSProvider); ok {
			publicationList = []*ECHPublication{
				{
					publishers: []ECHPublisher{
						&ECHDNSPublisher{
							provider: dnsProv,
							logger:   logger,
						},
					},
				},
			}
		}
	}

	// for each publication config, build the list of ECH configs to
	// publish with it, and figure out which inner names to publish
	// to/for, then publish
	for _, publication := range publicationList {
		// take a read lock only long enough to snapshot the configs to publish
		t.EncryptedClientHello.configsMu.RLock()

		// this publication is either configured for specific ECH configs,
		// or we just use an implied default of all ECH configs
		var echCfgList echConfigList
		var configIDs []uint8 // TODO: use IDs or the outer names?
		if publication.Configs == nil {
			// by default, publish all configs
			for _, configs := range t.EncryptedClientHello.configs {
				echCfgList = append(echCfgList, configs...)
				for _, c := range configs {
					configIDs = append(configIDs, c.ConfigID)
				}
			}
		} else {
			for _, cfgOuterName := range publication.Configs {
				if cfgList, ok := t.EncryptedClientHello.configs[cfgOuterName]; ok {
					echCfgList = append(echCfgList, cfgList...)
					for _, c := range cfgList {
						configIDs = append(configIDs, c.ConfigID)
					}
				}
			}
		}
		t.EncryptedClientHello.configsMu.RUnlock()

		// marshal the ECH config list as binary for publication
		echCfgListBin, err := echCfgList.MarshalBinary()
		if err != nil {
			return fmt.Errorf("marshaling ECH config list: %v", err)
		}

		// now we have our list of ECH configs to publish and the inner names
		// to publish for (i.e. the names being protected); iterate each publisher
		// and do the publish for any config+name that needs a publish
		for _, publisher := range publication.publishers {
			publisherKey := publisher.PublisherKey()

			// by default, publish for all (non-outer) server names, unless
			// a specific list of names is configured
			var serverNamesSet map[string]struct{}
			if publication.Domains == nil {
				serverNamesSet = make(map[string]struct{}, len(t.serverNames))
				for name := range t.serverNames {
					// skip Tailscale names, a special case we also handle differently in our auto-HTTPS
					if strings.HasSuffix(name, ".ts.net") {
						continue
					}
					serverNamesSet[name] = struct{}{}
				}
			} else {
				serverNamesSet = make(map[string]struct{}, len(publication.Domains))
				for _, name := range publication.Domains {
					serverNamesSet[name] = struct{}{}
				}
			}

			// remove any domains from the set which have already had all configs in the
			// list published by this publisher, to avoid always re-publishing unnecessarily
			for configuredInnerName := range serverNamesSet {
				allConfigsPublished := true
				for _, cfg := range echCfgList {
					// TODO: Potentially utilize the timestamp (map value) for recent-enough publication, instead of just checking for existence
					if _, ok := cfg.meta.Publications[publisherKey][configuredInnerName]; !ok {
						allConfigsPublished = false
						break
					}
				}
				if allConfigsPublished {
					delete(serverNamesSet, configuredInnerName)
				}
			}

			// if all the (inner) domains have had this ECH config list published
			// by this publisher, then try the next publication config
			if len(serverNamesSet) == 0 {
				logger.Debug("ECH config list already published by publisher for associated domains (or no domains to publish for)",
					zap.Uint8s("config_ids", configIDs),
					zap.String("publisher", publisherKey))
				continue
			}

			// convert the set of names to a slice
			dnsNamesToPublish := make([]string, 0, len(serverNamesSet))
			for name := range serverNamesSet {
				dnsNamesToPublish = append(dnsNamesToPublish, name)
			}

			logger.Debug("publishing ECH config list",
				zap.String("publisher", publisherKey),
				zap.Strings("domains", dnsNamesToPublish),
				zap.Uint8s("config_ids", configIDs))

			// publish this ECH config list with this publisher
			pubTime := time.Now()
			err := publisher.PublishECHConfigList(t.ctx, dnsNamesToPublish, echCfgListBin)
			var publishErrs PublishECHConfigListErrors
			if errors.As(err, &publishErrs) {
				// at least a partial failure, maybe a complete failure, but we can
				// log each error by domain
				for innerName, domainErr := range publishErrs {
					logger.Error("failed to publish ECH configuration list",
						zap.String("publisher", publisherKey),
						zap.String("domain", innerName),
						zap.Uint8s("config_ids", configIDs),
						zap.Error(domainErr))
				}
			} else if err != nil {
				// generic error; assume the entire thing failed, I guess
				logger.Error("failed publishing ECH configuration list",
					zap.String("publisher", publisherKey),
					zap.Strings("domains", dnsNamesToPublish),
					zap.Uint8s("config_ids", configIDs),
					zap.Error(err))
			}
			if err == nil || (len(publishErrs) > 0 && len(publishErrs) < len(dnsNamesToPublish)) {
				// if publication for at least some domains succeeded, we should update our publication
				// state for those domains to avoid unnecessarily republishing every time
				someAll := "all"
				if len(publishErrs) > 0 {
					someAll = "some"
				}

				// make a list of names that published successfully with this publisher
				// so that we update only their state in storage, not the failed ones
				var successNames []string
				for _, name := range dnsNamesToPublish {
					if _, ok := publishErrs[name]; !ok {
						successNames = append(successNames, name)
					}
				}

				logger.Info("successfully published ECH configuration list for "+someAll+" domains",
					zap.String("publisher", publisherKey),
					zap.Strings("domains", successNames),
					zap.Uint8s("config_ids", configIDs))

				// record the publication time per (publisher, inner name) in each
				// config's metadata and persist it, so future runs can skip these
				for _, cfg := range echCfgList {
					if cfg.meta.Publications == nil {
						cfg.meta.Publications = make(publicationHistory)
					}
					if _, ok := cfg.meta.Publications[publisherKey]; !ok {
						cfg.meta.Publications[publisherKey] = make(map[string]time.Time)
					}
					for _, name := range successNames {
						cfg.meta.Publications[publisherKey][name] = pubTime
					}
					metaBytes, err := json.Marshal(cfg.meta)
					if err != nil {
						return fmt.Errorf("marshaling ECH config metadata: %v", err)
					}
					if err := t.ctx.Storage().Store(t.ctx, echMetaKey(cfg.ConfigID), metaBytes); err != nil {
						return fmt.Errorf("storing updated ECH config metadata: %v", err)
					}
				}
			} else {
				logger.Error("all domains failed to publish ECH configuration list (see earlier errors)",
					zap.String("publisher", publisherKey),
					zap.Strings("domains", dnsNamesToPublish),
					zap.Uint8s("config_ids", configIDs))
			}
		}
	}

	return nil
}

// loadECHConfig loads the config from storage with the given configID.
// An error is not actually returned in some cases the config fails to
// load because in some cases it just means the config ID folder has
// been cleaned up in storage, maybe due to an incomplete set of keys
// or corrupted contents; in any case, the only rectification is to
// delete it and make new keys (an error IS returned if deleting the
// corrupted keys fails, for example). Check the returned echConfig for
// non-nil privKeyBin and configBin values before using.
func loadECHConfig(ctx caddy.Context, configID string) (echConfig, error) {
	storage := ctx.Storage()
	logger := ctx.Logger()

	// each config lives in its own folder keyed by config ID
	cfgIDKey := path.Join(echConfigsKey, configID)
	keyKey := path.Join(cfgIDKey, "key.bin")
	configKey := path.Join(cfgIDKey, "config.bin")
	metaKey := path.Join(cfgIDKey, "meta.json")

	// if loading anything fails, might as well delete this folder and free up
	// the config ID; spec is designed to rotate configs frequently anyway
	// (I consider it a more serious error if we can't clean up the folder,
	// since leaving stray storage keys is confusing)
	privKeyBytes, err := storage.Load(ctx, keyKey)
	if err != nil {
		delErr := storage.Delete(ctx, cfgIDKey)
		if delErr != nil {
			return echConfig{}, fmt.Errorf("error loading private key (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
		}
		logger.Warn("could not load ECH private key; deleting its config folder",
			zap.String("config_id", configID),
			zap.Error(err))
		return echConfig{}, nil
	}
	echConfigBytes, err := storage.Load(ctx, configKey)
	if err != nil {
		delErr := storage.Delete(ctx, cfgIDKey)
		if delErr != nil {
			return echConfig{}, fmt.Errorf("error loading ECH config (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
		}
		logger.Warn("could not load ECH config; deleting its config folder",
			zap.String("config_id", configID),
			zap.Error(err))
		return echConfig{}, nil
	}
	var cfg echConfig
	if err := cfg.UnmarshalBinary(echConfigBytes); err != nil {
		delErr := storage.Delete(ctx, cfgIDKey)
		if delErr != nil {
			return echConfig{}, fmt.Errorf("error loading ECH config (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
		}
		logger.Warn("could not load ECH config; deleted its config folder",
			zap.String("config_id", configID),
			zap.Error(err))
		return echConfig{}, nil
	}
	// metadata is allowed to be missing (it will be recreated on publication),
	// but any other load error means the folder gets reset like above
	metaBytes, err := storage.Load(ctx, metaKey)
	if errors.Is(err, fs.ErrNotExist) {
		logger.Warn("ECH config metadata file missing; will recreate at next publication",
			zap.String("config_id", configID),
			zap.Error(err))
	} else if err != nil {
		delErr := storage.Delete(ctx, cfgIDKey)
		if delErr != nil {
			return echConfig{}, fmt.Errorf("error loading ECH config metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
		}
		logger.Warn("could not load ECH config metadata; deleted its folder",
			zap.String("config_id", configID),
			zap.Error(err))
		return echConfig{}, nil
	}
	var meta echConfigMeta
	if len(metaBytes) > 0 {
		if err := json.Unmarshal(metaBytes, &meta); err != nil {
			// even though it's just metadata, reset the whole config since we can't reliably maintain it
			delErr := storage.Delete(ctx, cfgIDKey)
			if delErr != nil {
				return echConfig{}, fmt.Errorf("error decoding ECH metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
			}
			logger.Warn("could not JSON-decode ECH metadata; deleted its config folder",
				zap.String("config_id", configID),
				zap.Error(err))
			return echConfig{}, nil
		}
	}

	cfg.privKeyBin = privKeyBytes
	cfg.configBin = echConfigBytes
	cfg.meta = meta

	return cfg, nil
}

// generateAndStoreECHConfig creates a new ECH config and key pair for the
// given public name, persists all three artifacts (key, config, metadata)
// to storage, and returns the populated echConfig.
func generateAndStoreECHConfig(ctx caddy.Context, publicName string) (echConfig, error) {
	// Go currently has very strict requirements for server-side ECH configs,
	// to quote the Go 1.24 godoc (with typos of AEAD IDs corrected):
	//
	// "Config should be a marshalled ECHConfig associated with PrivateKey. This
	// must match the config provided to clients byte-for-byte. The config
	// should only specify the DHKEM(X25519, HKDF-SHA256) KEM ID (0x0020), the
	// HKDF-SHA256 KDF ID (0x0001), and a subset of the following AEAD IDs:
	// AES-128-GCM (0x0001), AES-256-GCM (0x0002), ChaCha20Poly1305 (0x0003)."
	//
	// So we need to be sure we generate a config within these parameters
	// so the Go TLS server can use it.

	// generate a key pair
	const kemChoice = hpke.KEM_X25519_HKDF_SHA256
	publicKey, privateKey, err := kemChoice.Scheme().GenerateKeyPair()
	if err != nil {
		return echConfig{}, err
	}

	// find an available config ID
	configID, err := newECHConfigID(ctx)
	if err != nil {
		return echConfig{}, fmt.Errorf("generating unique config ID: %v", err)
	}

	echCfg := echConfig{
		PublicKey:     publicKey,
		Version:       draftTLSESNI25,
		ConfigID:      configID,
		RawPublicName: publicName,
		KEMID:         kemChoice,
		CipherSuites: []hpkeSymmetricCipherSuite{
			{
				KDFID:  hpke.KDF_HKDF_SHA256,
				AEADID: hpke.AEAD_AES128GCM,
			},
			{
				KDFID:  hpke.KDF_HKDF_SHA256,
				AEADID: hpke.AEAD_AES256GCM,
			},
			{
				KDFID:  hpke.KDF_HKDF_SHA256,
				AEADID: hpke.AEAD_ChaCha20Poly1305,
			},
		},
	}
	meta := echConfigMeta{
		Created: time.Now(),
	}

	privKeyBytes, err := privateKey.MarshalBinary()
	if err != nil {
		return echConfig{}, fmt.Errorf("marshaling ECH private key: %v", err)
	}
	echConfigBytes, err := echCfg.MarshalBinary()
	if err != nil {
		return echConfig{}, fmt.Errorf("marshaling ECH config: %v", err)
	}
	metaBytes, err := json.Marshal(meta)
	if err != nil {
		return echConfig{}, fmt.Errorf("marshaling ECH config metadata: %v", err)
	}

	parentKey := path.Join(echConfigsKey, strconv.Itoa(int(configID)))
	keyKey := path.Join(parentKey, "key.bin")
	configKey := path.Join(parentKey, "config.bin")
	metaKey := path.Join(parentKey, "meta.json")

	if err := ctx.Storage().Store(ctx, keyKey, privKeyBytes); err != nil {
		return echConfig{}, fmt.Errorf("storing ECH private key: %v", err)
	}
	if err := ctx.Storage().Store(ctx, configKey, echConfigBytes); err != nil {
		return echConfig{}, fmt.Errorf("storing ECH config: %v", err)
	}
	if err := ctx.Storage().Store(ctx, metaKey, metaBytes); err != nil {
		return echConfig{}, fmt.Errorf("storing ECH config metadata: %v", err)
	}

	echCfg.privKeyBin = privKeyBytes
	echCfg.configBin = echConfigBytes // this contains the public key
	echCfg.meta = meta

	return echCfg, nil
}

// ECHConfiguration represents an Encrypted ClientHello configuration.
//
// EXPERIMENTAL: Subject to change.
type ECHConfiguration struct {
	// The public server name (SNI) that will be used in the outer ClientHello.
	// This should be a domain name for which this server is authoritative,
	// because Caddy will try to provision a certificate for this name. As an
	// outer SNI, it is never used for application data (HTTPS, etc.), but it
	// is necessary for enabling clients to connect securely in some cases.
	// If this field is empty or missing, or if Caddy cannot get a certificate
	// for this domain (e.g. the domain's DNS records do not point to this server),
	// client reliability becomes brittle, and you risk coercing clients to expose
	// true server names in plaintext, which compromises both the privacy of the
	// server and makes clients more vulnerable.
	PublicName string `json:"public_name"`
}

// ECHPublication configures publication of ECH config(s). It pairs a list
// of ECH configs with the list of domains they are assigned to protect, and
// describes how to publish those configs for those domains.
//
// Most servers will have only a single publication config, unless their
// domains are spread across multiple DNS providers or require different
// methods of publication.
//
// EXPERIMENTAL: Subject to change.
type ECHPublication struct {
	// The list of ECH configurations to publish, identified by public name.
	// If not set, all configs will be included for publication by default.
	//
	// It is generally advised to maximize the size of your anonymity set,
	// which implies using as few public names as possible for your sites.
	// Usually, only a single public name is used to protect all the sites
	// for a server.
	//
	// EXPERIMENTAL: This field may be renamed or have its structure changed.
	Configs []string `json:"configs,omitempty"`

	// The list of ("inner") domain names which are protected with the associated
	// ECH configurations.
	//
	// If not set, all server names registered with the TLS module will be
	// added to this list implicitly. (This registration is done automatically
	// by other Caddy apps that use the TLS module. They should register their
	// configured server names for this purpose. For example, the HTTP server
	// registers the hostnames for which it applies automatic HTTPS. This is
	// not something you, the user, have to do.)
	//
	// Names in this list should not appear in any other publication config
	// object with the same publishers, since the publications will likely
	// overwrite each other.
	//
	// NOTE: In order to publish ECH configs for domains configured for
	// On-Demand TLS that are not explicitly enumerated elsewhere in the
	// config, those domain names will have to be listed here. The only
	// time Caddy knows which domains it is serving with On-Demand TLS is
	// handshake-time, which is too late for publishing ECH configs; it
	// means the first connections would not protect the server names,
	// revealing that information to observers, and thus defeating the
	// purpose of ECH. Hence the need to list them here so Caddy can
	// proactively publish ECH configs before clients connect with those
	// server names in plaintext.
	Domains []string `json:"domains,omitempty"`

	// How to publish the ECH configurations so clients can know to use
	// ECH to connect more securely to the server.
	PublishersRaw caddy.ModuleMap `json:"publishers,omitempty" caddy:"namespace=tls.ech.publishers"`
	publishers    []ECHPublisher // loaded instances of PublishersRaw
}

// ECHDNSProvider can service DNS entries for ECH purposes.
type ECHDNSProvider interface {
	libdns.RecordGetter
	libdns.RecordSetter
}

// ECHDNSPublisher configures how to publish an ECH configuration to
// DNS records for the specified domains.
//
// EXPERIMENTAL: Subject to change.
type ECHDNSPublisher struct {
	// The DNS provider module which will establish the HTTPS record(s).
ProviderRaw json.RawMessage `json:"provider,omitempty" caddy:"namespace=dns.providers inline_key=name"` provider ECHDNSProvider logger *zap.Logger } // CaddyModule returns the Caddy module information. func (ECHDNSPublisher) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.ech.publishers.dns", New: func() caddy.Module { return new(ECHDNSPublisher) }, } } func (dnsPub *ECHDNSPublisher) Provision(ctx caddy.Context) error { dnsProvMod, err := ctx.LoadModule(dnsPub, "ProviderRaw") if err != nil { return fmt.Errorf("loading ECH DNS provider module: %v", err) } prov, ok := dnsProvMod.(ECHDNSProvider) if !ok { return fmt.Errorf("ECH DNS provider module is not an ECH DNS Provider: %v", err) } dnsPub.provider = prov dnsPub.logger = ctx.Logger() return nil } // PublisherKey returns the name of the DNS provider module. // We intentionally omit specific provider configuration (or a hash thereof, // since the config is likely sensitive, potentially containing an API key) // because it is unlikely that specific configuration, such as an API key, // is relevant to unique key use as an ECH config publisher. func (dnsPub ECHDNSPublisher) PublisherKey() string { return string(dnsPub.provider.(caddy.Module).CaddyModule().ID) } // PublishECHConfigList publishes the given ECH config list (as binary) to the given DNS names. // If there is an error, it may be of type PublishECHConfigListErrors, detailing // potentially multiple errors keyed by associated innerName. 
func (dnsPub *ECHDNSPublisher) PublishECHConfigList(ctx context.Context, innerNames []string, configListBin []byte) error {
	nameservers := certmagic.RecursiveNameservers(nil) // TODO: we could make resolvers configurable

	// collect per-domain failures; publication continues for the other names
	errs := make(PublishECHConfigListErrors)

nextName:
	for _, domain := range innerNames {
		zone, err := certmagic.FindZoneByFQDN(ctx, dnsPub.logger, domain, nameservers)
		if err != nil {
			errs[domain] = fmt.Errorf("could not determine zone for domain: %w (domain=%s nameservers=%v)", err, domain, nameservers)
			continue
		}

		relName := libdns.RelativeName(domain+".", zone)

		// get existing records for this domain; we need to make sure another
		// record exists for it so we don't accidentally trample a wildcard; we
		// also want to get any HTTPS record that may already exist for it so
		// we can augment the ech SvcParamKey with any other existing SvcParams
		recs, err := dnsPub.provider.GetRecords(ctx, zone)
		if err != nil {
			errs[domain] = fmt.Errorf("unable to get existing DNS records to publish ECH data to HTTPS DNS record: %w", err)
			continue
		}
		var httpsRec libdns.ServiceBinding
		var nameHasExistingRecord bool
		for _, rec := range recs {
			rr := rec.RR()
			if rr.Name == relName {
				// CNAME records are exclusive of all other records, so we cannot publish an HTTPS
				// record for a domain that is CNAME'd. See #6922.
				if rr.Type == "CNAME" {
					dnsPub.logger.Warn("domain has CNAME record, so unable to publish ECH data to HTTPS record",
						zap.String("domain", domain),
						zap.String("cname_value", rr.Data))
					continue nextName
				}
				nameHasExistingRecord = true
				// only reuse an existing HTTPS record whose target is the name
				// itself ("" or "."); its SvcParams are preserved below
				if svcb, ok := rec.(libdns.ServiceBinding); ok && svcb.Scheme == "https" {
					if svcb.Target == "" || svcb.Target == "." {
						httpsRec = svcb
						break
					}
				}
			}
		}
		if !nameHasExistingRecord {
			// Turns out if you publish a DNS record for a name that doesn't have any DNS record yet,
			// any wildcard records won't apply for the name anymore, meaning if a wildcard A/AAAA record
			// is used to resolve the domain to a server, publishing an HTTPS record could break resolution!
			// In theory, this should be a non-issue, at least for A/AAAA records, if the HTTPS record
			// includes ipv[4|6]hint SvcParamKeys,
			dnsPub.logger.Warn("domain does not have any existing records, so skipping publication of HTTPS record",
				zap.String("domain", domain),
				zap.String("relative_name", relName),
				zap.String("zone", zone))
			continue
		}
		// start from the existing record's params (zero value yields nil map)
		params := httpsRec.Params
		if params == nil {
			params = make(libdns.SvcParams)
		}

		// overwrite only the "ech" SvcParamKey
		params["ech"] = []string{base64.StdEncoding.EncodeToString(configListBin)}

		// publish record
		_, err = dnsPub.provider.SetRecords(ctx, zone, []libdns.Record{
			libdns.ServiceBinding{
				// HTTPS and SVCB RRs: RFC 9460 (https://www.rfc-editor.org/rfc/rfc9460)
				Scheme:   "https",
				Name:     relName,
				TTL:      5 * time.Minute, // TODO: low hard-coded value only temporary; change to a higher value once more field-tested and key rotation is implemented
				Priority: 2, // allows a manual override with priority 1
				Target:   ".",
				Params:   params,
			},
		})
		if err != nil {
			errs[domain] = fmt.Errorf("unable to publish ECH data to HTTPS DNS record: %w (zone=%s dns_record_name=%s)", err, zone, relName)
			continue
		}
	}

	if len(errs) > 0 {
		return errs
	}
	return nil
}

// echConfig represents an ECHConfig from the specification,
// [draft-ietf-tls-esni-22](https://www.ietf.org/archive/id/draft-ietf-tls-esni-22.html).
type echConfig struct {
	// "The version of ECH for which this configuration is used.
	// The version is the same as the code point for the
	// encrypted_client_hello extension. Clients MUST ignore any
	// ECHConfig structure with a version they do not support."
	Version uint16

	// The "length" and "contents" fields defined next in the
	// structure are implicitly taken care of by cryptobyte
	// when encoding the following fields:

	// HpkeKeyConfig fields:
	ConfigID     uint8
	KEMID        hpke.KEM
	PublicKey    kem.PublicKey
	CipherSuites []hpkeSymmetricCipherSuite

	// ECHConfigContents fields:
	MaxNameLength uint8
	RawPublicName string
	RawExtensions []byte

	// these fields are not part of the spec, but are here for
	// our use when setting up TLS servers or maintenance
	configBin  []byte
	privKeyBin []byte
	meta       echConfigMeta
}

// MarshalBinary encodes the ECH config in its binary (wire) format.
func (echCfg echConfig) MarshalBinary() ([]byte, error) {
	var b cryptobyte.Builder
	if err := echCfg.marshalBinary(&b); err != nil {
		return nil, err
	}
	return b.Bytes()
}

// UnmarshalBinary decodes the data back into an ECH config.
//
// Borrowed from github.com/OmarTariq612/goech with modifications.
// Original code: Copyright (c) 2023 Omar Tariq AbdEl-Raziq
func (echCfg *echConfig) UnmarshalBinary(data []byte) error {
	var content cryptobyte.String
	b := cryptobyte.String(data)

	if !b.ReadUint16(&echCfg.Version) {
		return errInvalidLen
	}
	if echCfg.Version != draftTLSESNI25 {
		return fmt.Errorf("supported version must be %d: got %d", draftTLSESNI25, echCfg.Version)
	}
	// the remainder of the config is a single length-prefixed value
	if !b.ReadUint16LengthPrefixed(&content) || !b.Empty() {
		return errInvalidLen
	}

	var t cryptobyte.String
	var pk []byte

	if !content.ReadUint8(&echCfg.ConfigID) ||
		!content.ReadUint16((*uint16)(&echCfg.KEMID)) ||
		!content.ReadUint16LengthPrefixed(&t) ||
		!t.ReadBytes(&pk, len(t)) ||
		!content.ReadUint16LengthPrefixed(&t) ||
		len(t)%4 != 0 /* the length of (KDFs and AEADs) must be divisible by 4 */ {
		return errInvalidLen
	}

	if !echCfg.KEMID.IsValid() {
		return fmt.Errorf("invalid KEM ID: %d", echCfg.KEMID)
	}

	var err error
	if echCfg.PublicKey, err = echCfg.KEMID.Scheme().UnmarshalBinaryPublicKey(pk); err != nil {
		return fmt.Errorf("parsing public_key: %w", err)
	}

	// reset and refill the cipher suite list from the wire data
	echCfg.CipherSuites = echCfg.CipherSuites[:0]
	for !t.Empty() {
		var hpkeKDF, hpkeAEAD uint16
		if !t.ReadUint16(&hpkeKDF) || !t.ReadUint16(&hpkeAEAD) {
			// we have already checked that the length is divisible by 4
			panic("this must not happen")
		}
		if !hpke.KDF(hpkeKDF).IsValid() {
			return fmt.Errorf("invalid KDF ID: %d", hpkeKDF)
		}
		if !hpke.AEAD(hpkeAEAD).IsValid() {
			return fmt.Errorf("invalid AEAD ID: %d", hpkeAEAD)
		}
		echCfg.CipherSuites = append(echCfg.CipherSuites, hpkeSymmetricCipherSuite{
			KDFID:  hpke.KDF(hpkeKDF),
			AEADID: hpke.AEAD(hpkeAEAD),
		})
	}

	var rawPublicName []byte
	if !content.ReadUint8(&echCfg.MaxNameLength) ||
		!content.ReadUint8LengthPrefixed(&t) ||
		!t.ReadBytes(&rawPublicName, len(t)) ||
		!content.ReadUint16LengthPrefixed(&t) ||
		!t.ReadBytes(&echCfg.RawExtensions, len(t)) ||
		!content.Empty() {
		return errInvalidLen
	}
	echCfg.RawPublicName = string(rawPublicName)

	return nil
}

var errInvalidLen = errors.New("invalid length")

// marshalBinary writes this config to the cryptobyte builder. If there is an error,
// it will occur before any writes have happened.
func (echCfg echConfig) marshalBinary(b *cryptobyte.Builder) error {
	pk, err := echCfg.PublicKey.MarshalBinary()
	if err != nil {
		return err
	}
	if l := len(echCfg.RawPublicName); l == 0 || l > 255 {
		return fmt.Errorf("public name length (%d) must be in the range 1-255", l)
	}

	b.AddUint16(echCfg.Version)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { // "length" field
		b.AddUint8(echCfg.ConfigID)
		b.AddUint16(uint16(echCfg.KEMID))
		b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
			b.AddBytes(pk)
		})
		b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
			for _, cs := range echCfg.CipherSuites {
				b.AddUint16(uint16(cs.KDFID))
				b.AddUint16(uint16(cs.AEADID))
			}
		})
		// maximum_name_length is derived from the public name, not the
		// MaxNameLength field, and capped at the uint8 maximum
		b.AddUint8(uint8(min(len(echCfg.RawPublicName)+16, 255)))
		b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
			b.AddBytes([]byte(echCfg.RawPublicName))
		})
		b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
			child.AddBytes(echCfg.RawExtensions)
		})
	})

	return nil
}

// hpkeSymmetricCipherSuite pairs an HPKE KDF with an AEAD.
type hpkeSymmetricCipherSuite struct {
	KDFID  hpke.KDF
	AEADID hpke.AEAD
}

type echConfigList []echConfig

// MarshalBinary encodes the whole config list in its binary (wire) format.
func (cl echConfigList) MarshalBinary() ([]byte, error) {
	var b cryptobyte.Builder
	var err error

	// the list's length prefixes the list, as with most opaque values
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		for _, cfg := range cl {
			if err = cfg.marshalBinary(b); err != nil {
				break
			}
		}
	})
	if err != nil {
		return nil, err
	}

	return b.Bytes()
}

// newECHConfigID returns a config ID (0-255) that is not already
// present in storage, or an error if none could be found.
func newECHConfigID(ctx caddy.Context) (uint8, error) {
	// uint8 can be 0-255 inclusive
	const uint8Range = 256

	// avoid repeating storage checks
	tried := make([]bool, uint8Range)

	// Try to find an available number with random rejection sampling;
	// i.e. choose a random number and see if it's already taken. The
	// hard limit on how many times we try to find an available number
	// is flexible... in theory, assuming uniform distribution, 256
	// attempts should make each possible value show up exactly once,
	// but obviously that won't be the case. We can try more times to
	// try to ensure that every number gets a chance, which is especially
	// useful if few are available, or we can lower it if we assume we
	// should have found an available value by then and want to limit
	// runtime; for now I choose the middle ground and just try as many
	// times as there are possible values.
	for i := 0; i < uint8Range && ctx.Err() == nil; i++ {
		num := uint8(weakrand.N(uint8Range)) //nolint:gosec

		// don't try the same number a second time
		if tried[num] {
			continue
		}
		tried[num] = true

		// check to see if any of the subkeys use this config ID
		numStr := strconv.Itoa(int(num))
		trialPath := path.Join(echConfigsKey, numStr)
		if ctx.Storage().Exists(ctx, trialPath) {
			continue
		}

		return num, nil
	}

	if err := ctx.Err(); err != nil {
		return 0, err
	}

	return 0, fmt.Errorf("depleted attempts to find an available config_id")
}

// ECHPublisher is an interface for publishing ECHConfigList values
// so that they can be used by clients.
type ECHPublisher interface {
	// Returns a key that is unique to this publisher and its configuration.
	// A publisher's ID combined with its config is a valid key.
	// It is used to prevent duplicating publications.
	PublisherKey() string

	// Publishes the ECH config list (as binary) for the given innerNames. Some
	// publishers may not need a list of inner/protected names, and can ignore the
	// argument; most, however, will want to use it to know which inner names are
	// to be associated with the given ECH config list.
	//
	// Implementations should return an error of type PublishECHConfigListErrors
	// when relevant to key errors to their associated innerName, but should never
	// return a non-nil PublishECHConfigListErrors when its length is 0.
	PublishECHConfigList(ctx context.Context, innerNames []string, echConfigList []byte) error
}

// PublishECHConfigListErrors is returned by ECHPublishers to describe one or more
// errors publishing an ECH config list from PublishECHConfigList. A non-nil, empty
// value of this type should never be returned.
// nolint:errname // The linter wants "Error" convention, but this is a multi-error type.
type PublishECHConfigListErrors map[string]error

// Error joins the per-name errors as "name: err" pairs separated by "; ".
// Note the ordering is nondeterministic since map iteration order is random.
func (p PublishECHConfigListErrors) Error() string {
	var sb strings.Builder
	for innerName, err := range p {
		if sb.Len() > 0 {
			sb.WriteString("; ")
		}
		sb.WriteString(innerName)
		sb.WriteString(": ")
		sb.WriteString(err.Error())
	}
	return sb.String()
}

// echConfigMeta records the lifecycle and publication history of an ECH config.
type echConfigMeta struct {
	Created      time.Time          `json:"created"`
	Replaced     time.Time          `json:"replaced,omitzero"`
	Publications publicationHistory `json:"publications"`
}

// echMetaKey returns the storage key of the metadata file for the given config ID.
func echMetaKey(configID uint8) string {
	return path.Join(echConfigsKey, strconv.Itoa(int(configID)), "meta.json")
}

// publicationHistory is a map of publisher key to
// map of inner name to timestamp
type publicationHistory map[string]map[string]time.Time

// echStorageLockName is the name of the storage lock to sync ECH updates.
const echStorageLockName = "ech_rotation"

// The key prefix when putting ECH configs in storage. After this
// comes the config ID.
const echConfigsKey = "ech/configs"

// https://www.ietf.org/archive/id/draft-ietf-tls-esni-25.html
const draftTLSESNI25 = 0xfe0d

// Interface guard
var _ ECHPublisher = (*ECHDNSPublisher)(nil)



================================================
FILE: modules/caddytls/fileloader.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"crypto/tls"
	"fmt"
	"os"
	"strings"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(FileLoader{})
}

// FileLoader loads certificates and their associated keys from disk.
type FileLoader []CertKeyFilePair

// Provision implements caddy.Provisioner.
// It expands placeholder values in all configured file paths, formats, and tags.
func (fl FileLoader) Provision(ctx caddy.Context) error {
	repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	if !ok {
		repl = caddy.NewReplacer()
	}
	for k, pair := range fl {
		for i, tag := range pair.Tags {
			pair.Tags[i] = repl.ReplaceKnown(tag, "")
		}
		fl[k] = CertKeyFilePair{
			Certificate: repl.ReplaceKnown(pair.Certificate, ""),
			Key:         repl.ReplaceKnown(pair.Key, ""),
			Format:      repl.ReplaceKnown(pair.Format, ""),
			Tags:        pair.Tags,
		}
	}
	return nil
}

// CaddyModule returns the Caddy module information.
func (FileLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.certificates.load_files", New: func() caddy.Module { return new(FileLoader) }, } } // CertKeyFilePair pairs certificate and key file names along with their // encoding format so that they can be loaded from disk. type CertKeyFilePair struct { // Path to the certificate (public key) file. Certificate string `json:"certificate"` // Path to the private key file. Key string `json:"key"` // The format of the cert and key. Can be "pem". Default: "pem" Format string `json:"format,omitempty"` // Arbitrary values to associate with this certificate. // Can be useful when you want to select a particular // certificate when there may be multiple valid candidates. Tags []string `json:"tags,omitempty"` } // LoadCertificates returns the certificates to be loaded by fl. func (fl FileLoader) LoadCertificates() ([]Certificate, error) { certs := make([]Certificate, 0, len(fl)) for _, pair := range fl { certData, err := os.ReadFile(pair.Certificate) if err != nil { return nil, err } keyData, err := os.ReadFile(pair.Key) if err != nil { return nil, err } var cert tls.Certificate switch pair.Format { case "": fallthrough case "pem": // if the start of the key file looks like an encrypted private key, // reject it with a helpful error message if strings.Contains(string(keyData[:40]), "ENCRYPTED") { return nil, fmt.Errorf("encrypted private keys are not supported; please decrypt the key first") } cert, err = tls.X509KeyPair(certData, keyData) default: return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format) } if err != nil { return nil, err } certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags}) } return certs, nil } // Interface guard var ( _ CertificateLoader = (FileLoader)(nil) _ caddy.Provisioner = (FileLoader)(nil) ) ================================================ FILE: modules/caddytls/folderloader.go ================================================ 
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"bytes"
	"crypto/tls"
	"encoding/pem"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(FolderLoader{})
}

// FolderLoader loads certificates and their associated keys from disk
// by recursively walking the specified directories, looking for PEM
// files which contain both a certificate and a key.
type FolderLoader []string

// CaddyModule returns the Caddy module information.
func (FolderLoader) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.certificates.load_folders",
		New: func() caddy.Module { return new(FolderLoader) },
	}
}

// Provision implements caddy.Provisioner.
// It expands placeholder values in the configured directory paths.
func (fl FolderLoader) Provision(ctx caddy.Context) error {
	repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	if !ok {
		repl = caddy.NewReplacer()
	}
	for k, path := range fl {
		fl[k] = repl.ReplaceKnown(path, "")
	}
	return nil
}

// LoadCertificates loads all the certificates+keys in the directories
// listed in fl from all files ending with .pem. This method of loading
// certificates expects the certificate and key to be bundled into the
// same file.
func (fl FolderLoader) LoadCertificates() ([]Certificate, error) { var certs []Certificate for _, dir := range fl { root, err := os.OpenRoot(dir) if err != nil { return nil, fmt.Errorf("unable to open root directory %s: %w", dir, err) } err = filepath.WalkDir(dir, func(fpath string, d fs.DirEntry, err error) error { if err != nil { return fmt.Errorf("unable to traverse into path: %s", fpath) } if d.IsDir() { return nil } if !strings.HasSuffix(strings.ToLower(d.Name()), ".pem") { return nil } rel, err := filepath.Rel(dir, fpath) if err != nil { return fmt.Errorf("unable to get relative path for %s: %w", fpath, err) } bundle, err := root.ReadFile(rel) if err != nil { return err } cert, err := tlsCertFromCertAndKeyPEMBundle(bundle) if err != nil { return fmt.Errorf("%s: %w", fpath, err) } certs = append(certs, Certificate{Certificate: cert}) return nil }) _ = root.Close() if err != nil { return nil, fmt.Errorf("walking certificates directory %s: %w", dir, err) } } return certs, nil } func tlsCertFromCertAndKeyPEMBundle(bundle []byte) (tls.Certificate, error) { certBuilder, keyBuilder := new(bytes.Buffer), new(bytes.Buffer) var foundKey bool // use only the first key in the file for { // Decode next block so we can see what type it is var derBlock *pem.Block derBlock, bundle = pem.Decode(bundle) if derBlock == nil { break } if derBlock.Type == "CERTIFICATE" { // Re-encode certificate as PEM, appending to certificate chain if err := pem.Encode(certBuilder, derBlock); err != nil { return tls.Certificate{}, err } } else if derBlock.Type == "EC PARAMETERS" { // EC keys generated from openssl can be composed of two blocks: // parameters and key (parameter block should come first) if !foundKey { // Encode parameters if err := pem.Encode(keyBuilder, derBlock); err != nil { return tls.Certificate{}, err } // Key must immediately follow derBlock, bundle = pem.Decode(bundle) if derBlock == nil || derBlock.Type != "EC PRIVATE KEY" { return tls.Certificate{}, fmt.Errorf("expected 
elliptic private key to immediately follow EC parameters") } if err := pem.Encode(keyBuilder, derBlock); err != nil { return tls.Certificate{}, err } foundKey = true } } else if derBlock.Type == "PRIVATE KEY" || strings.HasSuffix(derBlock.Type, " PRIVATE KEY") { // RSA key if !foundKey { if err := pem.Encode(keyBuilder, derBlock); err != nil { return tls.Certificate{}, err } foundKey = true } } else { return tls.Certificate{}, fmt.Errorf("unrecognized PEM block type: %s", derBlock.Type) } } certPEMBytes, keyPEMBytes := certBuilder.Bytes(), keyBuilder.Bytes() if len(certPEMBytes) == 0 { return tls.Certificate{}, fmt.Errorf("failed to parse PEM data") } if len(keyPEMBytes) == 0 { return tls.Certificate{}, fmt.Errorf("no private key block found") } // if the start of the key file looks like an encrypted private key, // reject it with a helpful error message if strings.HasPrefix(string(keyPEMBytes[:40]), "ENCRYPTED") { return tls.Certificate{}, fmt.Errorf("encrypted private keys are not supported; please decrypt the key first") } cert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes) if err != nil { return tls.Certificate{}, fmt.Errorf("making X509 key pair: %v", err) } return cert, nil } var ( _ CertificateLoader = (FolderLoader)(nil) _ caddy.Provisioner = (FolderLoader)(nil) ) ================================================ FILE: modules/caddytls/internalissuer.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"bytes"
	"context"
	"crypto/x509"
	"encoding/pem"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/smallstep/certificates/authority/provisioner"
	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
)

func init() {
	caddy.RegisterModule(InternalIssuer{})
}

// InternalIssuer is a certificate issuer that generates
// certificates internally using a locally-configured
// CA which can be customized using the `pki` app.
type InternalIssuer struct {
	// The ID of the CA to use for signing. The default
	// CA ID is "local". The CA can be configured with the
	// `pki` app.
	CA string `json:"ca,omitempty"`

	// The validity period of certificates.
	Lifetime caddy.Duration `json:"lifetime,omitempty"`

	// If true, the root will be the issuer instead of
	// the intermediate. This is NOT recommended and should
	// only be used when devices/clients do not properly
	// validate certificate chains.
	SignWithRoot bool `json:"sign_with_root,omitempty"`

	// resolved CA reference and logger, set during Provision
	ca     *caddypki.CA
	logger *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (InternalIssuer) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.issuance.internal",
		New: func() caddy.Module { return new(InternalIssuer) },
	}
}

// Provision sets up the issuer.
func (iss *InternalIssuer) Provision(ctx caddy.Context) error {
	iss.logger = ctx.Logger()

	// set some defaults
	if iss.CA == "" {
		iss.CA = caddypki.DefaultCAID
	}

	// get a reference to the configured CA
	appModule, err := ctx.App("pki")
	if err != nil {
		return err
	}
	pkiApp := appModule.(*caddypki.PKI)
	ca, err := pkiApp.GetCA(ctx, iss.CA)
	if err != nil {
		return err
	}
	iss.ca = ca

	// set any other default values
	if iss.Lifetime == 0 {
		iss.Lifetime = caddy.Duration(defaultInternalCertLifetime)
	}

	return nil
}

// IssuerKey returns the unique issuer key for the
// configured CA endpoint.
func (iss InternalIssuer) IssuerKey() string {
	return iss.ca.ID
}

// Issue issues a certificate to satisfy the CSR.
func (iss InternalIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
	// prepare the signing authority
	authCfg := caddypki.AuthorityConfig{
		SignWithRoot: iss.SignWithRoot,
	}
	auth, err := iss.ca.NewAuthority(authCfg)
	if err != nil {
		return nil, err
	}

	// get the cert (public key) that will be used for signing
	var issuerCert *x509.Certificate
	if iss.SignWithRoot {
		issuerCert = iss.ca.RootCertificate()
	} else {
		// the first element of the chain is the signing intermediate
		chain := iss.ca.IntermediateCertificateChain()
		issuerCert = chain[0]
	}

	// ensure issued certificate does not expire later than its issuer
	lifetime := time.Duration(iss.Lifetime)
	if time.Now().Add(lifetime).After(issuerCert.NotAfter) {
		lifetime = time.Until(issuerCert.NotAfter)
		iss.logger.Warn("cert lifetime would exceed issuer NotAfter, clamping lifetime",
			zap.Duration("orig_lifetime", time.Duration(iss.Lifetime)),
			zap.Duration("lifetime", lifetime),
			zap.Time("not_after", issuerCert.NotAfter),
		)
	}

	certChain, err := auth.SignWithContext(ctx, csr, provisioner.SignOptions{}, customCertLifetime(caddy.Duration(lifetime)))
	if err != nil {
		return nil, err
	}

	// PEM-encode the whole returned chain into one buffer
	var buf bytes.Buffer
	for _, cert := range certChain {
		err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
		if err != nil {
			return nil, err
		}
	}

	return &certmagic.IssuedCertificate{
		Certificate: buf.Bytes(),
	}, nil
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
//	... internal {
//	    ca
//	    lifetime
//	    sign_with_root
//	}
func (iss *InternalIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume issuer name
	for d.NextBlock(0) {
		switch d.Val() {
		case "ca":
			if !d.AllArgs(&iss.CA) {
				return d.ArgErr()
			}
		case "lifetime":
			if !d.NextArg() {
				return d.ArgErr()
			}
			dur, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return err
			}
			iss.Lifetime = caddy.Duration(dur)
		case "sign_with_root":
			if d.NextArg() {
				return d.ArgErr()
			}
			iss.SignWithRoot = true
		default:
			return d.Errf("unrecognized subdirective '%s'", d.Val())
		}
	}
	return nil
}

// customCertLifetime allows us to customize certificates that are issued
// by Smallstep libs, particularly the NotBefore & NotAfter dates.
type customCertLifetime time.Duration

// Modify implements provisioner.CertificateModifier by anchoring NotBefore
// at the current time and setting NotAfter to NotBefore plus the lifetime.
func (d customCertLifetime) Modify(cert *x509.Certificate, _ provisioner.SignOptions) error {
	cert.NotBefore = time.Now()
	cert.NotAfter = cert.NotBefore.Add(time.Duration(d))
	return nil
}

const defaultInternalCertLifetime = 12 * time.Hour

// Interface guards
var (
	_ caddy.Provisioner               = (*InternalIssuer)(nil)
	_ certmagic.Issuer                = (*InternalIssuer)(nil)
	_ provisioner.CertificateModifier = (*customCertLifetime)(nil)
)



================================================
FILE: modules/caddytls/internalissuer_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddypki"
	"go.uber.org/zap"
	"go.step.sm/crypto/keyutil"
	"go.step.sm/crypto/pemutil"
)

// TestInternalIssuer_Issue builds a root + two-intermediate test PKI on disk,
// then verifies that Issue returns the expected certificate chain length for
// a generated CA, a single intermediate loaded from disk, and a multi-
// intermediate chain loaded from disk.
func TestInternalIssuer_Issue(t *testing.T) {
	// build a self-signed test root
	rootSigner, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Creating root signer failed: %v", err)
	}
	tmpl := &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-root"},
		IsCA:       true,
		MaxPathLen: 3,
		NotAfter:   time.Now().Add(7 * 24 * time.Hour),
		NotBefore:  time.Now().Add(-7 * 24 * time.Hour),
	}
	rootBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootSigner.Public(), rootSigner)
	if err != nil {
		t.Fatalf("Creating root certificate failed: %v", err)
	}
	root, err := x509.ParseCertificate(rootBytes)
	if err != nil {
		t.Fatalf("Parsing root certificate failed: %v", err)
	}

	// first intermediate, signed by the root
	firstIntermediateSigner, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Creating intermedaite signer failed: %v", err)
	}
	firstIntermediateBytes, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-first-intermediate"},
		IsCA:       true,
		MaxPathLen: 2,
		NotAfter:   time.Now().Add(24 * time.Hour),
		NotBefore:  time.Now().Add(-24 * time.Hour),
	}, root, firstIntermediateSigner.Public(), rootSigner)
	if err != nil {
		t.Fatalf("Creating intermediate certificate failed: %v", err)
	}
	firstIntermediate, err := x509.ParseCertificate(firstIntermediateBytes)
	if err != nil {
		t.Fatalf("Parsing intermediate certificate failed: %v", err)
	}

	// second intermediate, signed by the first intermediate
	secondIntermediateSigner, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Creating second intermedaite signer failed: %v", err)
	}
	secondIntermediateBytes, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
		Subject:    pkix.Name{CommonName: "test-second-intermediate"},
		IsCA:       true,
		MaxPathLen: 2,
		NotAfter:   time.Now().Add(24 * time.Hour),
		NotBefore:  time.Now().Add(-24 * time.Hour),
	}, firstIntermediate, secondIntermediateSigner.Public(), firstIntermediateSigner)
	if err != nil {
		t.Fatalf("Creating second intermediate certificate failed: %v", err)
	}
	secondIntermediate, err := x509.ParseCertificate(secondIntermediateBytes)
	if err != nil {
		t.Fatalf("Parsing second intermediate certificate failed: %v", err)
	}

	// write the PKI material to a temp dir for the from-disk subtests
	dir := t.TempDir()
	storageDir := filepath.Join(dir, "certmagic")

	rootCertFile := filepath.Join(dir, "root.pem")
	if _, err = pemutil.Serialize(root, pemutil.WithFilename(rootCertFile)); err != nil {
		t.Fatalf("Failed serializing root certificate: %v", err)
	}

	intermediateCertFile := filepath.Join(dir, "intermediate.pem")
	if _, err = pemutil.Serialize(firstIntermediate, pemutil.WithFilename(intermediateCertFile)); err != nil {
		t.Fatalf("Failed serializing intermediate certificate: %v", err)
	}
	intermediateKeyFile := filepath.Join(dir, "intermediate.key")
	if _, err = pemutil.Serialize(firstIntermediateSigner, pemutil.WithFilename(intermediateKeyFile)); err != nil {
		t.Fatalf("Failed serializing intermediate key: %v", err)
	}

	// bundle both intermediates (leaf-most first) into one chain file
	var intermediateChainContents []byte
	intermediateChain := []*x509.Certificate{secondIntermediate, firstIntermediate}
	for _, cert := range intermediateChain {
		b, err := pemutil.Serialize(cert)
		if err != nil {
			t.Fatalf("Failed serializing intermediate certificate: %v", err)
		}
		intermediateChainContents = append(intermediateChainContents, pem.EncodeToMemory(b)...)
	}
	intermediateChainFile := filepath.Join(dir, "intermediates.pem")
	if err := os.WriteFile(intermediateChainFile, intermediateChainContents, 0644); err != nil {
		t.Fatalf("Failed writing intermediate chain: %v", err)
	}
	intermediateChainKeyFile := filepath.Join(dir, "intermediates.key")
	if _, err = pemutil.Serialize(secondIntermediateSigner, pemutil.WithFilename(intermediateChainKeyFile)); err != nil {
		t.Fatalf("Failed serializing intermediate key: %v", err)
	}

	// a CSR shared by all subtests
	signer, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		t.Fatalf("Failed creating signer: %v", err)
	}
	csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
		Subject: pkix.Name{CommonName: "test"},
	}, signer)
	if err != nil {
		t.Fatalf("Failed creating CSR: %v", err)
	}
	csr, err := x509.ParseCertificateRequest(csrBytes)
	if err != nil {
		t.Fatalf("Failed parsing CSR: %v", err)
	}

	t.Run("generated-with-defaults", func(t *testing.T) {
		caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: t.Context()})
		t.Cleanup(cancel)
		logger := zap.NewNop()
		ca := &caddypki.CA{
			StorageRaw: []byte(fmt.Sprintf(`{"module": "file_system", "root": %q}`, storageDir)),
		}
		if err := ca.Provision(caddyCtx, "local-test-generated", logger); err != nil {
			t.Fatalf("Failed provisioning CA: %v", err)
		}

		iss := InternalIssuer{
			SignWithRoot: false,
			ca:           ca,
			logger:       logger,
		}

		c, err := iss.Issue(t.Context(), csr)
		if err != nil {
			t.Fatalf("Failed issuing certificate: %v", err)
		}

		// expect leaf + intermediate
		chain, err := pemutil.ParseCertificateBundle(c.Certificate)
		if err != nil {
			t.Errorf("Failed issuing certificate: %v", err)
		}
		if len(chain) != 2 {
			t.Errorf("Expected 2 certificates in chain; got %d", len(chain))
		}
	})

	t.Run("single-intermediate-from-disk", func(t *testing.T) {
		caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: t.Context()})
		t.Cleanup(cancel)
		logger := zap.NewNop()
		ca := &caddypki.CA{
			Root: &caddypki.KeyPair{
				Certificate: rootCertFile,
			},
			Intermediate: &caddypki.KeyPair{
				Certificate: intermediateCertFile,
				PrivateKey:  intermediateKeyFile,
			},
			StorageRaw: []byte(fmt.Sprintf(`{"module": "file_system", "root": %q}`, storageDir)),
		}
		if err := ca.Provision(caddyCtx, "local-test-single-intermediate", logger); err != nil {
			t.Fatalf("Failed provisioning CA: %v", err)
		}

		iss := InternalIssuer{
			ca:           ca,
			SignWithRoot: false,
			logger:       logger,
		}

		c, err := iss.Issue(t.Context(), csr)
		if err != nil {
			t.Fatalf("Failed issuing certificate: %v", err)
		}

		// expect leaf + intermediate
		chain, err := pemutil.ParseCertificateBundle(c.Certificate)
		if err != nil {
			t.Errorf("Failed issuing certificate: %v", err)
		}
		if len(chain) != 2 {
			t.Errorf("Expected 2 certificates in chain; got %d", len(chain))
		}
	})

	t.Run("multiple-intermediates-from-disk", func(t *testing.T) {
		caddyCtx, cancel := caddy.NewContext(caddy.Context{Context: t.Context()})
		t.Cleanup(cancel)
		logger := zap.NewNop()
		ca := &caddypki.CA{
			Root: &caddypki.KeyPair{
				Certificate: rootCertFile,
			},
			Intermediate: &caddypki.KeyPair{
				Certificate: intermediateChainFile,
				PrivateKey:  intermediateChainKeyFile,
			},
			StorageRaw: []byte(fmt.Sprintf(`{"module": "file_system", "root": %q}`, storageDir)),
		}
		if err := ca.Provision(caddyCtx, "local-test", zap.NewNop()); err != nil {
			t.Fatalf("Failed provisioning CA: %v", err)
		}

		iss := InternalIssuer{
			ca:           ca,
			SignWithRoot: false,
			logger:       logger,
		}

		c, err := iss.Issue(t.Context(), csr)
		if err != nil {
			t.Fatalf("Failed issuing certificate: %v", err)
		}

		// expect leaf + both intermediates
		chain, err := pemutil.ParseCertificateBundle(c.Certificate)
		if err != nil {
			t.Errorf("Failed issuing certificate: %v", err)
		}
		if len(chain) != 3 {
			t.Errorf("Expected 3 certificates in chain; got %d", len(chain))
		}
	})
}



================================================
FILE: modules/caddytls/leaffileloader.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "crypto/x509" "encoding/pem" "fmt" "os" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(LeafFileLoader{}) } // LeafFileLoader loads leaf certificates from disk. type LeafFileLoader struct { Files []string `json:"files,omitempty"` } // CaddyModule returns the Caddy module information. func (LeafFileLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.leaf_cert_loader.file", New: func() caddy.Module { return new(LeafFileLoader) }, } } // Provision implements caddy.Provisioner. func (fl *LeafFileLoader) Provision(ctx caddy.Context) error { repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { repl = caddy.NewReplacer() } for k, path := range fl.Files { fl.Files[k] = repl.ReplaceKnown(path, "") } return nil } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (fl *LeafFileLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.NextArg() fl.Files = append(fl.Files, d.RemainingArgs()...) return nil } // LoadLeafCertificates returns the certificates to be loaded by fl. func (fl LeafFileLoader) LoadLeafCertificates() ([]*x509.Certificate, error) { certificates := make([]*x509.Certificate, 0, len(fl.Files)) for _, path := range fl.Files { ders, err := convertPEMFilesToDERBytes(path) if err != nil { return nil, err } certs, err := x509.ParseCertificates(ders) if err != nil { return nil, err } certificates = append(certificates, certs...) 
// convertPEMFilesToDERBytes reads the PEM file at filename, decodes
// every CERTIFICATE block in it, and returns the concatenated DER
// bytes of all certificates found.
//
// It returns an error if the file cannot be read, if any PEM block
// has a type other than CERTIFICATE, or if the file contains no
// CERTIFICATE block at all.
func convertPEMFilesToDERBytes(filename string) ([]byte, error) {
	certDataPEM, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	var ders []byte
	// while block is not nil, we have more certificates in the file
	for block, rest := pem.Decode(certDataPEM); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			// report the offending block type instead of (misleadingly)
			// claiming that no CERTIFICATE block was found at all
			return nil, fmt.Errorf("unexpected PEM block type %q in %s; expected CERTIFICATE", block.Type, filename)
		}
		ders = append(ders, block.Bytes...)
	}
	// if we decoded nothing, return an error
	if len(ders) == 0 {
		return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename)
	}
	return ders, nil
}
pemFileString := strings.ReplaceAll(string(pemFileBytes), "\r\n", "\n") if string(pemBytes) != pemFileString { t.Errorf("Leaf Certificate File Loader: Failed to load the correct certificate") } } ================================================ FILE: modules/caddytls/leaffolderloader.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "crypto/x509" "fmt" "os" "path/filepath" "strings" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(LeafFolderLoader{}) } // LeafFolderLoader loads certificates from disk // by recursively walking the specified directories, looking for PEM // files which contain a certificate. type LeafFolderLoader struct { Folders []string `json:"folders,omitempty"` } // CaddyModule returns the Caddy module information. func (LeafFolderLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.leaf_cert_loader.folder", New: func() caddy.Module { return new(LeafFolderLoader) }, } } // Provision implements caddy.Provisioner. func (fl *LeafFolderLoader) Provision(ctx caddy.Context) error { repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { repl = caddy.NewReplacer() } for k, path := range fl.Folders { fl.Folders[k] = repl.ReplaceKnown(path, "") } return nil } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. 
func (fl *LeafFolderLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.NextArg() fl.Folders = append(fl.Folders, d.RemainingArgs()...) return nil } // LoadLeafCertificates loads all the leaf certificates in the directories // listed in fl from all files ending with .pem. func (fl LeafFolderLoader) LoadLeafCertificates() ([]*x509.Certificate, error) { var certs []*x509.Certificate for _, dir := range fl.Folders { err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error { if err != nil { return fmt.Errorf("unable to traverse into path: %s", fpath) } if info.IsDir() { return nil } if !strings.HasSuffix(strings.ToLower(info.Name()), ".pem") { return nil } certData, err := convertPEMFilesToDERBytes(fpath) if err != nil { return err } cert, err := x509.ParseCertificate(certData) if err != nil { return fmt.Errorf("%s: %w", fpath, err) } certs = append(certs, cert) return nil }) if err != nil { return nil, err } } return certs, nil } var ( _ LeafCertificateLoader = (*LeafFolderLoader)(nil) _ caddy.Provisioner = (*LeafFolderLoader)(nil) _ caddyfile.Unmarshaler = (*LeafFolderLoader)(nil) ) ================================================ FILE: modules/caddytls/leaffolderloader_test.go ================================================ package caddytls import ( "context" "encoding/pem" "os" "strings" "testing" "github.com/caddyserver/caddy/v2" ) func TestLeafFolderLoader(t *testing.T) { fl := LeafFolderLoader{Folders: []string{"../../caddytest"}} fl.Provision(caddy.Context{Context: context.Background()}) out, err := fl.LoadLeafCertificates() if err != nil { t.Errorf("Leaf certs folder loading test failed: %v", err) } if len(out) != 1 { t.Errorf("Error loading leaf cert in memory struct") return } pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: out[0].Raw}) pemFileBytes, err := os.ReadFile("../../caddytest/leafcert.pem") if err != nil { t.Errorf("Unable to read the example certificate from the file") } // Remove /r because 
windows. pemFileString := strings.ReplaceAll(string(pemFileBytes), "\r\n", "\n") if string(pemBytes) != pemFileString { t.Errorf("Leaf Certificate Folder Loader: Failed to load the correct certificate") } } ================================================ FILE: modules/caddytls/leafpemloader.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "crypto/x509" "fmt" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(LeafPEMLoader{}) } // LeafPEMLoader loads leaf certificates by // decoding their PEM blocks directly. This has the advantage // of not needing to store them on disk at all. type LeafPEMLoader struct { Certificates []string `json:"certificates,omitempty"` } // Provision implements caddy.Provisioner. func (pl *LeafPEMLoader) Provision(ctx caddy.Context) error { repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { repl = caddy.NewReplacer() } for i, cert := range pl.Certificates { pl.Certificates[i] = repl.ReplaceKnown(cert, "") } return nil } // CaddyModule returns the Caddy module information. func (LeafPEMLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.leaf_cert_loader.pem", New: func() caddy.Module { return new(LeafPEMLoader) }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. 
func (fl *LeafPEMLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.NextArg() fl.Certificates = append(fl.Certificates, d.RemainingArgs()...) return nil } // LoadLeafCertificates returns the certificates contained in pl. func (pl LeafPEMLoader) LoadLeafCertificates() ([]*x509.Certificate, error) { certs := make([]*x509.Certificate, 0, len(pl.Certificates)) for i, cert := range pl.Certificates { derBytes, err := convertPEMToDER([]byte(cert)) if err != nil { return nil, fmt.Errorf("PEM leaf certificate loader, cert %d: %v", i, err) } cert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, fmt.Errorf("PEM cert %d: %v", i, err) } certs = append(certs, cert) } return certs, nil } // Interface guard var ( _ LeafCertificateLoader = (*LeafPEMLoader)(nil) _ caddy.Provisioner = (*LeafPEMLoader)(nil) ) ================================================ FILE: modules/caddytls/leafpemloader_test.go ================================================ package caddytls import ( "context" "encoding/pem" "os" "strings" "testing" "github.com/caddyserver/caddy/v2" ) func TestLeafPEMLoader(t *testing.T) { pl := LeafPEMLoader{Certificates: []string{` -----BEGIN CERTIFICATE----- MIICUTCCAfugAwIBAgIBADANBgkqhkiG9w0BAQQFADBXMQswCQYDVQQGEwJDTjEL MAkGA1UECBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMC VU4xFDASBgNVBAMTC0hlcm9uZyBZYW5nMB4XDTA1MDcxNTIxMTk0N1oXDTA1MDgx NDIxMTk0N1owVzELMAkGA1UEBhMCQ04xCzAJBgNVBAgTAlBOMQswCQYDVQQHEwJD TjELMAkGA1UEChMCT04xCzAJBgNVBAsTAlVOMRQwEgYDVQQDEwtIZXJvbmcgWWFu ZzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCp5hnG7ogBhtlynpOS21cBewKE/B7j V14qeyslnr26xZUsSVko36ZnhiaO/zbMOoRcKK9vEcgMtcLFuQTWDl3RAgMBAAGj gbEwga4wHQYDVR0OBBYEFFXI70krXeQDxZgbaCQoR4jUDncEMH8GA1UdIwR4MHaA FFXI70krXeQDxZgbaCQoR4jUDncEoVukWTBXMQswCQYDVQQGEwJDTjELMAkGA1UE CBMCUE4xCzAJBgNVBAcTAkNOMQswCQYDVQQKEwJPTjELMAkGA1UECxMCVU4xFDAS BgNVBAMTC0hlcm9uZyBZYW5nggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEE BQADQQA/ugzBrjjK9jcWnDVfGHlk3icNRq0oV7Ri32z/+HQX67aRfgZu7KWdI+Ju 
Wm7DCfrPNGVwFWUQOmsPue9rZBgO -----END CERTIFICATE----- `}} pl.Provision(caddy.Context{Context: context.Background()}) out, err := pl.LoadLeafCertificates() if err != nil { t.Errorf("Leaf certs pem loading test failed: %v", err) } if len(out) != 1 { t.Errorf("Error loading leaf cert in memory struct") return } pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: out[0].Raw}) pemFileBytes, err := os.ReadFile("../../caddytest/leafcert.pem") if err != nil { t.Errorf("Unable to read the example certificate from the file") } // Remove /r because windows. pemFileString := strings.ReplaceAll(string(pemFileBytes), "\r\n", "\n") if string(pemBytes) != pemFileString { t.Errorf("Leaf Certificate Folder Loader: Failed to load the correct certificate") } } ================================================ FILE: modules/caddytls/leafstorageloader.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "crypto/x509" "encoding/json" "encoding/pem" "fmt" "github.com/caddyserver/certmagic" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(LeafStorageLoader{}) } // LeafStorageLoader loads leaf certificates from the // globally configured storage module. type LeafStorageLoader struct { // A list of certificate file names to be loaded from storage. 
// CaddyModule returns the Caddy module information.
func (LeafStorageLoader) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.leaf_cert_loader.storage",
		New: func() caddy.Module { return new(LeafStorageLoader) },
	}
}

// Provision loads the storage module for sl.
// If an explicit storage module is configured (StorageRaw), it is
// loaded and converted to a certmagic.Storage; otherwise the
// globally configured default storage from the context is used.
// It also expands placeholders in the certificate names.
func (sl *LeafStorageLoader) Provision(ctx caddy.Context) error {
	if sl.StorageRaw != nil {
		val, err := ctx.LoadModule(sl, "StorageRaw")
		if err != nil {
			return fmt.Errorf("loading storage module: %v", err)
		}
		cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
		if err != nil {
			return fmt.Errorf("creating storage configuration: %v", err)
		}
		sl.storage = cmStorage
	}
	// fall back to the default storage when none was configured explicitly
	if sl.storage == nil {
		sl.storage = ctx.Storage()
	}
	// keep the context so storage reads can use it at load time
	sl.ctx = ctx

	repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	if !ok {
		// no replacer in the context; use a fresh one
		repl = caddy.NewReplacer()
	}
	for k, path := range sl.Certificates {
		sl.Certificates[k] = repl.ReplaceKnown(path, "")
	}
	return nil
}
} return certificates, nil } func convertPEMToDER(pemData []byte) ([]byte, error) { var ders []byte // while block is not nil, we have more certificates in the file for block, rest := pem.Decode(pemData); block != nil; block, rest = pem.Decode(rest) { if block.Type != "CERTIFICATE" { return nil, fmt.Errorf("no CERTIFICATE pem block found in the given pem data") } ders = append( ders, block.Bytes..., ) } // if we decoded nothing, return an error if len(ders) == 0 { return nil, fmt.Errorf("no CERTIFICATE pem block found in the given pem data") } return ders, nil } // Interface guard var ( _ LeafCertificateLoader = (*LeafStorageLoader)(nil) _ caddy.Provisioner = (*LeafStorageLoader)(nil) ) ================================================ FILE: modules/caddytls/matchers.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "context" "crypto/tls" "fmt" "net" "net/netip" "regexp" "slices" "strconv" "strings" "github.com/caddyserver/certmagic" "go.uber.org/zap" "go.uber.org/zap/zapcore" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" "github.com/caddyserver/caddy/v2/internal" ) func init() { caddy.RegisterModule(MatchServerName{}) caddy.RegisterModule(MatchServerNameRE{}) caddy.RegisterModule(MatchRemoteIP{}) caddy.RegisterModule(MatchLocalIP{}) } // MatchServerName matches based on SNI. 
Names in // this list may use left-most-label wildcards, // similar to wildcard certificates. type MatchServerName []string // CaddyModule returns the Caddy module information. func (MatchServerName) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.handshake_match.sni", New: func() caddy.Module { return new(MatchServerName) }, } } // Match matches hello based on SNI. func (m MatchServerName) Match(hello *tls.ClientHelloInfo) bool { var repl *caddy.Replacer // caddytls.TestServerNameMatcher calls this function without any context if ctx := hello.Context(); ctx != nil { // In some situations the existing context may have no replacer if replAny := ctx.Value(caddy.ReplacerCtxKey); replAny != nil { repl = replAny.(*caddy.Replacer) } } if repl == nil { repl = caddy.NewReplacer() } for _, name := range m { rs := repl.ReplaceAll(name, "") if certmagic.MatchWildcard(hello.ServerName, rs) { return true } } return false } // UnmarshalCaddyfile sets up the MatchServerName from Caddyfile tokens. Syntax: // // sni func (m *MatchServerName) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { for d.Next() { wrapper := d.Val() // At least one same-line option must be provided if d.CountRemainingArgs() == 0 { return d.ArgErr() } *m = append(*m, d.RemainingArgs()...) // No blocks are supported if d.NextBlock(d.Nesting()) { return d.Errf("malformed TLS handshake matcher '%s': blocks are not supported", wrapper) } } return nil } // MatchRegexp is an embeddable type for matching // using regular expressions. It adds placeholders // to the request's replacer. In fact, it is a copy of // caddyhttp.MatchRegexp with a local replacer prefix // and placeholders support in a regular expression pattern. type MatchRegexp struct { // A unique name for this regular expression. Optional, // but useful to prevent overwriting captures from other // regexp matchers. 
// MatchRegexp is an embeddable type for matching
// using regular expressions. It adds placeholders
// to the request's replacer. In fact, it is a copy of
// caddyhttp.MatchRegexp with a local replacer prefix
// and placeholders support in a regular expression pattern.
type MatchRegexp struct {
	// A unique name for this regular expression. Optional,
	// but useful to prevent overwriting captures from other
	// regexp matchers.
	Name string `json:"name,omitempty"`

	// The regular expression to evaluate, in RE2 syntax,
	// which is the same general syntax used by Go, Perl,
	// and Python. For details, see
	// [Go's regexp package](https://golang.org/pkg/regexp/).
	// Captures are accessible via placeholders. Unnamed
	// capture groups are exposed as their numeric, 1-based
	// index, while named capture groups are available by
	// the capture group name.
	Pattern string `json:"pattern"`

	// compiled form of Pattern, set during Provision
	compiled *regexp.Regexp
}

// Provision compiles the regular expression which may include placeholders.
func (mre *MatchRegexp) Provision(caddy.Context) error {
	// a fresh replacer expands any global placeholders in the pattern
	// before compilation
	repl := caddy.NewReplacer()
	re, err := regexp.Compile(repl.ReplaceAll(mre.Pattern, ""))
	if err != nil {
		return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
	}
	mre.compiled = re
	return nil
}

// Validate ensures mre is set up correctly.
func (mre *MatchRegexp) Validate() error {
	// the name becomes part of placeholder keys, so it must consist of
	// word characters only
	if mre.Name != "" && !wordRE.MatchString(mre.Name) {
		return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
	}
	return nil
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	// iterate to merge multiple matchers into one
	for d.Next() {
		// If this is the second iteration of the loop
		// then there's more than one *_regexp matcher,
		// and we would end up overwriting the old one
		if mre.Pattern != "" {
			return d.Err("regular expression can only be used once per named matcher")
		}

		args := d.RemainingArgs()
		switch len(args) {
		case 1:
			// pattern only
			mre.Pattern = args[0]
		case 2:
			// explicit name, then pattern
			mre.Name = args[0]
			mre.Pattern = args[1]
		default:
			return d.ArgErr()
		}

		// Default to the named matcher's name, if no regexp name is provided.
		// Note: it requires d.SetContext(caddyfile.MatcherNameCtxKey, value)
		// called before this unmarshalling, otherwise it wouldn't work.
		if mre.Name == "" {
			mre.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
		}

		if d.NextBlock(0) {
			return d.Err("malformed regexp matcher: blocks are not supported")
		}
	}
	return nil
}

// MatchServerNameRE matches based on SNI using a regular expression.
type MatchServerNameRE struct{ MatchRegexp }

// CaddyModule returns the Caddy module information.
func (MatchServerNameRE) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.handshake_match.sni_regexp",
		New: func() caddy.Module { return new(MatchServerNameRE) },
	}
}
// Match matches hello based on SNI using a regular expression.
func (m MatchServerNameRE) Match(hello *tls.ClientHelloInfo) bool {
	// Note: caddytls.TestServerNameMatcher calls this function without any context
	ctx := hello.Context()
	if ctx == nil {
		// layer4.Connection implements GetContext() to pass its context here,
		// since hello.Context() returns nil
		if mayHaveContext, ok := hello.Conn.(interface{ GetContext() context.Context }); ok {
			ctx = mayHaveContext.GetContext()
		}
	}
	var repl *caddy.Replacer
	if ctx != nil {
		// In some situations the existing context may have no replacer
		if replAny := ctx.Value(caddy.ReplacerCtxKey); replAny != nil {
			repl = replAny.(*caddy.Replacer)
		}
	}
	if repl == nil {
		// no replacer available from any context; use a fresh one
		repl = caddy.NewReplacer()
	}
	return m.MatchRegexp.Match(hello.ServerName, repl)
}

// MatchRemoteIP matches based on the remote IP of the
// connection. Specific IPs or CIDR ranges can be specified.
//
// Note that IPs can sometimes be spoofed, so do not rely
// on this as a replacement for actual authentication.
type MatchRemoteIP struct {
	// The IPs or CIDR ranges to match.
	Ranges []string `json:"ranges,omitempty"`

	// The IPs or CIDR ranges to *NOT* match.
	NotRanges []string `json:"not_ranges,omitempty"`

	// parsed forms of Ranges and NotRanges, populated during provisioning
	cidrs    []netip.Prefix
	notCidrs []netip.Prefix
	logger   *zap.Logger
}

// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.handshake_match.remote_ip",
		New: func() caddy.Module { return new(MatchRemoteIP) },
	}
}
} return nil } // Match matches hello based on the connection's remote IP. func (m MatchRemoteIP) Match(hello *tls.ClientHelloInfo) bool { remoteAddr := hello.Conn.RemoteAddr().String() ipStr, _, err := net.SplitHostPort(remoteAddr) if err != nil { ipStr = remoteAddr // weird; maybe no port? } ipAddr, err := netip.ParseAddr(ipStr) if err != nil { if c := m.logger.Check(zapcore.ErrorLevel, "invalid client IP address"); c != nil { c.Write(zap.String("ip", ipStr)) } return false } return (len(m.cidrs) == 0 || m.matches(ipAddr, m.cidrs)) && (len(m.notCidrs) == 0 || !m.matches(ipAddr, m.notCidrs)) } func (MatchRemoteIP) parseIPRange(str string) ([]netip.Prefix, error) { var cidrs []netip.Prefix if strings.Contains(str, "/") { ipNet, err := netip.ParsePrefix(str) if err != nil { return nil, fmt.Errorf("parsing CIDR expression: %v", err) } cidrs = append(cidrs, ipNet) } else { ipAddr, err := netip.ParseAddr(str) if err != nil { return nil, fmt.Errorf("invalid IP address: '%s': %v", str, err) } ip := netip.PrefixFrom(ipAddr, ipAddr.BitLen()) cidrs = append(cidrs, ip) } return cidrs, nil } func (MatchRemoteIP) matches(ip netip.Addr, ranges []netip.Prefix) bool { return slices.ContainsFunc(ranges, func(prefix netip.Prefix) bool { return prefix.Contains(ip) }) } // UnmarshalCaddyfile sets up the MatchRemoteIP from Caddyfile tokens. Syntax: // // remote_ip // // Note: IPs and CIDRs prefixed with ! symbol are treated as not_ranges func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { for d.Next() { wrapper := d.Val() // At least one same-line option must be provided if d.CountRemainingArgs() == 0 { return d.ArgErr() } for d.NextArg() { val := d.Val() var exclamation bool if len(val) > 1 && val[0] == '!' { exclamation, val = true, val[1:] } ranges := []string{val} if val == "private_ranges" { ranges = internal.PrivateRangesCIDR() } if exclamation { m.NotRanges = append(m.NotRanges, ranges...) } else { m.Ranges = append(m.Ranges, ranges...) 
} } // No blocks are supported if d.NextBlock(d.Nesting()) { return d.Errf("malformed TLS handshake matcher '%s': blocks are not supported", wrapper) } } return nil } // MatchLocalIP matches based on the IP address of the interface // receiving the connection. Specific IPs or CIDR ranges can be specified. type MatchLocalIP struct { // The IPs or CIDR ranges to match. Ranges []string `json:"ranges,omitempty"` cidrs []netip.Prefix logger *zap.Logger } // CaddyModule returns the Caddy module information. func (MatchLocalIP) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.handshake_match.local_ip", New: func() caddy.Module { return new(MatchLocalIP) }, } } // Provision parses m's IP ranges, either from IP or CIDR expressions. func (m *MatchLocalIP) Provision(ctx caddy.Context) error { repl := caddy.NewReplacer() m.logger = ctx.Logger() for _, str := range m.Ranges { rs := repl.ReplaceAll(str, "") cidrs, err := m.parseIPRange(rs) if err != nil { return err } m.cidrs = append(m.cidrs, cidrs...) } return nil } // Match matches hello based on the connection's remote IP. func (m MatchLocalIP) Match(hello *tls.ClientHelloInfo) bool { localAddr := hello.Conn.LocalAddr().String() ipStr, _, err := net.SplitHostPort(localAddr) if err != nil { ipStr = localAddr // weird; maybe no port? 
} ipAddr, err := netip.ParseAddr(ipStr) if err != nil { if c := m.logger.Check(zapcore.ErrorLevel, "invalid local IP address"); c != nil { c.Write(zap.String("ip", ipStr)) } return false } return (len(m.cidrs) == 0 || m.matches(ipAddr, m.cidrs)) } func (MatchLocalIP) parseIPRange(str string) ([]netip.Prefix, error) { var cidrs []netip.Prefix if strings.Contains(str, "/") { ipNet, err := netip.ParsePrefix(str) if err != nil { return nil, fmt.Errorf("parsing CIDR expression: %v", err) } cidrs = append(cidrs, ipNet) } else { ipAddr, err := netip.ParseAddr(str) if err != nil { return nil, fmt.Errorf("invalid IP address: '%s': %v", str, err) } ip := netip.PrefixFrom(ipAddr, ipAddr.BitLen()) cidrs = append(cidrs, ip) } return cidrs, nil } func (MatchLocalIP) matches(ip netip.Addr, ranges []netip.Prefix) bool { return slices.ContainsFunc(ranges, func(prefix netip.Prefix) bool { return prefix.Contains(ip) }) } // UnmarshalCaddyfile sets up the MatchLocalIP from Caddyfile tokens. Syntax: // // local_ip func (m *MatchLocalIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { for d.Next() { wrapper := d.Val() // At least one same-line option must be provided if d.CountRemainingArgs() == 0 { return d.ArgErr() } for d.NextArg() { val := d.Val() if val == "private_ranges" { m.Ranges = append(m.Ranges, internal.PrivateRangesCIDR()...) 
continue } m.Ranges = append(m.Ranges, val) } // No blocks are supported if d.NextBlock(d.Nesting()) { return d.Errf("malformed TLS handshake matcher '%s': blocks are not supported", wrapper) } } return nil } // Interface guards var ( _ ConnectionMatcher = (*MatchLocalIP)(nil) _ ConnectionMatcher = (*MatchRemoteIP)(nil) _ ConnectionMatcher = (*MatchServerName)(nil) _ ConnectionMatcher = (*MatchServerNameRE)(nil) _ caddy.Provisioner = (*MatchLocalIP)(nil) _ caddy.Provisioner = (*MatchRemoteIP)(nil) _ caddy.Provisioner = (*MatchServerNameRE)(nil) _ caddyfile.Unmarshaler = (*MatchLocalIP)(nil) _ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil) _ caddyfile.Unmarshaler = (*MatchServerName)(nil) _ caddyfile.Unmarshaler = (*MatchServerNameRE)(nil) ) var wordRE = regexp.MustCompile(`\w+`) const regexpPlaceholderPrefix = "tls.regexp" ================================================ FILE: modules/caddytls/matchers_test.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddytls

import (
	"context"
	"crypto/tls"
	"net"
	"testing"

	"github.com/caddyserver/caddy/v2"
)

// TestServerNameMatcher exercises exact and wildcard (single-label `*.`)
// SNI matching; note that a wildcard does not match the apex name and
// does not span multiple labels.
func TestServerNameMatcher(t *testing.T) {
	for i, tc := range []struct {
		names  []string
		input  string
		expect bool
	}{
		{
			names:  []string{"example.com"},
			input:  "example.com",
			expect: true,
		},
		{
			names:  []string{"example.com"},
			input:  "foo.com",
			expect: false,
		},
		{
			names:  []string{"example.com"},
			input:  "",
			expect: false,
		},
		{
			names:  []string{},
			input:  "",
			expect: false,
		},
		{
			names:  []string{"foo", "example.com"},
			input:  "example.com",
			expect: true,
		},
		{
			names:  []string{"foo", "example.com"},
			input:  "sub.example.com",
			expect: false,
		},
		{
			names:  []string{"foo", "example.com"},
			input:  "foo.com",
			expect: false,
		},
		{
			names:  []string{"*.example.com"},
			input:  "example.com",
			expect: false,
		},
		{
			names:  []string{"*.example.com"},
			input:  "sub.example.com",
			expect: true,
		},
		{
			names:  []string{"*.example.com", "*.sub.example.com"},
			input:  "sub2.sub.example.com",
			expect: true,
		},
	} {
		chi := &tls.ClientHelloInfo{ServerName: tc.input}
		actual := MatchServerName(tc.names).Match(chi)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected %t but got %t (input=%s match=%v)", i, tc.expect, actual, tc.input, tc.names)
		}
	}
}

// TestServerNameREMatcher exercises regexp-based SNI matching; an empty
// pattern matches everything (including an empty server name).
func TestServerNameREMatcher(t *testing.T) {
	for i, tc := range []struct {
		pattern string
		input   string
		expect  bool
	}{
		{
			pattern: "^example\\.(com|net)$",
			input:   "example.com",
			expect:  true,
		},
		{
			pattern: "^example\\.(com|net)$",
			input:   "foo.com",
			expect:  false,
		},
		{
			pattern: "^example\\.(com|net)$",
			input:   "",
			expect:  false,
		},
		{
			pattern: "",
			input:   "",
			expect:  true,
		},
		{
			pattern: "^example\\.(com|net)$",
			input:   "foo.example.com",
			expect:  false,
		},
	} {
		chi := &tls.ClientHelloInfo{ServerName: tc.input}
		mre := MatchServerNameRE{MatchRegexp{Pattern: tc.pattern}}
		// the matcher must be provisioned so the pattern is compiled
		ctx, _ := caddy.NewContext(caddy.Context{Context: context.Background()})
		if mre.Provision(ctx) != nil {
			t.Errorf("Test %d: Failed to provision a regexp matcher (pattern=%v)", i, tc.pattern)
		}
		actual := mre.Match(chi)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected %t but got %t (input=%s match=%v)", i, tc.expect, actual, tc.input, tc.pattern)
		}
	}
}

// TestRemoteIPMatcher checks allow (Ranges) and deny (NotRanges) matching
// against the connection's remote address; NotRanges takes precedence.
func TestRemoteIPMatcher(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	for i, tc := range []struct {
		ranges    []string
		notRanges []string
		input     string
		expect    bool
	}{
		{
			ranges: []string{"127.0.0.1"},
			input:  "127.0.0.1:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.1"},
			input:  "127.0.0.2:12345",
			expect: false,
		},
		{
			ranges: []string{"127.0.0.1/16"},
			input:  "127.0.1.23:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.1", "192.168.1.105"},
			input:  "192.168.1.105:12345",
			expect: true,
		},
		{
			notRanges: []string{"127.0.0.1"},
			input:     "127.0.0.1:12345",
			expect:    false,
		},
		{
			notRanges: []string{"127.0.0.2"},
			input:     "127.0.0.1:12345",
			expect:    true,
		},
		{
			ranges:    []string{"127.0.0.1"},
			notRanges: []string{"127.0.0.2"},
			input:     "127.0.0.1:12345",
			expect:    true,
		},
		{
			ranges:    []string{"127.0.0.2"},
			notRanges: []string{"127.0.0.2"},
			input:     "127.0.0.2:12345",
			expect:    false,
		},
		{
			ranges:    []string{"127.0.0.2"},
			notRanges: []string{"127.0.0.2"},
			input:     "127.0.0.3:12345",
			expect:    false,
		},
	} {
		matcher := MatchRemoteIP{Ranges: tc.ranges, NotRanges: tc.notRanges}
		err := matcher.Provision(ctx)
		if err != nil {
			t.Fatalf("Test %d: Provision failed: %v", i, err)
		}

		addr := testAddr(tc.input)
		chi := &tls.ClientHelloInfo{Conn: testConn{addr: addr}}

		actual := matcher.Match(chi)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected %t but got %t (input=%s ranges=%v notRanges=%v)", i, tc.expect, actual, tc.input, tc.ranges, tc.notRanges)
		}
	}
}

// TestLocalIPMatcher checks matching against the connection's local
// address; with no configured ranges every address matches, an address
// without a port is still matched, and an unparsable address never matches.
func TestLocalIPMatcher(t *testing.T) {
	ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
	defer cancel()

	for i, tc := range []struct {
		ranges []string
		input  string
		expect bool
	}{
		{
			ranges: []string{"127.0.0.1"},
			input:  "127.0.0.1:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.1"},
			input:  "127.0.0.2:12345",
			expect: false,
		},
		{
			ranges: []string{"127.0.0.1/16"},
			input:  "127.0.1.23:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.1", "192.168.1.105"},
			input:  "192.168.1.105:12345",
			expect: true,
		},
		{
			input:  "127.0.0.1:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.1"},
			input:  "127.0.0.1:12345",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.2"},
			input:  "127.0.0.3:12345",
			expect: false,
		},
		{
			ranges: []string{"127.0.0.2"},
			input:  "127.0.0.2",
			expect: true,
		},
		{
			ranges: []string{"127.0.0.2"},
			input:  "127.0.0.300",
			expect: false,
		},
	} {
		matcher := MatchLocalIP{Ranges: tc.ranges}
		err := matcher.Provision(ctx)
		if err != nil {
			t.Fatalf("Test %d: Provision failed: %v", i, err)
		}

		addr := testAddr(tc.input)
		chi := &tls.ClientHelloInfo{Conn: testConn{addr: addr}}

		actual := matcher.Match(chi)
		if actual != tc.expect {
			t.Errorf("Test %d: Expected %t but got %t (input=%s ranges=%v)", i, tc.expect, actual, tc.input, tc.ranges)
		}
	}
}

// testConn is a stub net.Conn whose local and remote addresses are both
// the embedded testAddr; the embedded *net.TCPConn is never dialed.
type testConn struct {
	*net.TCPConn
	addr testAddr
}

func (tc testConn) RemoteAddr() net.Addr {
	return tc.addr
}

func (tc testConn) LocalAddr() net.Addr {
	return tc.addr
}

// testAddr is a net.Addr backed by a plain string.
type testAddr string

func (testAddr) Network() string {
	return "tcp"
}

func (ta testAddr) String() string {
	return string(ta)
}



================================================
FILE: modules/caddytls/ondemand.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"time"

	"github.com/caddyserver/certmagic"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(PermissionByHTTP{})
}

// OnDemandConfig configures on-demand TLS, for obtaining
// needed certificates at handshake-time. Because this
// feature can easily be abused, Caddy must ask permission
// to your application whether a particular domain is allowed
// to have a certificate issued for it.
type OnDemandConfig struct {
	// Deprecated. WILL BE REMOVED SOON. Use 'permission' instead with the `http` module.
	Ask string `json:"ask,omitempty"`

	// REQUIRED. A module that will determine whether a
	// certificate is allowed to be loaded from storage
	// or obtained from an issuer on demand.
	PermissionRaw json.RawMessage `json:"permission,omitempty" caddy:"namespace=tls.permission inline_key=module"`

	// the decoded module from PermissionRaw — presumably loaded
	// during provisioning elsewhere in this package (not visible
	// in this file chunk); TODO confirm
	permission OnDemandPermission
}

// OnDemandPermission is a type that can give permission for
// whether a certificate should be allowed to be obtained or
// loaded from storage on-demand.
// EXPERIMENTAL: This API is experimental and subject to change.
type OnDemandPermission interface {
	// CertificateAllowed returns nil if a certificate for the given
	// name is allowed to be either obtained from an issuer or loaded
	// from storage on-demand.
	//
	// The context passed in has the associated *tls.ClientHelloInfo
	// value available at the certmagic.ClientHelloInfoCtxKey key.
	//
	// In the worst case, this function may be called as frequently
	// as every TLS handshake, so it should return as quick as possible
	// to reduce latency. In the normal case, this function is only
	// called when a certificate is needed that is not already loaded
	// into memory ready to serve.
	CertificateAllowed(ctx context.Context, name string) error
}

// PermissionByHTTP determines permission for a TLS certificate by
// making a request to an HTTP endpoint.
type PermissionByHTTP struct {
	// The endpoint to access. It should be a full URL.
	// A query string parameter "domain" will be added to it,
	// containing the domain (or IP) for the desired certificate,
	// like so: `?domain=example.com`. Generally, this endpoint
	// is not exposed publicly to avoid a minor information leak
	// (which domains are serviced by your application).
	//
	// The endpoint must return a 200 OK status if a certificate
	// is allowed; anything else will cause it to be denied.
	// Redirects are not followed.
	Endpoint string `json:"endpoint"`

	logger   *zap.Logger
	replacer *caddy.Replacer
}

// CaddyModule returns the Caddy module information.
func (PermissionByHTTP) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.permission.http",
		New: func() caddy.Module { return new(PermissionByHTTP) },
	}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (p *PermissionByHTTP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { if !d.Next() { return nil } if !d.AllArgs(&p.Endpoint) { return d.ArgErr() } return nil } func (p *PermissionByHTTP) Provision(ctx caddy.Context) error { p.logger = ctx.Logger() p.replacer = caddy.NewReplacer() return nil } func (p PermissionByHTTP) CertificateAllowed(ctx context.Context, name string) error { // run replacer on endpoint URL (for environment variables) -- return errors to prevent surprises (#5036) askEndpoint, err := p.replacer.ReplaceOrErr(p.Endpoint, true, true) if err != nil { return fmt.Errorf("preparing 'ask' endpoint: %v", err) } askURL, err := url.Parse(askEndpoint) if err != nil { return fmt.Errorf("parsing ask URL: %v", err) } qs := askURL.Query() qs.Set("domain", name) askURL.RawQuery = qs.Encode() askURLString := askURL.String() var remote string if chi, ok := ctx.Value(certmagic.ClientHelloInfoCtxKey).(*tls.ClientHelloInfo); ok && chi != nil { remote = chi.Conn.RemoteAddr().String() } if c := p.logger.Check(zapcore.DebugLevel, "asking permission endpoint"); c != nil { c.Write( zap.String("remote", remote), zap.String("domain", name), zap.String("url", askURLString), ) } resp, err := onDemandAskClient.Get(askURLString) if err != nil { return fmt.Errorf("checking %v to determine if certificate for hostname '%s' should be allowed: %v", askEndpoint, name, err) } resp.Body.Close() if c := p.logger.Check(zapcore.DebugLevel, "response from permission endpoint"); c != nil { c.Write( zap.String("remote", remote), zap.String("domain", name), zap.String("url", askURLString), zap.Int("status", resp.StatusCode), ) } if resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("%s: %w %s - non-2xx status code %d", name, ErrPermissionDenied, askEndpoint, resp.StatusCode) } return nil } // ErrPermissionDenied is an error that should be wrapped or returned when the // configured permission module does not allow a certificate to be issued, // to distinguish that from 
other errors such as connection failure. var ErrPermissionDenied = errors.New("certificate not allowed by permission module") // These perpetual values are used for on-demand TLS. var ( onDemandAskClient = &http.Client{ Timeout: 10 * time.Second, CheckRedirect: func(req *http.Request, via []*http.Request) error { return fmt.Errorf("following http redirects is not allowed") }, } ) // Interface guards var ( _ OnDemandPermission = (*PermissionByHTTP)(nil) _ caddy.Provisioner = (*PermissionByHTTP)(nil) ) ================================================ FILE: modules/caddytls/pemloader.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddytls import ( "crypto/tls" "fmt" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(PEMLoader{}) } // PEMLoader loads certificates and their associated keys by // decoding their PEM blocks directly. This has the advantage // of not needing to store them on disk at all. type PEMLoader []CertKeyPEMPair // Provision implements caddy.Provisioner. 
func (pl PEMLoader) Provision(ctx caddy.Context) error { repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { repl = caddy.NewReplacer() } for k, pair := range pl { for i, tag := range pair.Tags { pair.Tags[i] = repl.ReplaceKnown(tag, "") } pl[k] = CertKeyPEMPair{ CertificatePEM: repl.ReplaceKnown(pair.CertificatePEM, ""), KeyPEM: repl.ReplaceKnown(pair.KeyPEM, ""), Tags: pair.Tags, } } return nil } // CaddyModule returns the Caddy module information. func (PEMLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.certificates.load_pem", New: func() caddy.Module { return new(PEMLoader) }, } } // CertKeyPEMPair pairs certificate and key PEM blocks. type CertKeyPEMPair struct { // The certificate (public key) in PEM format. CertificatePEM string `json:"certificate"` // The private key in PEM format. KeyPEM string `json:"key"` // Arbitrary values to associate with this certificate. // Can be useful when you want to select a particular // certificate when there may be multiple valid candidates. Tags []string `json:"tags,omitempty"` } // LoadCertificates returns the certificates contained in pl. func (pl PEMLoader) LoadCertificates() ([]Certificate, error) { certs := make([]Certificate, 0, len(pl)) for i, pair := range pl { cert, err := tls.X509KeyPair([]byte(pair.CertificatePEM), []byte(pair.KeyPEM)) if err != nil { return nil, fmt.Errorf("PEM pair %d: %v", i, err) } certs = append(certs, Certificate{ Certificate: cert, Tags: pair.Tags, }) } return certs, nil } // Interface guard var ( _ CertificateLoader = (PEMLoader)(nil) _ caddy.Provisioner = (PEMLoader)(nil) ) ================================================ FILE: modules/caddytls/sessiontickets.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"crypto/rand"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"runtime/debug"
	"sync"
	"time"

	"github.com/caddyserver/caddy/v2"
)

// SessionTicketService configures and manages TLS session tickets.
type SessionTicketService struct {
	// KeySource is the method by which Caddy produces or obtains
	// TLS session ticket keys (STEKs). By default, Caddy generates
	// them internally using a secure pseudorandom source.
	KeySource json.RawMessage `json:"key_source,omitempty" caddy:"namespace=tls.stek inline_key=provider"`

	// How often Caddy rotates STEKs. Default: 12h.
	RotationInterval caddy.Duration `json:"rotation_interval,omitempty"`

	// The maximum number of keys to keep in rotation. Default: 4.
	MaxKeys int `json:"max_keys,omitempty"`

	// Disables STEK rotation.
	DisableRotation bool `json:"disable_rotation,omitempty"`

	// Disables TLS session resumption by tickets.
	Disabled bool `json:"disabled,omitempty"`

	keySource   STEKProvider             // loaded from KeySource
	configs     map[*tls.Config]struct{} // tls.Configs currently receiving key updates
	stopChan    chan struct{}            // closed by stop() to end rotation
	currentKeys [][32]byte               // latest key set; guarded by mu
	mu          *sync.Mutex              // guards configs and currentKeys
}

// provision sets defaults, loads the STEK provider module, and (unless
// tickets or rotation are disabled) starts the key rotation service.
func (s *SessionTicketService) provision(ctx caddy.Context) error {
	s.configs = make(map[*tls.Config]struct{})
	s.mu = new(sync.Mutex)

	// establish sane defaults
	if s.RotationInterval == 0 {
		s.RotationInterval = caddy.Duration(defaultSTEKRotationInterval)
	}
	if s.MaxKeys <= 0 {
		s.MaxKeys = defaultMaxSTEKs
	}
	if s.KeySource == nil {
		s.KeySource = json.RawMessage(`{"provider":"standard"}`)
	}

	// load the STEK module, which will provide keys
	val, err := ctx.LoadModule(s, "KeySource")
	if err != nil {
		return fmt.Errorf("loading TLS session ticket ephemeral keys provider module: %s", err)
	}
	s.keySource = val.(STEKProvider)

	// if session tickets or just rotation are
	// disabled, no need to start service
	if s.Disabled || s.DisableRotation {
		return nil
	}

	// start the STEK module; this ensures we have
	// a starting key before any config needs one
	return s.start()
}

// start loads the starting STEKs and spawns a goroutine
// which loops to rotate the STEKs, which continues until
// stop() is called. If start() was already called, this
// is a no-op.
func (s *SessionTicketService) start() error {
	if s.stopChan != nil {
		return nil
	}
	s.stopChan = make(chan struct{})

	// initializing the key source gives us our
	// initial key(s) to start with; if successful,
	// we need to be sure to call Next() so that
	// the key source can know when it is done
	initialKeys, err := s.keySource.Initialize(s)
	if err != nil {
		return fmt.Errorf("setting STEK module configuration: %v", err)
	}

	s.mu.Lock()
	s.currentKeys = initialKeys
	s.mu.Unlock()

	// keep the keys rotated
	go s.stayUpdated()

	return nil
}

// stayUpdated is a blocking function which rotates
// the keys whenever new ones are sent. It reads
// from keysChan until s.stop() is called.
func (s *SessionTicketService) stayUpdated() {
	defer func() {
		if err := recover(); err != nil {
			log.Printf("[PANIC] session ticket service: %v\n%s", err, debug.Stack())
		}
	}()

	// this call is essential when Initialize()
	// returns without error, because the stop
	// channel is the only way the key source
	// will know when to clean up
	keysChan := s.keySource.Next(s.stopChan)

	for {
		select {
		case newKeys := <-keysChan:
			s.mu.Lock()
			s.currentKeys = newKeys
			configs := s.configs
			s.mu.Unlock()
			// NOTE(review): configs is the live map, not a copy; iterating
			// it here while register/unregister mutate it under mu could
			// race — confirm callers' locking expectations
			for cfg := range configs {
				cfg.SetSessionTicketKeys(newKeys)
			}
		case <-s.stopChan:
			return
		}
	}
}

// stop terminates the key rotation goroutine.
// NOTE(review): close() panics on an already-closed channel, so stop()
// assumes it is called at most once per start(); confirm with callers.
func (s *SessionTicketService) stop() {
	if s.stopChan != nil {
		close(s.stopChan)
	}
}

// register sets the session ticket keys on cfg
// and keeps them updated. Any values registered
// must be unregistered, or they will not be
// garbage-collected. s.start() must have been
// called first. If session tickets are disabled
// or if ticket key rotation is disabled, this
// function is a no-op.
func (s *SessionTicketService) register(cfg *tls.Config) {
	if s.Disabled || s.DisableRotation {
		return
	}
	s.mu.Lock()
	cfg.SetSessionTicketKeys(s.currentKeys)
	s.configs[cfg] = struct{}{}
	s.mu.Unlock()
}

// unregister stops session key management on cfg and
// removes the internal stored reference to cfg. If
// session tickets are disabled or if ticket key rotation
// is disabled, this function is a no-op.
func (s *SessionTicketService) unregister(cfg *tls.Config) {
	if s.Disabled || s.DisableRotation {
		return
	}
	s.mu.Lock()
	delete(s.configs, cfg)
	s.mu.Unlock()
}

// RotateSTEKs rotates the keys in keys by producing a new key and eliding
// the oldest one. The new slice of keys is returned.
func (s SessionTicketService) RotateSTEKs(keys [][32]byte) ([][32]byte, error) { // produce a new key newKey, err := s.generateSTEK() if err != nil { return nil, fmt.Errorf("generating STEK: %v", err) } // we need to prepend this new key to the list of // keys so that it is preferred, but we need to be // careful that we do not grow the slice larger // than MaxKeys, otherwise we'll be storing one // more key in memory than we expect; so be sure // that the slice does not grow beyond the limit // even for a brief period of time, since there's // no guarantee when that extra allocation will // be overwritten; this is why we first trim the // length to one less the max, THEN prepend the // new key if len(keys) >= s.MaxKeys { keys[len(keys)-1] = [32]byte{} // zero-out memory of oldest key keys = keys[:s.MaxKeys-1] // trim length of slice } keys = append([][32]byte{newKey}, keys...) // prepend new key return keys, nil } // generateSTEK generates key material suitable for use as a // session ticket ephemeral key. func (s *SessionTicketService) generateSTEK() ([32]byte, error) { var newTicketKey [32]byte _, err := io.ReadFull(rand.Reader, newTicketKey[:]) return newTicketKey, err } // STEKProvider is a type that can provide session ticket ephemeral // keys (STEKs). type STEKProvider interface { // Initialize provides the STEK configuration to the STEK // module so that it can obtain and manage keys accordingly. // It returns the initial key(s) to use. Implementations can // rely on Next() being called if Initialize() returns // without error, so that it may know when it is done. Initialize(config *SessionTicketService) ([][32]byte, error) // Next returns the channel through which the next session // ticket keys will be transmitted until doneChan is closed. // Keys should be sent on keysChan as they are updated. // When doneChan is closed, any resources allocated in // Initialize() must be cleaned up. 
Next(doneChan <-chan struct{}) (keysChan <-chan [][32]byte) } const ( defaultSTEKRotationInterval = 12 * time.Hour defaultMaxSTEKs = 4 ) ================================================ FILE: modules/caddytls/standardstek/stek.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package standardstek import ( "log" "runtime/debug" "sync" "time" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/modules/caddytls" ) func init() { caddy.RegisterModule(standardSTEKProvider{}) } type standardSTEKProvider struct { stekConfig *caddytls.SessionTicketService timer *time.Timer } // CaddyModule returns the Caddy module information. func (standardSTEKProvider) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.stek.standard", New: func() caddy.Module { return new(standardSTEKProvider) }, } } // Initialize sets the configuration for s and returns the starting keys. 
func (s *standardSTEKProvider) Initialize(config *caddytls.SessionTicketService) ([][32]byte, error) { // keep a reference to the config; we'll need it when rotating keys s.stekConfig = config itvl := time.Duration(s.stekConfig.RotationInterval) mutex.Lock() defer mutex.Unlock() // if this is our first rotation or we are overdue // for one, perform a rotation immediately; otherwise, // we assume that the keys are non-empty and fresh since := time.Since(lastRotation) if lastRotation.IsZero() || since > itvl { var err error keys, err = s.stekConfig.RotateSTEKs(keys) if err != nil { return nil, err } since = 0 // since this is overdue or is the first rotation, use full interval lastRotation = time.Now() } // create timer for the remaining time on the interval; // this timer is cleaned up only when Next() returns s.timer = time.NewTimer(itvl - since) return keys, nil } // Next returns a channel which transmits the latest session ticket keys. func (s *standardSTEKProvider) Next(doneChan <-chan struct{}) <-chan [][32]byte { keysChan := make(chan [][32]byte) go s.rotate(doneChan, keysChan) return keysChan } // rotate rotates keys on a regular basis, sending each updated set of // keys down keysChan, until doneChan is closed. 
func (s *standardSTEKProvider) rotate(doneChan <-chan struct{}, keysChan chan<- [][32]byte) { defer func() { if err := recover(); err != nil { log.Printf("[PANIC] standard STEK rotation: %v\n%s", err, debug.Stack()) } }() for { select { case now := <-s.timer.C: // copy the slice header to avoid races mutex.RLock() keysCopy := keys mutex.RUnlock() // generate a new key, rotating old ones var err error keysCopy, err = s.stekConfig.RotateSTEKs(keysCopy) if err != nil { // TODO: improve this handling log.Printf("[ERROR] Generating STEK: %v", err) continue } // replace keys slice with updated value and // record the timestamp of rotation mutex.Lock() keys = keysCopy lastRotation = now mutex.Unlock() // send the updated keys to the service keysChan <- keysCopy // timer channel is already drained, so reset directly (see godoc) s.timer.Reset(time.Duration(s.stekConfig.RotationInterval)) case <-doneChan: // again, see godocs for why timer is stopped this way if !s.timer.Stop() { <-s.timer.C } return } } } var ( lastRotation time.Time keys [][32]byte mutex sync.RWMutex // protects keys and lastRotation ) // Interface guard var _ caddytls.STEKProvider = (*standardSTEKProvider)(nil) ================================================ FILE: modules/caddytls/storageloader.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddytls import ( "crypto/tls" "fmt" "strings" "github.com/caddyserver/certmagic" "github.com/caddyserver/caddy/v2" ) func init() { caddy.RegisterModule(StorageLoader{}) } // StorageLoader loads certificates and their associated keys // from the globally configured storage module. type StorageLoader struct { // A list of pairs of certificate and key file names along with their // encoding format so that they can be loaded from storage. Pairs []CertKeyFilePair `json:"pairs,omitempty"` // Reference to the globally configured storage module. storage certmagic.Storage ctx caddy.Context } // CaddyModule returns the Caddy module information. func (StorageLoader) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "tls.certificates.load_storage", New: func() caddy.Module { return new(StorageLoader) }, } } // Provision loads the storage module for sl. func (sl *StorageLoader) Provision(ctx caddy.Context) error { sl.storage = ctx.Storage() sl.ctx = ctx repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { repl = caddy.NewReplacer() } for k, pair := range sl.Pairs { for i, tag := range pair.Tags { pair.Tags[i] = repl.ReplaceKnown(tag, "") } sl.Pairs[k] = CertKeyFilePair{ Certificate: repl.ReplaceKnown(pair.Certificate, ""), Key: repl.ReplaceKnown(pair.Key, ""), Format: repl.ReplaceKnown(pair.Format, ""), Tags: pair.Tags, } } return nil } // LoadCertificates returns the certificates to be loaded by sl. 
func (sl StorageLoader) LoadCertificates() ([]Certificate, error) { certs := make([]Certificate, 0, len(sl.Pairs)) for _, pair := range sl.Pairs { certData, err := sl.storage.Load(sl.ctx, pair.Certificate) if err != nil { return nil, err } keyData, err := sl.storage.Load(sl.ctx, pair.Key) if err != nil { return nil, err } var cert tls.Certificate switch pair.Format { case "": fallthrough case "pem": // if the start of the key file looks like an encrypted private key, // reject it with a helpful error message if strings.Contains(string(keyData[:40]), "ENCRYPTED") { return nil, fmt.Errorf("encrypted private keys are not supported; please decrypt the key first") } cert, err = tls.X509KeyPair(certData, keyData) default: return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format) } if err != nil { return nil, err } certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags}) } return certs, nil } // Interface guard var ( _ CertificateLoader = (*StorageLoader)(nil) _ caddy.Provisioner = (*StorageLoader)(nil) ) ================================================ FILE: modules/caddytls/tls.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package caddytls

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/libdns/libdns"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/internal"
	"github.com/caddyserver/caddy/v2/modules/caddyevents"
)

func init() {
	caddy.RegisterModule(TLS{})
	caddy.RegisterModule(AutomateLoader{})
}

// The certificate cache is a process-wide singleton shared across
// config reloads; certCacheMu guards the certCache pointer itself.
var (
	certCache   *certmagic.Cache
	certCacheMu sync.RWMutex
)

// TLS provides TLS facilities including certificate
// loading and management, client auth, and more.
type TLS struct {
	// Certificates to load into memory for quick recall during
	// TLS handshakes. Each key is the name of a certificate
	// loader module.
	//
	// The "automate" certificate loader module can be used to
	// specify a list of subjects that need certificates to be
	// managed automatically, including subdomains that may
	// already be covered by a managed wildcard certificate.
	// The first matching automation policy will be used
	// to manage automated certificate(s).
	//
	// All loaded certificates get pooled
	// into the same cache and may be used to complete TLS
	// handshakes for the relevant server names (SNI).
	// Certificates loaded manually (anything other than
	// "automate") are not automatically managed and will
	// have to be refreshed manually before they expire.
	CertificatesRaw caddy.ModuleMap `json:"certificates,omitempty" caddy:"namespace=tls.certificates"`

	// Configures certificate automation.
	Automation *AutomationConfig `json:"automation,omitempty"`

	// Configures session ticket ephemeral keys (STEKs).
	SessionTickets *SessionTicketService `json:"session_tickets,omitempty"`

	// Configures the in-memory certificate cache.
	Cache *CertCacheOptions `json:"cache,omitempty"`

	// Disables OCSP stapling for manually-managed certificates only.
	// To configure OCSP stapling for automated certificates, use an
	// automation policy instead.
	//
	// Disabling OCSP stapling puts clients at greater risk, reduces their
	// privacy, and usually lowers client performance. It is NOT recommended
	// to disable this unless you are able to justify the costs.
	//
	// EXPERIMENTAL. Subject to change.
	DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`

	// Disables checks in certmagic that the configured storage is ready
	// and able to handle writing new content to it. These checks are
	// intended to prevent information loss (newly issued certificates), but
	// can be expensive on the storage.
	//
	// Disabling these checks should only be done when the storage
	// can be trusted to have enough capacity and no other problems.
	//
	// EXPERIMENTAL. Subject to change.
	DisableStorageCheck bool `json:"disable_storage_check,omitempty"`

	// Disables the automatic cleanup of the storage backend.
	// This is useful when TLS is not being used to store certificates
	// and the user wants to run their server in a read-only mode.
	//
	// Storage cleaning creates two files: instance.uuid and last_clean.json.
	// The instance.uuid file is used to identify the instance of Caddy
	// in a cluster. The last_clean.json file is used to store the last
	// time the storage was cleaned.
	//
	// EXPERIMENTAL. Subject to change.
	DisableStorageClean bool `json:"disable_storage_clean,omitempty"`

	// Enable Encrypted ClientHello (ECH). ECH protects the server name
	// (SNI) and other sensitive parameters of a normally-plaintext TLS
	// ClientHello during a handshake.
	//
	// EXPERIMENTAL: Subject to change.
	EncryptedClientHello *ECH `json:"encrypted_client_hello,omitempty"`

	// The default DNS provider module to use when a DNS module is needed.
	//
	// EXPERIMENTAL: Subject to change.
	DNSRaw json.RawMessage `json:"dns,omitempty" caddy:"namespace=dns.providers inline_key=name"`

	// The default DNS resolvers to use for TLS-related DNS operations, specifically
	// for ACME DNS challenges and ACME server DNS validations.
	// If not specified, the system default resolvers will be used.
	//
	// EXPERIMENTAL: Subject to change.
	Resolvers []string `json:"resolvers,omitempty"`

	dns                any // technically, it should be any/all of the libdns interfaces (RecordSetter, RecordAppender, etc.)
	certificateLoaders []CertificateLoader
	automateNames      map[string]struct{}
	ctx                caddy.Context
	storageCleanTicker *time.Ticker
	storageCleanStop   chan struct{}
	logger             *zap.Logger
	events             *caddyevents.App
	serverNames        map[string]struct{}
	serverNamesMu      *sync.Mutex

	// set of subjects with managed certificates,
	// and hashes of manually-loaded certificates
	// (managing's value is an optional issuer key, for distinction)
	managing, loaded map[string]string
}

// CaddyModule returns the Caddy module information.
func (TLS) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls",
		New: func() caddy.Module { return new(TLS) },
	}
}

// Provision sets up the configuration for the TLS app.
func (t *TLS) Provision(ctx caddy.Context) error {
	eventsAppIface, err := ctx.App("events")
	if err != nil {
		return fmt.Errorf("getting events app: %v", err)
	}
	t.events = eventsAppIface.(*caddyevents.App)
	t.ctx = ctx
	t.logger = ctx.Logger()
	// used later to expand env-var placeholders in the 'ask' endpoint
	repl := caddy.NewReplacer()
	t.managing, t.loaded = make(map[string]string), make(map[string]string)
	t.serverNames = make(map[string]struct{})
	t.serverNamesMu = new(sync.Mutex)

	// set up default DNS module, if any, and make sure it implements all the
	// common libdns interfaces, since it could be used for a variety of things
	// (do this before provisioning other modules, since they may rely on this)
	if len(t.DNSRaw) > 0 {
		dnsMod, err := ctx.LoadModule(t, "DNSRaw")
		if err != nil {
			return fmt.Errorf("loading overall DNS provider module: %v", err)
		}
		switch dnsMod.(type) {
		case interface {
			libdns.RecordAppender
			libdns.RecordDeleter
			libdns.RecordGetter
			libdns.RecordSetter
		}:
		default:
			return fmt.Errorf("DNS module does not implement the most common libdns interfaces: %T", dnsMod)
		}
		t.dns = dnsMod
	}

	// set up a new certificate cache; this (re)loads all certificates
	cacheOpts := certmagic.CacheOptions{
		GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
			return t.getConfigForName(cert.Names[0]), nil
		},
		Logger: t.logger.Named("cache"),
	}
	if t.Automation != nil {
		cacheOpts.OCSPCheckInterval = time.Duration(t.Automation.OCSPCheckInterval)
		cacheOpts.RenewCheckInterval = time.Duration(t.Automation.RenewCheckInterval)
	}
	if t.Cache != nil {
		cacheOpts.Capacity = t.Cache.Capacity
	}
	if cacheOpts.Capacity <= 0 {
		cacheOpts.Capacity = 10000
	}

	// the cache is shared across reloads: create it on first provision,
	// otherwise just update its options in place
	certCacheMu.Lock()
	if certCache == nil {
		certCache = certmagic.NewCache(cacheOpts)
	} else {
		certCache.SetOptions(cacheOpts)
	}
	certCacheMu.Unlock()

	// certificate loaders
	val, err := ctx.LoadModule(t, "CertificatesRaw")
	if err != nil {
		return fmt.Errorf("loading certificate loader modules: %s", err)
	}
	for modName, modIface := range val.(map[string]any) {
		if modName == "automate" {
			// special case; these will be loaded in later using our automation facilities,
			// which we want to avoid doing during provisioning
			if automateNames, ok := modIface.(*AutomateLoader); ok && automateNames != nil {
				if t.automateNames == nil {
					t.automateNames = make(map[string]struct{})
				}
				// NOTE(review): this shadows the repl declared at the top of
				// Provision; harmless, but a single replacer would suffice
				repl := caddy.NewReplacer()
				for _, sub := range *automateNames {
					// second arg "" — presumably unresolved placeholders
					// collapse to the empty string; confirm against
					// caddy.Replacer semantics
					t.automateNames[repl.ReplaceAll(sub, "")] = struct{}{}
				}
			} else {
				return fmt.Errorf("loading certificates with 'automate' requires array of strings, got: %T", modIface)
			}
			continue
		}
		t.certificateLoaders = append(t.certificateLoaders, modIface.(CertificateLoader))
	}

	// using the certificate loaders we just initialized, load
	// manual/static (unmanaged) certificates - we do this in
	// provision so that other apps (such as http) can know which
	// certificates have been manually loaded, and also so that
	// commands like validate can be a better test
	certCacheMu.RLock()
	magic := certmagic.New(certCache, certmagic.Config{
		Storage: ctx.Storage(),
		Logger:  t.logger,
		OnEvent: t.onEvent,
		OCSP: certmagic.OCSPConfig{
			DisableStapling: t.DisableOCSPStapling,
		},
		DisableStorageCheck: t.DisableStorageCheck,
	})
	certCacheMu.RUnlock()
	for _, loader := range t.certificateLoaders {
		certs, err := loader.LoadCertificates()
		if err != nil {
			return fmt.Errorf("loading certificates: %v", err)
		}
		for _, cert := range certs {
			hash, err := magic.CacheUnmanagedTLSCertificate(ctx, cert.Certificate, cert.Tags)
			if err != nil {
				return fmt.Errorf("caching unmanaged certificate: %v", err)
			}
			// empty value = manually loaded (no issuer key), cf. t.managing
			t.loaded[hash] = ""
		}
	}

	// on-demand permission module
	if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.PermissionRaw != nil {
		if t.Automation.OnDemand.Ask != "" {
			return fmt.Errorf("on-demand TLS config conflict: both 'ask' endpoint and a 'permission' module are specified; 'ask' is deprecated, so use only the permission module")
		}
		val, err := ctx.LoadModule(t.Automation.OnDemand, "PermissionRaw")
		if err != nil {
			return fmt.Errorf("loading on-demand TLS permission module: %v", err)
		}
		t.Automation.OnDemand.permission = val.(OnDemandPermission)
	}

	// automation/management policies
	if t.Automation == nil {
		t.Automation = new(AutomationConfig)
	}
	t.Automation.defaultPublicAutomationPolicy = new(AutomationPolicy)
	err = t.Automation.defaultPublicAutomationPolicy.Provision(t)
	if err != nil {
		return fmt.Errorf("provisioning default public automation policy: %v", err)
	}
	for n := range t.automateNames {
		// if any names specified by the "automate" loader do not qualify for a public
		// certificate, we should initialize a default internal automation policy
		// (but we don't want to do this unnecessarily, since it may prompt for password!)
		if certmagic.SubjectQualifiesForPublicCert(n) {
			continue
		}
		t.Automation.defaultInternalAutomationPolicy = &AutomationPolicy{
			IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
		}
		err = t.Automation.defaultInternalAutomationPolicy.Provision(t)
		if err != nil {
			return fmt.Errorf("provisioning default internal automation policy: %v", err)
		}
		// one internal default policy is enough; stop at the first such name
		break
	}
	for i, ap := range t.Automation.Policies {
		err := ap.Provision(t)
		if err != nil {
			return fmt.Errorf("provisioning automation policy %d: %v", i, err)
		}
	}

	// run replacer on ask URL (for environment variables) -- return errors to prevent surprises (#5036)
	if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.Ask != "" {
		t.Automation.OnDemand.Ask, err = repl.ReplaceOrErr(t.Automation.OnDemand.Ask, true, true)
		if err != nil {
			return fmt.Errorf("preparing 'ask' endpoint: %v", err)
		}
		perm := PermissionByHTTP{
			Endpoint: t.Automation.OnDemand.Ask,
		}
		if err := perm.Provision(ctx); err != nil {
			return fmt.Errorf("provisioning 'ask' module: %v", err)
		}
		t.Automation.OnDemand.permission = perm
	}

	// session ticket ephemeral keys (STEK) service and provider
	if t.SessionTickets != nil {
		err := t.SessionTickets.provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning session tickets configuration: %v", err)
		}
	}

	// ECH (Encrypted ClientHello) initialization
	if t.EncryptedClientHello != nil {
		outerNames, err := t.EncryptedClientHello.Provision(ctx)
		if err != nil {
			return fmt.Errorf("provisioning Encrypted ClientHello components: %v", err)
		}

		// outer names should have certificates to reduce client brittleness
		for _, outerName := range outerNames {
			if outerName == "" {
				continue
			}
			if !t.HasCertificateForSubject(outerName) {
				if t.automateNames == nil {
					t.automateNames = make(map[string]struct{})
				}
				t.automateNames[outerName] = struct{}{}
			}
		}
	}

	return nil
}

// Validate validates t's configuration.
func (t *TLS) Validate() error { if t.Automation != nil { // ensure that host aren't repeated; since only the first // automation policy is used, repeating a host in the lists // isn't useful and is probably a mistake; same for two // catch-all/default policies var hasDefault bool hostSet := make(map[string]int) for i, ap := range t.Automation.Policies { if len(ap.subjects) == 0 { if hasDefault { return fmt.Errorf("automation policy %d is the second policy that acts as default/catch-all, but will never be used", i) } hasDefault = true } for _, h := range ap.subjects { if first, ok := hostSet[h]; ok { return fmt.Errorf("automation policy %d: cannot apply more than one automation policy to host: %s (first match in policy %d)", i, h, first) } hostSet[h] = i } } } if t.Cache != nil { if t.Cache.Capacity < 0 { return fmt.Errorf("cache capacity must be >= 0") } } return nil } // Start activates the TLS module. func (t *TLS) Start() error { // warn if on-demand TLS is enabled but no restrictions are in place if t.Automation.OnDemand == nil || (t.Automation.OnDemand.Ask == "" && t.Automation.OnDemand.permission == nil) { for _, ap := range t.Automation.Policies { if ap.OnDemand && ap.isWildcardOrDefault() { if c := t.logger.Check(zapcore.WarnLevel, "YOUR SERVER MAY BE VULNERABLE TO ABUSE: on-demand TLS is enabled, but no protections are in place"); c != nil { c.Write(zap.String("docs", "https://caddyserver.com/docs/automatic-https#on-demand-tls")) } break } } } // now that we are running, and all manual certificates have // been loaded, time to load the automated/managed certificates err := t.Manage(t.automateNames) if err != nil { return fmt.Errorf("automate: managing %v: %v", t.automateNames, err) } if t.EncryptedClientHello != nil { echLogger := t.logger.Named("ech") // publish ECH configs in the background; does not need to block // server startup, as it could take a while; then keep keys rotated go func() { // publish immediately first if err := 
t.publishECHConfigs(echLogger); err != nil { echLogger.Error("publication(s) failed", zap.Error(err)) } // then every so often, rotate and publish if needed // (both of these functions only do something if needed) for { select { case <-time.After(1 * time.Hour): // ensure old keys are rotated out t.EncryptedClientHello.configsMu.Lock() err = t.EncryptedClientHello.rotateECHKeys(t.ctx, echLogger, false) t.EncryptedClientHello.configsMu.Unlock() if err != nil { echLogger.Error("rotating ECH configs failed", zap.Error(err)) return } err := t.publishECHConfigs(echLogger) if err != nil { echLogger.Error("publication(s) failed", zap.Error(err)) } case <-t.ctx.Done(): return } } }() } if !t.DisableStorageClean { // start the storage cleaner goroutine and ticker, // which cleans out expired certificates and more t.keepStorageClean() } return nil } // Stop stops the TLS module and cleans up any allocations. func (t *TLS) Stop() error { // stop the storage cleaner goroutine and ticker if t.storageCleanStop != nil { close(t.storageCleanStop) } if t.storageCleanTicker != nil { t.storageCleanTicker.Stop() } return nil } // Cleanup frees up resources allocated during Provision. 
func (t *TLS) Cleanup() error {
	// stop the session ticket rotation goroutine
	if t.SessionTickets != nil {
		t.SessionTickets.stop()
	}

	// if a new TLS app was loaded, remove certificates from the cache that are no longer
	// being managed or loaded by the new config; if there is no more TLS app running,
	// then stop cert maintenance and let the cert cache be GC'ed
	if nextTLS, err := caddy.ActiveContext().AppIfConfigured("tls"); err == nil && nextTLS != nil {
		nextTLSApp := nextTLS.(*TLS)

		// compute which certificates were managed or loaded into the cert cache by this
		// app instance (which is being stopped) that are not managed or loaded by the
		// new app instance (which just started), and remove them from the cache
		var noLongerManaged []certmagic.SubjectIssuer
		var noLongerLoaded []string
		reManage := make(map[string]struct{})
		for subj, currentIssuerKey := range t.managing {
			// It's a bit nuanced: managed certs can sometimes be different enough that we have to
			// swap them out for a different one, even if they are for the same subject/domain.
			// We consider "private" certs (internal CA/locally-trusted/etc) to be significantly
			// distinct from "public" certs (production CAs/globally-trusted/etc) because of the
			// implications when it comes to actual deployments: switching between an internal CA
			// and a production CA, for example, is quite significant. Switching from one public CA
			// to another, however, is not, and for our purposes we consider those to be the same.
			// Anyway, if the next TLS app does not manage a cert for this name at all, definitely
			// remove it from the cache. But if it does, and it's not the same kind of issuer/CA
			// as we have, also remove it, so that it can swap it out for the right one.
			if nextIssuerKey, ok := nextTLSApp.managing[subj]; !ok || nextIssuerKey != currentIssuerKey {
				// next app is not managing a cert for this domain at all or is using a different issuer, so remove it
				noLongerManaged = append(noLongerManaged, certmagic.SubjectIssuer{Subject: subj, IssuerKey: currentIssuerKey})

				// then, if the next app is managing a cert for this name, but with a different issuer, re-manage it
				if ok && nextIssuerKey != currentIssuerKey {
					reManage[subj] = struct{}{}
				}
			}
		}
		for hash := range t.loaded {
			if _, ok := nextTLSApp.loaded[hash]; !ok {
				noLongerLoaded = append(noLongerLoaded, hash)
			}
		}

		// remove the certs; a read lock is used because certCacheMu guards
		// the certCache pointer itself, not the cache's internal state
		certCacheMu.RLock()
		certCache.RemoveManaged(noLongerManaged)
		certCache.Remove(noLongerLoaded)
		certCacheMu.RUnlock()

		// give the new TLS app a "kick" to manage certs that it is configured for
		// with its own configuration instead of the one we just evicted
		if err := nextTLSApp.Manage(reManage); err != nil {
			if c := t.logger.Check(zapcore.ErrorLevel, "re-managing unloaded certificates with new config"); c != nil {
				c.Write(
					zap.Strings("subjects", internal.MaxSizeSubjectsListForLog(reManage, 1000)),
					zap.Error(err),
				)
			}
		}
	} else {
		// no more TLS app running, so delete in-memory cert cache, if it was created yet
		certCacheMu.RLock()
		hasCache := certCache != nil
		certCacheMu.RUnlock()
		if hasCache {
			// NOTE(review): Stop() is called outside the lock, and certCache
			// could in principle change between the check and the Lock below —
			// confirm reload ordering makes a concurrent (re)provision impossible here
			certCache.Stop()
			certCacheMu.Lock()
			certCache = nil
			certCacheMu.Unlock()
		}
	}
	return nil
}

// Manage immediately begins managing subjects according to the
// matching automation policy. The subjects are given in a map
// to prevent duplication and also because quick lookups are
// needed to assess wildcard coverage, if any, depending on
// certain config parameters (with lots of subjects, computing
// wildcard coverage over a slice can be highly inefficient).
func (t *TLS) Manage(subjects map[string]struct{}) error {
	// for a large number of names, we can be more memory-efficient
	// by making only one certmagic.Config for all the names that
	// use that config, rather than calling ManageAsync once for
	// every name; so first, bin names by AutomationPolicy
	policyToNames := make(map[*AutomationPolicy][]string)
	for subj := range subjects {
		ap := t.getAutomationPolicyForName(subj)
		// by default, if a wildcard that covers the subj is also being
		// managed, either by a previous call to Manage or by this one,
		// prefer using that over individual certs for its subdomains;
		// but users can disable this and force getting a certificate for
		// subdomains by adding the name to the 'automate' cert loader
		if t.managingWildcardFor(subj, subjects) {
			if _, ok := t.automateNames[subj]; !ok {
				continue
			}
		}
		policyToNames[ap] = append(policyToNames[ap], subj)
	}

	// now that names are grouped by policy, we can simply make one
	// certmagic.Config for each (potentially large) group of names
	// and call ManageAsync just once for the whole batch
	for ap, names := range policyToNames {
		err := ap.magic.ManageAsync(t.ctx.Context, names)
		if err != nil {
			// truncate the name list for the error message only; this
			// mutation happens solely on the error path, so the slice is
			// not reused afterward
			const maxNamesToDisplay = 100
			if len(names) > maxNamesToDisplay {
				names = append(names[:maxNamesToDisplay], fmt.Sprintf("(and %d more...)", len(names)-maxNamesToDisplay))
			}
			return fmt.Errorf("automate: manage %v: %v", names, err)
		}
		for _, name := range names {
			// certs that are issued solely by our internal issuer are a little bit of
			// a special case: if you have an initial config that manages example.com
			// using internal CA, then after testing it you switch to a production CA,
			// you wouldn't want to keep using the same self-signed cert, obviously;
			// so we differentiate these by associating the subject with its issuer key;
			// we do this because CertMagic has no notion of "InternalIssuer" like we
			// do, so we have to do this logic ourselves
			var issuerKey string
			if len(ap.Issuers) == 1 {
				if intIss, ok := ap.Issuers[0].(*InternalIssuer); ok && intIss != nil {
					issuerKey = intIss.IssuerKey()
				}
			}
			t.managing[name] = issuerKey
		}
	}

	return nil
}

// managingWildcardFor returns true if the app is managing a certificate that covers that
// subject name (including consideration of wildcards), either from its internal list of
// names that it IS managing certs for, or from the otherSubjsToManage which includes names
// that WILL be managed.
func (t *TLS) managingWildcardFor(subj string, otherSubjsToManage map[string]struct{}) bool {
	// TODO: we could also consider manually-loaded certs using t.HasCertificateForSubject(),
	// but that does not account for how manually-loaded certs may be restricted as to which
	// hostnames or ClientHellos they can be used with by tags, etc; I don't *think* anyone
	// necessarily wants this anyway, but I thought I'd note this here for now (if we did
	// consider manually-loaded certs, we'd probably want to rename the method since it
	// wouldn't be just about managed certs anymore)

	// IP addresses must match exactly
	if ip := net.ParseIP(subj); ip != nil {
		_, managing := t.managing[subj]
		return managing
	}

	// replace labels of the domain with wildcards until we get a match;
	// note that each label is wildcarded cumulatively, left to right
	labels := strings.Split(subj, ".")
	for i := range labels {
		if labels[i] == "*" {
			continue
		}
		labels[i] = "*"
		candidate := strings.Join(labels, ".")
		if _, ok := t.managing[candidate]; ok {
			return true
		}
		if _, ok := otherSubjsToManage[candidate]; ok {
			return true
		}
	}
	return false
}

// RegisterServerNames registers the provided DNS names with the TLS app.
// This is currently used to auto-publish Encrypted ClientHello (ECH)
// configurations, if enabled. Use of this function by apps using the TLS
// app removes the need for the user to redundantly specify domain names
// in their configuration. This function separates hostname and port
// (keeping only the hostname) and filters IP addresses, which can't be
// used with ECH.
//
// EXPERIMENTAL: This function and its semantics/behavior are subject to change.
func (t *TLS) RegisterServerNames(dnsNames []string) {
	t.serverNamesMu.Lock()
	for _, name := range dnsNames {
		// if the name has no port, SplitHostPort errors; fall back to the raw name
		host, _, err := net.SplitHostPort(name)
		if err != nil {
			host = name
		}
		if strings.TrimSpace(host) != "" && !certmagic.SubjectIsIP(host) {
			t.serverNames[strings.ToLower(host)] = struct{}{}
		}
	}
	t.serverNamesMu.Unlock()
}

// HandleHTTPChallenge ensures that the ACME HTTP challenge or ZeroSSL HTTP
// validation request is handled for the certificate named by r.Host, if it
// is an HTTP challenge request. It requires that the automation policy for
// r.Host has an issuer that implements GetACMEIssuer() or is a *ZeroSSLIssuer.
func (t *TLS) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
	acmeChallenge := certmagic.LooksLikeHTTPChallenge(r)
	zerosslValidation := certmagic.LooksLikeZeroSSLHTTPValidation(r)

	// no-op if it's not an ACME challenge request
	if !acmeChallenge && !zerosslValidation {
		return false
	}

	// try all the issuers until we find the one that initiated the challenge
	ap := t.getAutomationPolicyForName(r.Host)

	if acmeChallenge {
		type acmeCapable interface{ GetACMEIssuer() *ACMEIssuer }

		for _, iss := range ap.magic.Issuers {
			if acmeIssuer, ok := iss.(acmeCapable); ok {
				if acmeIssuer.GetACMEIssuer().issuer.HandleHTTPChallenge(w, r) {
					return true
				}
			}
		}

		// it's possible another server in this process initiated the challenge;
		// users have requested that Caddy only handle HTTP challenges it initiated,
		// so that users can proxy the others through to their backends; but we
		// might not have an automation policy for all identifiers that are trying
		// to get certificates (e.g. the admin endpoint), so we do this manual check
		if challenge, ok := certmagic.GetACMEChallenge(r.Host); ok {
			return certmagic.SolveHTTPChallenge(t.logger, w, r, challenge.Challenge)
		}
	} else if zerosslValidation {
		for _, iss := range ap.magic.Issuers {
			if ziss, ok := iss.(*ZeroSSLIssuer); ok {
				if ziss.issuer.HandleZeroSSLHTTPValidation(w, r) {
					return true
				}
			}
		}
	}

	return false
}

// AddAutomationPolicy provisions and adds ap to the list of the app's
// automation policies. If an existing automation policy exists that has
// fewer hosts in its list than ap does, ap will be inserted before that
// other policy (this helps ensure that ap will be prioritized/chosen
// over, say, a catch-all policy).
func (t *TLS) AddAutomationPolicy(ap *AutomationPolicy) error {
	if t.Automation == nil {
		t.Automation = new(AutomationConfig)
	}
	err := ap.Provision(t)
	if err != nil {
		return err
	}
	// sort new automation policies just before any other which is a superset
	// of this one; if we find an existing policy that covers every subject in
	// ap but less specifically (e.g. a catch-all policy, or one with wildcards
	// or with fewer subjects), insert ap just before it, otherwise ap would
	// never be used because the first matching policy is more general
	for i, existing := range t.Automation.Policies {
		// first see if existing is superset of ap for all names
		var otherIsSuperset bool
	outer:
		for _, thisSubj := range ap.subjects {
			for _, otherSubj := range existing.subjects {
				if certmagic.MatchWildcard(thisSubj, otherSubj) {
					otherIsSuperset = true
					break outer
				}
			}
		}
		// if existing AP is a superset or if it contains fewer names (i.e. is
		// more general), then new AP is more specific, so insert before it
		if otherIsSuperset || len(existing.SubjectsRaw) < len(ap.SubjectsRaw) {
			t.Automation.Policies = append(t.Automation.Policies[:i],
				append([]*AutomationPolicy{ap}, t.Automation.Policies[i:]...)...)
			return nil
		}
	}
	// otherwise just append the new one
	t.Automation.Policies = append(t.Automation.Policies, ap)
	return nil
}

// getConfigForName returns the CertMagic config belonging to the
// automation policy that matches the given subject name.
func (t *TLS) getConfigForName(name string) *certmagic.Config {
	ap := t.getAutomationPolicyForName(name)
	return ap.magic
}

// getAutomationPolicyForName returns the first matching automation policy
// for the given subject name. If no matching policy can be found, the
// default policy is used, depending on whether the name qualifies for a
// public certificate or not.
func (t *TLS) getAutomationPolicyForName(name string) *AutomationPolicy {
	for _, ap := range t.Automation.Policies {
		if len(ap.subjects) == 0 {
			return ap // no host filter is an automatic match
		}
		for _, h := range ap.subjects {
			if certmagic.MatchWildcard(name, h) {
				return ap
			}
		}
	}
	if certmagic.SubjectQualifiesForPublicCert(name) || t.Automation.defaultInternalAutomationPolicy == nil {
		return t.Automation.defaultPublicAutomationPolicy
	}
	return t.Automation.defaultInternalAutomationPolicy
}

// AllMatchingCertificates returns the list of all certificates in
// the cache which could be used to satisfy the given SAN.
//
// NOTE(review): unlike HasCertificateForSubject, this reads certCache
// without holding certCacheMu — confirm callers cannot race a config
// teardown that sets certCache to nil.
func AllMatchingCertificates(san string) []certmagic.Certificate {
	return certCache.AllMatchingCertificates(san)
}

// HasCertificateForSubject reports whether the cert cache holds a
// certificate for subject that this app instance either manually
// loaded or is managing.
func (t *TLS) HasCertificateForSubject(subject string) bool {
	certCacheMu.RLock()
	allMatchingCerts := certCache.AllMatchingCertificates(subject)
	certCacheMu.RUnlock()
	for _, cert := range allMatchingCerts {
		// check if the cert is manually loaded by this config
		if _, ok := t.loaded[cert.Hash()]; ok {
			return true
		}
		// check if the cert is automatically managed by this config
		for _, name := range cert.Names {
			if _, ok := t.managing[name]; ok {
				return true
			}
		}
	}
	return false
}

// keepStorageClean starts a goroutine that immediately cleans up all
// known storage units if it was not recently done, and then runs the
// operation at every tick from t.storageCleanTicker.
func (t *TLS) keepStorageClean() {
	t.storageCleanTicker = time.NewTicker(t.storageCleanInterval())
	t.storageCleanStop = make(chan struct{})
	go func() {
		// a panic in the cleaner must not take down the process;
		// recover and log it instead
		defer func() {
			if err := recover(); err != nil {
				log.Printf("[PANIC] storage cleaner: %v\n%s", err, debug.Stack())
			}
		}()
		// clean once immediately, then on every tick until stopped
		t.cleanStorageUnits()
		for {
			select {
			case <-t.storageCleanStop:
				return
			case <-t.storageCleanTicker.C:
				t.cleanStorageUnits()
			}
		}
	}()
}

// cleanStorageUnits cleans the default/global storage and every storage
// configured by an automation policy, then records the completion time
// in the package-level storageClean stamp. Serialized by storageCleanMu.
func (t *TLS) cleanStorageUnits() {
	storageCleanMu.Lock()
	defer storageCleanMu.Unlock()

	// TODO: This check might not be needed anymore now that CertMagic syncs
	// and throttles storage cleaning globally across the cluster.
	// The original comment below might be outdated:
	//
	// If storage was cleaned recently, don't do it again for now. Although the ticker
	// calling this function drops missed ticks for us, config reloads discard the old
	// ticker and replace it with a new one, possibly invoking a cleaning to happen again
	// too soon. (We divide the interval by 2 because the actual cleaning takes non-zero
	// time, and we don't want to skip cleanings if we don't have to; whereas if a cleaning
	// took most of the interval, we'd probably want to skip the next one so we aren't
	// constantly cleaning. This allows cleanings to take up to half the interval's
	// duration before we decide to skip the next one.)
	if !storageClean.IsZero() && time.Since(storageClean) < t.storageCleanInterval()/2 {
		return
	}

	id, err := caddy.InstanceID()
	if err != nil {
		// not fatal: cleaning proceeds with whatever ID value we got
		if c := t.logger.Check(zapcore.WarnLevel, "unable to get instance ID; storage clean stamps will be incomplete"); c != nil {
			c.Write(zap.Error(err))
		}
	}
	options := certmagic.CleanStorageOptions{
		Logger:                 t.logger,
		InstanceID:             id.String(),
		Interval:               t.storageCleanInterval(),
		OCSPStaples:            true,
		ExpiredCerts:           true,
		ExpiredCertGracePeriod: 24 * time.Hour * 14,
	}

	// start with the default/global storage
	err = certmagic.CleanStorage(t.ctx, t.ctx.Storage(), options)
	if err != nil {
		// probably don't want to return early, since we should still
		// see if any other storages can get cleaned up
		if c := t.logger.Check(zapcore.ErrorLevel, "could not clean default/global storage"); c != nil {
			c.Write(zap.Error(err))
		}
	}

	// then clean each storage defined in ACME automation policies
	if t.Automation != nil {
		for _, ap := range t.Automation.Policies {
			if ap.storage == nil {
				continue
			}
			if err := certmagic.CleanStorage(t.ctx, ap.storage, options); err != nil {
				if c := t.logger.Check(zapcore.ErrorLevel, "could not clean storage configured in automation policy"); c != nil {
					c.Write(zap.Error(err))
				}
			}
		}
	}

	// remember last time storage was finished cleaning
	storageClean = time.Now()

	t.logger.Info("finished cleaning storage units")
}

// storageCleanInterval returns the configured storage-clean interval,
// or the package default if none is configured.
func (t *TLS) storageCleanInterval() time.Duration {
	if t.Automation != nil && t.Automation.StorageCleanInterval > 0 {
		return time.Duration(t.Automation.StorageCleanInterval)
	}
	return defaultStorageCleanInterval
}

// onEvent translates CertMagic events into Caddy events then dispatches them.
func (t *TLS) onEvent(ctx context.Context, eventName string, data map[string]any) error {
	evt := t.events.Emit(t.ctx, eventName, data)
	return evt.Aborted
}

// CertificateLoader is a type that can load certificates.
// Certificates can optionally be associated with tags.
type CertificateLoader interface {
	LoadCertificates() ([]Certificate, error)
}

// Certificate is a TLS certificate, optionally
// associated with arbitrary tags.
type Certificate struct {
	tls.Certificate
	Tags []string
}

// AutomateLoader will automatically manage certificates for the names in the
// list, including obtaining and renewing certificates. Automated certificates
// are managed according to their matching automation policy, configured
// elsewhere in this app.
//
// Technically, this is a no-op certificate loader module that is treated as
// a special case: it uses this app's automation features to load certificates
// for the list of hostnames, rather than loading certificates manually. But
// the end result is the same: certificates for these subject names will be
// loaded into the in-memory cache and may then be used.
type AutomateLoader []string

// CaddyModule returns the Caddy module information.
func (AutomateLoader) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.certificates.automate",
		New: func() caddy.Module { return new(AutomateLoader) },
	}
}

// CertCacheOptions configures the certificate cache.
type CertCacheOptions struct {
	// Maximum number of certificates to allow in the
	// cache. If reached, certificates will be randomly
	// evicted to make room for new ones. Default: 10,000
	Capacity int `json:"capacity,omitempty"`
}

// Variables related to storage cleaning.
var (
	defaultStorageCleanInterval = 24 * time.Hour
	storageClean                time.Time
	storageCleanMu              sync.Mutex
)

// Interface guards
var (
	_ caddy.App          = (*TLS)(nil)
	_ caddy.Provisioner  = (*TLS)(nil)
	_ caddy.Validator    = (*TLS)(nil)
	_ caddy.CleanerUpper = (*TLS)(nil)
)


================================================
FILE: modules/caddytls/values.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"

	"github.com/caddyserver/certmagic"
	"github.com/klauspost/cpuid/v2"
)

// CipherSuiteNameSupported returns true if name is
// a supported cipher suite.
func CipherSuiteNameSupported(name string) bool {
	return CipherSuiteID(name) != 0
}

// CipherSuiteID returns the ID of the cipher suite associated with
// the given name, or 0 if the name is not recognized/supported.
func CipherSuiteID(name string) uint16 {
	for _, cs := range SupportedCipherSuites() {
		if cs.Name == name {
			return cs.ID
		}
	}
	return 0
}

// SupportedCipherSuites returns a list of all the cipher suites
// Caddy supports. The list is NOT ordered by security preference.
func SupportedCipherSuites() []*tls.CipherSuite {
	return tls.CipherSuites()
}

// defaultCipherSuitesWithAESNI is the ordered list of all the cipher
// suites we want to support by default, assuming AES-NI
// (hardware acceleration for AES).
var defaultCipherSuitesWithAESNI = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
}

// defaultCipherSuitesWithoutAESNI is the ordered list of all the cipher
// suites we want to support by default, assuming lack of
// AES-NI (NO hardware acceleration for AES); note that the
// ChaCha20-Poly1305 suites are preferred here, since they are
// software-friendly.
var defaultCipherSuitesWithoutAESNI = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}

// getOptimalDefaultCipherSuites returns an appropriate cipher
// suite to use depending on the hardware support for AES.
//
// See https://github.com/caddyserver/caddy/issues/1674
func getOptimalDefaultCipherSuites() []uint16 {
	if cpuid.CPU.Supports(cpuid.AESNI) {
		return defaultCipherSuitesWithAESNI
	}
	return defaultCipherSuitesWithoutAESNI
}

// SupportedCurves is the unordered map of supported curves
// or key exchange mechanisms ("curves" traditionally).
// https://golang.org/pkg/crypto/tls/#CurveID
var SupportedCurves = map[string]tls.CurveID{
	"x25519mlkem768": tls.X25519MLKEM768,
	"x25519":         tls.X25519,
	"secp256r1":      tls.CurveP256,
	"secp384r1":      tls.CurveP384,
	"secp521r1":      tls.CurveP521,
}

// supportedCertKeyTypes is all the key types that are supported
// for certificates that are obtained through ACME.
var supportedCertKeyTypes = map[string]certmagic.KeyType{
	"rsa2048": certmagic.RSA2048,
	"rsa4096": certmagic.RSA4096,
	"p256":    certmagic.P256,
	"p384":    certmagic.P384,
	"ed25519": certmagic.ED25519,
}

// defaultCurves is the list of only the curves or key exchange
// mechanisms we want to use by default. The order is irrelevant.
//
// This list should only include mechanisms which are fast by
// design (e.g. X25519) and those for which an optimized assembly
// implementation exists (e.g. P256). The latter ones can be
// found here:
// https://github.com/golang/go/tree/master/src/crypto/elliptic
var defaultCurves = []tls.CurveID{
	tls.X25519MLKEM768,
	tls.X25519,
	tls.CurveP256,
}

// SupportedProtocols is a map of supported protocols.
var SupportedProtocols = map[string]uint16{
	"tls1.2": tls.VersionTLS12,
	"tls1.3": tls.VersionTLS13,
}

// unsupportedProtocols is a map of unsupported protocols.
// Used for logging only, not enforcement.
var unsupportedProtocols = map[string]uint16{
	//nolint:staticcheck
	"ssl3.0": tls.VersionSSL30,
	"tls1.0": tls.VersionTLS10,
	"tls1.1": tls.VersionTLS11,
}

// publicKeyAlgorithms is the map of supported public key algorithms.
var publicKeyAlgorithms = map[string]x509.PublicKeyAlgorithm{
	"rsa":   x509.RSA,
	"dsa":   x509.DSA,
	"ecdsa": x509.ECDSA,
}

// ProtocolName returns the standard name for the passed protocol version ID
// (e.g. "TLS1.3") or a fallback representation of the ID value if the version
// is not supported. Both supported and unsupported (legacy) protocols are
// consulted; unknown IDs are rendered as hex (e.g. "0x0305").
func ProtocolName(id uint16) string {
	for k, v := range SupportedProtocols {
		if v == id {
			return k
		}
	}

	for k, v := range unsupportedProtocols {
		if v == id {
			return k
		}
	}

	return fmt.Sprintf("0x%04x", id)
}

================================================
FILE: modules/caddytls/zerosslissuer.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddytls

import (
	"context"
	"crypto/x509"
	"fmt"
	"strconv"
	"time"

	"github.com/caddyserver/certmagic"
	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(new(ZeroSSLIssuer))
}

// ZeroSSLIssuer uses the ZeroSSL API to get certificates.
// Note that this is distinct from ZeroSSL's ACME endpoint.
// To use ZeroSSL's ACME endpoint, use the ACMEIssuer
// configured with ZeroSSL's ACME directory endpoint.
type ZeroSSLIssuer struct {
	// The API key (or "access key") for using the ZeroSSL API.
	// REQUIRED.
	APIKey string `json:"api_key,omitempty"` //nolint:gosec // false positive... yes this is exported, for JSON interop

	// How many days the certificate should be valid for.
	// Only certain values are accepted; see ZeroSSL docs.
	ValidityDays int `json:"validity_days,omitempty"`

	// The host to bind to when opening a listener for
	// verifying domain names (or IPs).
	ListenHost string `json:"listen_host,omitempty"`

	// If HTTP is forwarded from port 80, specify the
	// forwarded port here.
	AlternateHTTPPort int `json:"alternate_http_port,omitempty"`

	// Use CNAME validation instead of HTTP. ZeroSSL's
	// API uses CNAME records for DNS validation, similar
	// to how Let's Encrypt uses TXT records for the
	// DNS challenge.
	CNAMEValidation *DNSChallengeConfig `json:"cname_validation,omitempty"`

	// Set during Provision; the issuer field wraps the
	// underlying certmagic implementation that does the work.
	logger  *zap.Logger
	storage certmagic.Storage
	issuer  *certmagic.ZeroSSLIssuer
}

// CaddyModule returns the Caddy module information.
func (*ZeroSSLIssuer) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.issuance.zerossl",
		New: func() caddy.Module { return new(ZeroSSLIssuer) },
	}
}

// Provision sets up the issuer.
func (iss *ZeroSSLIssuer) Provision(ctx caddy.Context) error {
	iss.logger = ctx.Logger()
	iss.storage = ctx.Storage()
	repl := caddy.NewReplacer()

	// Only build a DNS manager if CNAME validation with a
	// DNS provider module was actually configured.
	var dnsManager *certmagic.DNSManager
	if iss.CNAMEValidation != nil && len(iss.CNAMEValidation.ProviderRaw) > 0 {
		val, err := ctx.LoadModule(iss.CNAMEValidation, "ProviderRaw")
		if err != nil {
			return fmt.Errorf("loading DNS provider module: %v", err)
		}
		dnsManager = &certmagic.DNSManager{
			DNSProvider:        val.(certmagic.DNSProvider),
			TTL:                time.Duration(iss.CNAMEValidation.TTL),
			PropagationDelay:   time.Duration(iss.CNAMEValidation.PropagationDelay),
			PropagationTimeout: time.Duration(iss.CNAMEValidation.PropagationTimeout),
			Resolvers:          iss.CNAMEValidation.Resolvers,
			OverrideDomain:     iss.CNAMEValidation.OverrideDomain,
			Logger:             iss.logger.Named("cname"),
		}
	}

	// The API key may contain global placeholders (e.g. env vars).
	iss.issuer = &certmagic.ZeroSSLIssuer{
		APIKey:          repl.ReplaceAll(iss.APIKey, ""),
		ValidityDays:    iss.ValidityDays,
		ListenHost:      iss.ListenHost,
		AltHTTPPort:     iss.AlternateHTTPPort,
		Storage:         iss.storage,
		CNAMEValidation: dnsManager,
		Logger:          iss.logger,
	}

	return nil
}

// Issue obtains a certificate for the given csr.
func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
	return iss.issuer.Issue(ctx, csr)
}

// IssuerKey returns the unique issuer key for the configured CA endpoint.
func (iss *ZeroSSLIssuer) IssuerKey() string {
	return iss.issuer.IssuerKey()
}

// Revoke revokes the given certificate.
func (iss *ZeroSSLIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
	return iss.issuer.Revoke(ctx, cert, reason)
}

// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
//	... zerossl <api_key> {
//	    validity_days <days>
//	    alt_http_port <port>
//	    dns <provider_name> ...
// propagation_delay // propagation_timeout // resolvers // dns_ttl // } func (iss *ZeroSSLIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume issuer name // API key is required if !d.NextArg() { return d.ArgErr() } iss.APIKey = d.Val() if d.NextArg() { return d.ArgErr() } for nesting := d.Nesting(); d.NextBlock(nesting); { switch d.Val() { case "validity_days": if iss.ValidityDays != 0 { return d.Errf("validity days is already specified: %d", iss.ValidityDays) } days, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid number of days %s: %v", d.Val(), err) } iss.ValidityDays = days case "alt_http_port": if !d.NextArg() { return d.ArgErr() } port, err := strconv.Atoi(d.Val()) if err != nil { return d.Errf("invalid port %s: %v", d.Val(), err) } iss.AlternateHTTPPort = port case "dns": if !d.NextArg() { return d.ArgErr() } provName := d.Val() if iss.CNAMEValidation == nil { iss.CNAMEValidation = new(DNSChallengeConfig) } unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName) if err != nil { return err } iss.CNAMEValidation.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil) case "propagation_delay": if !d.NextArg() { return d.ArgErr() } delayStr := d.Val() delay, err := caddy.ParseDuration(delayStr) if err != nil { return d.Errf("invalid propagation_delay duration %s: %v", delayStr, err) } if iss.CNAMEValidation == nil { iss.CNAMEValidation = new(DNSChallengeConfig) } iss.CNAMEValidation.PropagationDelay = caddy.Duration(delay) case "propagation_timeout": if !d.NextArg() { return d.ArgErr() } timeoutStr := d.Val() var timeout time.Duration if timeoutStr == "-1" { timeout = time.Duration(-1) } else { var err error timeout, err = caddy.ParseDuration(timeoutStr) if err != nil { return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err) } } if iss.CNAMEValidation == nil { iss.CNAMEValidation = new(DNSChallengeConfig) } iss.CNAMEValidation.PropagationTimeout = 
caddy.Duration(timeout) case "resolvers": if iss.CNAMEValidation == nil { iss.CNAMEValidation = new(DNSChallengeConfig) } iss.CNAMEValidation.Resolvers = d.RemainingArgs() if len(iss.CNAMEValidation.Resolvers) == 0 { return d.ArgErr() } case "dns_ttl": if !d.NextArg() { return d.ArgErr() } ttlStr := d.Val() ttl, err := caddy.ParseDuration(ttlStr) if err != nil { return d.Errf("invalid dns_ttl duration %s: %v", ttlStr, err) } if iss.CNAMEValidation == nil { iss.CNAMEValidation = new(DNSChallengeConfig) } iss.CNAMEValidation.TTL = caddy.Duration(ttl) default: return d.Errf("unrecognized zerossl issuer property: %s", d.Val()) } } return nil } // Interface guards var ( _ certmagic.Issuer = (*ZeroSSLIssuer)(nil) _ certmagic.Revoker = (*ZeroSSLIssuer)(nil) _ caddy.Provisioner = (*ZeroSSLIssuer)(nil) ) ================================================ FILE: modules/filestorage/filestorage.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package filestorage import ( "github.com/caddyserver/certmagic" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(FileStorage{}) } // FileStorage is a certmagic.Storage wrapper for certmagic.FileStorage. type FileStorage struct { // The base path to the folder used for storage. Root string `json:"root,omitempty"` } // CaddyModule returns the Caddy module information. 
func (FileStorage) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.storage.file_system",
		New: func() caddy.Module { return new(FileStorage) },
	}
}

// CertMagicStorage converts s to a certmagic.Storage instance.
func (s FileStorage) CertMagicStorage() (certmagic.Storage, error) {
	return &certmagic.FileStorage{Path: s.Root}, nil
}

// UnmarshalCaddyfile sets up the storage module from Caddyfile tokens.
// The root path may be given either inline as an argument or via the
// "root" subdirective, but not both (setting it twice is an error).
func (s *FileStorage) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	if !d.Next() {
		return d.Err("expected tokens")
	}
	if d.NextArg() {
		s.Root = d.Val()
	}
	if d.NextArg() {
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "root":
			if !d.NextArg() {
				return d.ArgErr()
			}
			if s.Root != "" {
				return d.Err("root already set")
			}
			s.Root = d.Val()
			if d.NextArg() {
				return d.ArgErr()
			}
		default:
			return d.Errf("unrecognized parameter '%s'", d.Val())
		}
	}
	if s.Root == "" {
		return d.Err("missing root path (to use default, omit storage config entirely)")
	}
	return nil
}

// Interface guards
var (
	_ caddy.StorageConverter = (*FileStorage)(nil)
	_ caddyfile.Unmarshaler  = (*FileStorage)(nil)
)

================================================
FILE: modules/internal/network/networkproxy.go
================================================

package network

import (
	"errors"
	"net/http"
	"net/url"
	"strings"

	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(ProxyFromURL{})
	caddy.RegisterModule(ProxyFromNone{})
}

// The "url" proxy source uses the defined URL as the proxy
type ProxyFromURL struct {
	URL string `json:"url"`

	ctx    caddy.Context
	logger *zap.Logger
}

// CaddyModule implements Module.
func (p ProxyFromURL) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.network_proxy.url", New: func() caddy.Module { return &ProxyFromURL{} }, } } func (p *ProxyFromURL) Provision(ctx caddy.Context) error { p.ctx = ctx p.logger = ctx.Logger() return nil } // Validate implements Validator. func (p ProxyFromURL) Validate() error { if _, err := url.Parse(p.URL); err != nil { return err } return nil } // ProxyFunc implements ProxyFuncProducer. func (p ProxyFromURL) ProxyFunc() func(*http.Request) (*url.URL, error) { if strings.Contains(p.URL, "{") && strings.Contains(p.URL, "}") { // courtesy of @ImpostorKeanu: https://github.com/caddyserver/caddy/pull/6397 return func(r *http.Request) (*url.URL, error) { // retrieve the replacer from context. repl, ok := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) if !ok { err := errors.New("failed to obtain replacer from request") p.logger.Error(err.Error()) return nil, err } // apply placeholders to the value // note: h.ForwardProxyURL should never be empty at this point s := repl.ReplaceAll(p.URL, "") if s == "" { p.logger.Error("network_proxy URL was empty after applying placeholders", zap.String("initial_value", p.URL), zap.String("final_value", s), zap.String("hint", "check for invalid placeholders")) return nil, errors.New("empty value for network_proxy URL") } // parse the url pUrl, err := url.Parse(s) if err != nil { p.logger.Warn("failed to derive transport proxy from network_proxy URL") pUrl = nil } else if pUrl.Host == "" || strings.Split("", pUrl.Host)[0] == ":" { // url.Parse does not return an error on these values: // // - http://:80 // - pUrl.Host == ":80" // - /some/path // - pUrl.Host == "" // // Super edge cases, but humans are human. 
err = errors.New("supplied network_proxy URL is missing a host value") pUrl = nil } else { p.logger.Debug("setting transport proxy url", zap.String("url", s)) } return pUrl, err } } return func(r *http.Request) (*url.URL, error) { return url.Parse(p.URL) } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (p *ProxyFromURL) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() d.Next() p.URL = d.Val() return nil } // The "none" proxy source module disables the use of network proxy. type ProxyFromNone struct{} func (p ProxyFromNone) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.network_proxy.none", New: func() caddy.Module { return &ProxyFromNone{} }, } } // UnmarshalCaddyfile implements caddyfile.Unmarshaler. func (p ProxyFromNone) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil } // ProxyFunc implements ProxyFuncProducer. func (p ProxyFromNone) ProxyFunc() func(*http.Request) (*url.URL, error) { return nil } var ( _ caddy.Module = ProxyFromURL{} _ caddy.Provisioner = (*ProxyFromURL)(nil) _ caddy.Validator = ProxyFromURL{} _ caddy.ProxyFuncProducer = ProxyFromURL{} _ caddyfile.Unmarshaler = (*ProxyFromURL)(nil) _ caddy.Module = ProxyFromNone{} _ caddy.ProxyFuncProducer = ProxyFromNone{} _ caddyfile.Unmarshaler = ProxyFromNone{} ) ================================================ FILE: modules/logging/appendencoder.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/buffer"
	"go.uber.org/zap/zapcore"
	"golang.org/x/term"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(AppendEncoder{})
}

// AppendEncoder can be used to add fields to all log entries
// that pass through it. It is a wrapper around another
// encoder, which it uses to actually encode the log entries.
// It is most useful for adding information about the Caddy
// instance that is producing the log entries, possibly via
// an environment variable.
type AppendEncoder struct {
	// The underlying encoder that actually encodes the
	// log entries. If not specified, defaults to "json",
	// unless the output is a terminal, in which case
	// it defaults to "console".
	WrappedRaw json.RawMessage `json:"wrap,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`

	// A map of field names to their values. The values
	// can be global placeholders (e.g. env vars), or constants.
	// Note that the encoder does not run as part of an HTTP
	// request context, so request placeholders are not available.
	Fields map[string]any `json:"fields,omitempty"`

	// Set during Provision; wrappedIsDefault tracks whether the
	// wrapped encoder was chosen implicitly (affects
	// ConfigureDefaultFormat's behavior).
	wrapped zapcore.Encoder
	repl    *caddy.Replacer

	wrappedIsDefault bool
	ctx              caddy.Context
}

// CaddyModule returns the Caddy module information.
func (AppendEncoder) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.append",
		New: func() caddy.Module { return new(AppendEncoder) },
	}
}

// Provision sets up the encoder.
func (fe *AppendEncoder) Provision(ctx caddy.Context) error {
	fe.ctx = ctx
	fe.repl = caddy.NewReplacer()

	if fe.WrappedRaw == nil {
		// if wrap is not specified, default to JSON
		fe.wrapped = &JSONEncoder{}
		if p, ok := fe.wrapped.(caddy.Provisioner); ok {
			if err := p.Provision(ctx); err != nil {
				return fmt.Errorf("provisioning fallback encoder module: %v", err)
			}
		}
		fe.wrappedIsDefault = true
	} else {
		// set up wrapped encoder
		val, err := ctx.LoadModule(fe, "WrappedRaw")
		if err != nil {
			return fmt.Errorf("loading fallback encoder module: %v", err)
		}
		fe.wrapped = val.(zapcore.Encoder)
	}

	return nil
}

// ConfigureDefaultFormat will set the default format to "console"
// if the writer is a terminal. If already configured, it passes
// through the writer so a deeply nested encoder can configure
// its own default format.
func (fe *AppendEncoder) ConfigureDefaultFormat(wo caddy.WriterOpener) error {
	if !fe.wrappedIsDefault {
		// user explicitly chose a wrapped encoder; delegate if possible
		if cfd, ok := fe.wrapped.(caddy.ConfiguresFormatterDefault); ok {
			return cfd.ConfigureDefaultFormat(wo)
		}
		return nil
	}

	if caddy.IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
		fe.wrapped = &ConsoleEncoder{}
		if p, ok := fe.wrapped.(caddy.Provisioner); ok {
			if err := p.Provision(fe.ctx); err != nil {
				return fmt.Errorf("provisioning fallback encoder module: %v", err)
			}
		}
	}
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	append {
//	    wrap <encoder>
//	    fields {
//	        <field> <value>
//	    }
//	    <field> <value>
//	}
func (fe *AppendEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume encoder name

	// parse a field
	parseField := func() error {
		if fe.Fields == nil {
			fe.Fields = make(map[string]any)
		}
		field := d.Val()
		if !d.NextArg() {
			return d.ArgErr()
		}
		fe.Fields[field] = d.ScalarVal()
		if d.NextArg() {
			return d.ArgErr()
		}
		return nil
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "wrap":
			if !d.NextArg() {
				return d.ArgErr()
			}
			moduleName := d.Val()
			moduleID := "caddy.logging.encoders." + moduleName
			unm, err := caddyfile.UnmarshalModule(d, moduleID)
			if err != nil {
				return err
			}
			enc, ok := unm.(zapcore.Encoder)
			if !ok {
				return d.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
			}
			fe.WrappedRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, nil)

		case "fields":
			for nesting := d.Nesting(); d.NextBlock(nesting); {
				err := parseField()
				if err != nil {
					return err
				}
			}

		default:
			// if unknown, assume it's a field so that
			// the config can be flat
			err := parseField()
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// AddArray is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
	return fe.wrapped.AddArray(key, marshaler)
}

// AddObject is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error {
	return fe.wrapped.AddObject(key, marshaler)
}

// AddBinary is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddBinary(key string, value []byte) {
	fe.wrapped.AddBinary(key, value)
}

// AddByteString is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddByteString(key string, value []byte) {
	fe.wrapped.AddByteString(key, value)
}

// AddBool is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddBool(key string, value bool) {
	fe.wrapped.AddBool(key, value)
}

// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddComplex128(key string, value complex128) {
	fe.wrapped.AddComplex128(key, value)
}

// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddComplex64(key string, value complex64) {
	fe.wrapped.AddComplex64(key, value)
}

// AddDuration is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddDuration(key string, value time.Duration) {
	fe.wrapped.AddDuration(key, value)
}

// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddFloat64(key string, value float64) {
	fe.wrapped.AddFloat64(key, value)
}

// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddFloat32(key string, value float32) {
	fe.wrapped.AddFloat32(key, value)
}

// AddInt is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt(key string, value int) {
	fe.wrapped.AddInt(key, value)
}

// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt64(key string, value int64) {
	fe.wrapped.AddInt64(key, value)
}

// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt32(key string, value int32) {
	fe.wrapped.AddInt32(key, value)
}

// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt16(key string, value int16) {
	fe.wrapped.AddInt16(key, value)
}

// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt8(key string, value int8) {
	fe.wrapped.AddInt8(key, value)
}

// AddString is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddString(key, value string) {
	fe.wrapped.AddString(key, value)
}

// AddTime is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddTime(key string, value time.Time) {
	fe.wrapped.AddTime(key, value)
}

// AddUint is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint(key string, value uint) {
	fe.wrapped.AddUint(key, value)
}

// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint64(key string, value uint64) {
	fe.wrapped.AddUint64(key, value)
}

// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint32(key string, value uint32) {
	fe.wrapped.AddUint32(key, value)
}

// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint16(key string, value uint16) {
	fe.wrapped.AddUint16(key, value)
}

// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint8(key string, value uint8) {
	fe.wrapped.AddUint8(key, value)
}

// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUintptr(key string, value uintptr) {
	fe.wrapped.AddUintptr(key, value)
}

// AddReflected is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddReflected(key string, value any) error {
	return fe.wrapped.AddReflected(key, value)
}

// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) OpenNamespace(key string) {
	fe.wrapped.OpenNamespace(key)
}

// Clone is part of the zapcore.ObjectEncoder interface.
// Note: only Fields, the wrapped encoder, and the replacer are
// carried over; provisioning state (ctx, wrappedIsDefault) is not.
func (fe AppendEncoder) Clone() zapcore.Encoder {
	return AppendEncoder{
		Fields:  fe.Fields,
		wrapped: fe.wrapped.Clone(),
		repl:    fe.repl,
	}
}

// EncodeEntry partially implements the zapcore.Encoder interface.
func (fe AppendEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
	// clone the wrapped encoder so fields added here do not
	// accumulate across entries
	fe.wrapped = fe.wrapped.Clone()
	for _, field := range fields {
		field.AddTo(fe)
	}

	// append fields from config
	for key, value := range fe.Fields {
		// if the value is a string
		if str, ok := value.(string); ok {
			isPlaceholder := strings.HasPrefix(str, "{") &&
				strings.HasSuffix(str, "}") &&
				strings.Count(str, "{") == 1
			if isPlaceholder {
				// and it looks like a placeholder, evaluate it
				replaced, _ := fe.repl.Get(strings.Trim(str, "{}"))
				zap.Any(key, replaced).AddTo(fe)
			} else {
				// just use the string as-is
				zap.String(key, str).AddTo(fe)
			}
		} else {
			// not a string, so use the value as any
			zap.Any(key, value).AddTo(fe)
		}
	}

	// fields were already added above, so pass nil here
	return fe.wrapped.EncodeEntry(ent, nil)
}

// Interface guards
var (
	_ zapcore.Encoder                  = (*AppendEncoder)(nil)
	_ caddyfile.Unmarshaler            = (*AppendEncoder)(nil)
	_ caddy.ConfiguresFormatterDefault = (*AppendEncoder)(nil)
)

================================================
FILE: modules/logging/cores.go
================================================

package logging

import (
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(MockCore{})
}

// MockCore is a no-op module, purely for testing
type MockCore struct {
	zapcore.Core `json:"-"`
}

// CaddyModule returns the Caddy module information.
func (MockCore) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.cores.mock",
		New: func() caddy.Module { return new(MockCore) },
	}
}

// UnmarshalCaddyfile is a no-op; the mock core takes no configuration.
func (lec *MockCore) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	return nil
}

// Interface guards
var (
	_ zapcore.Core          = (*MockCore)(nil)
	_ caddy.Module          = (*MockCore)(nil)
	_ caddyfile.Unmarshaler = (*MockCore)(nil)
)

================================================
FILE: modules/logging/encoders.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/buffer"
	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {
	caddy.RegisterModule(ConsoleEncoder{})
	caddy.RegisterModule(JSONEncoder{})
}

// ConsoleEncoder encodes log entries that are mostly human-readable.
type ConsoleEncoder struct {
	zapcore.Encoder `json:"-"`
	LogEncoderConfig
}

// CaddyModule returns the Caddy module information.
func (ConsoleEncoder) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.console",
		New: func() caddy.Module { return new(ConsoleEncoder) },
	}
}

// Provision sets up the encoder.
func (ce *ConsoleEncoder) Provision(_ caddy.Context) error {
	// defaults chosen for readability on a terminal
	if ce.LevelFormat == "" {
		ce.LevelFormat = "color"
	}
	if ce.TimeFormat == "" {
		ce.TimeFormat = "wall_milli"
	}
	ce.Encoder = zapcore.NewConsoleEncoder(ce.ZapcoreEncoderConfig())
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	console {
//	    <common encoder subdirectives...>
//	}
//
// See the godoc on the LogEncoderConfig type for the syntax of
// subdirectives that are common to most/all encoders.
func (ce *ConsoleEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume encoder name
	if d.NextArg() {
		return d.ArgErr()
	}
	err := ce.LogEncoderConfig.UnmarshalCaddyfile(d)
	if err != nil {
		return err
	}
	return nil
}

// JSONEncoder encodes entries as JSON.
type JSONEncoder struct {
	zapcore.Encoder `json:"-"`
	LogEncoderConfig
}

// CaddyModule returns the Caddy module information.
func (JSONEncoder) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.json",
		New: func() caddy.Module { return new(JSONEncoder) },
	}
}

// Provision sets up the encoder.
func (je *JSONEncoder) Provision(_ caddy.Context) error {
	je.Encoder = zapcore.NewJSONEncoder(je.ZapcoreEncoderConfig())
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	json {
//	    <common encoder subdirectives...>
//	}
//
// See the godoc on the LogEncoderConfig type for the syntax of
// subdirectives that are common to most/all encoders.
func (je *JSONEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume encoder name
	if d.NextArg() {
		return d.ArgErr()
	}
	err := je.LogEncoderConfig.UnmarshalCaddyfile(d)
	if err != nil {
		return err
	}
	return nil
}

// LogEncoderConfig holds configuration common to most encoders.
type LogEncoderConfig struct {
	// Key names for the standard entry components; a nil pointer
	// means "use zap's production default" (see ZapcoreEncoderConfig).
	MessageKey    *string `json:"message_key,omitempty"`
	LevelKey      *string `json:"level_key,omitempty"`
	TimeKey       *string `json:"time_key,omitempty"`
	NameKey       *string `json:"name_key,omitempty"`
	CallerKey     *string `json:"caller_key,omitempty"`
	StacktraceKey *string `json:"stacktrace_key,omitempty"`
	LineEnding    *string `json:"line_ending,omitempty"`

	// Recognized values are: unix_seconds_float, unix_milli_float, unix_nano, iso8601, rfc3339, rfc3339_nano, wall, wall_milli, wall_nano, common_log.
	// The value may also be custom format per the Go `time` package layout specification, as described [here](https://pkg.go.dev/time#pkg-constants).
	TimeFormat string `json:"time_format,omitempty"`
	TimeLocal  bool   `json:"time_local,omitempty"`

	// Recognized values are: s/second/seconds, ns/nano/nanos, ms/milli/millis, string.
	// Empty and unrecognized value default to seconds.
	DurationFormat string `json:"duration_format,omitempty"`

	// Recognized values are: lower, upper, color.
	// Empty and unrecognized value default to lower.
	LevelFormat string `json:"level_format,omitempty"`
}

// UnmarshalCaddyfile populates the struct from Caddyfile tokens.
// Syntax:
//
//	{
//	    message_key <key>
//	    level_key <key>
//	    time_key <key>
//	    name_key <key>
//	    caller_key <key>
//	    stacktrace_key <key>
//	    line_ending <char>
//	    time_format <format>
//	    time_local
//	    duration_format <format>
//	    level_format <format>
//	}
func (lec *LogEncoderConfig) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.NextBlock(0) {
		subdir := d.Val()
		switch subdir {
		case "time_local":
			// boolean flag: takes no argument
			lec.TimeLocal = true
			if d.NextArg() {
				return d.ArgErr()
			}
			continue
		}
		// all remaining subdirectives take exactly one argument
		var arg string
		if !d.AllArgs(&arg) {
			return d.ArgErr()
		}
		switch subdir {
		case "message_key":
			lec.MessageKey = &arg
		case "level_key":
			lec.LevelKey = &arg
		case "time_key":
			lec.TimeKey = &arg
		case "name_key":
			lec.NameKey = &arg
		case "caller_key":
			lec.CallerKey = &arg
		case "stacktrace_key":
			lec.StacktraceKey = &arg
		case "line_ending":
			lec.LineEnding = &arg
		case "time_format":
			lec.TimeFormat = arg
		case "duration_format":
			lec.DurationFormat = arg
		case "level_format":
			lec.LevelFormat = arg
		default:
			return d.Errf("unrecognized subdirective %s", subdir)
		}
	}
	return nil
}

// ZapcoreEncoderConfig returns the equivalent zapcore.EncoderConfig.
// If lec is nil, zap.NewProductionEncoderConfig() is returned.
func (lec *LogEncoderConfig) ZapcoreEncoderConfig() zapcore.EncoderConfig { cfg := zap.NewProductionEncoderConfig() if lec == nil { lec = new(LogEncoderConfig) } if lec.MessageKey != nil { cfg.MessageKey = *lec.MessageKey } if lec.LevelKey != nil { cfg.LevelKey = *lec.LevelKey } if lec.TimeKey != nil { cfg.TimeKey = *lec.TimeKey } if lec.NameKey != nil { cfg.NameKey = *lec.NameKey } if lec.CallerKey != nil { cfg.CallerKey = *lec.CallerKey } if lec.StacktraceKey != nil { cfg.StacktraceKey = *lec.StacktraceKey } if lec.LineEnding != nil { cfg.LineEnding = *lec.LineEnding } // time format var timeFormatter zapcore.TimeEncoder switch lec.TimeFormat { case "", "unix_seconds_float": timeFormatter = zapcore.EpochTimeEncoder case "unix_milli_float": timeFormatter = zapcore.EpochMillisTimeEncoder case "unix_nano": timeFormatter = zapcore.EpochNanosTimeEncoder case "iso8601": timeFormatter = zapcore.ISO8601TimeEncoder default: timeFormat := lec.TimeFormat switch lec.TimeFormat { case "rfc3339": timeFormat = time.RFC3339 case "rfc3339_nano": timeFormat = time.RFC3339Nano case "wall": timeFormat = "2006/01/02 15:04:05" case "wall_milli": timeFormat = "2006/01/02 15:04:05.000" case "wall_nano": timeFormat = "2006/01/02 15:04:05.000000000" case "common_log": timeFormat = "02/Jan/2006:15:04:05 -0700" } timeFormatter = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { var time time.Time if lec.TimeLocal { time = ts.Local() } else { time = ts.UTC() } encoder.AppendString(time.Format(timeFormat)) } } cfg.EncodeTime = timeFormatter // duration format var durFormatter zapcore.DurationEncoder switch lec.DurationFormat { case "s", "second", "seconds": durFormatter = zapcore.SecondsDurationEncoder case "ns", "nano", "nanos": durFormatter = zapcore.NanosDurationEncoder case "ms", "milli", "millis": durFormatter = zapcore.MillisDurationEncoder case "string": durFormatter = zapcore.StringDurationEncoder default: durFormatter = zapcore.SecondsDurationEncoder } cfg.EncodeDuration = 
durFormatter // level format var levelFormatter zapcore.LevelEncoder switch lec.LevelFormat { case "", "lower": levelFormatter = zapcore.LowercaseLevelEncoder case "upper": levelFormatter = zapcore.CapitalLevelEncoder case "color": levelFormatter = zapcore.CapitalColorLevelEncoder } cfg.EncodeLevel = levelFormatter return cfg } var bufferpool = buffer.NewPool() // Interface guards var ( _ zapcore.Encoder = (*ConsoleEncoder)(nil) _ zapcore.Encoder = (*JSONEncoder)(nil) _ caddyfile.Unmarshaler = (*ConsoleEncoder)(nil) _ caddyfile.Unmarshaler = (*JSONEncoder)(nil) ) ================================================ FILE: modules/logging/filewriter.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "encoding/json" "fmt" "io" "math" "os" "path/filepath" "strconv" "strings" "time" "github.com/DeRuina/timberjack" "github.com/dustin/go-humanize" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(FileWriter{}) } // fileMode is a string made of 1 to 4 octal digits representing // a numeric mode as specified with the `chmod` unix command. // `"0777"` and `"777"` are thus equivalent values. type fileMode os.FileMode // UnmarshalJSON satisfies json.Unmarshaler. 
func (m *fileMode) UnmarshalJSON(b []byte) error { if len(b) == 0 { return io.EOF } var s string if err := json.Unmarshal(b, &s); err != nil { return err } mode, err := parseFileMode(s) if err != nil { return err } *m = fileMode(mode) return err } // MarshalJSON satisfies json.Marshaler. func (m *fileMode) MarshalJSON() ([]byte, error) { return fmt.Appendf(nil, "\"%04o\"", *m), nil } // parseFileMode parses a file mode string, // adding support for `chmod` unix command like // 1 to 4 digital octal values. func parseFileMode(s string) (os.FileMode, error) { modeStr := fmt.Sprintf("%04s", s) mode, err := strconv.ParseUint(modeStr, 8, 32) if err != nil { return 0, err } return os.FileMode(mode), nil } // FileWriter can write logs to files. By default, log files // are rotated ("rolled") when they get large, and old log // files get deleted, to ensure that the process does not // exhaust disk space. type FileWriter struct { // Filename is the name of the file to write. Filename string `json:"filename,omitempty"` // The file permissions mode. // 0600 by default. Mode fileMode `json:"mode,omitempty"` // DirMode controls permissions for any directories created to reach Filename. // Default: 0700 (current behavior). // // Special values: // - "inherit" → copy the nearest existing parent directory's perms (with r→x normalization) // - "from_file" → derive from the file Mode (with r→x), e.g. 0644 → 0755, 0600 → 0700 // Numeric octal strings (e.g. "0755") are also accepted. Subject to process umask. DirMode string `json:"dir_mode,omitempty"` // Roll toggles log rolling or rotation, which is // enabled by default. Roll *bool `json:"roll,omitempty"` // When a log file reaches approximately this size, // it will be rotated. 
RollSizeMB int `json:"roll_size_mb,omitempty"` // Roll log file after some time RollInterval time.Duration `json:"roll_interval,omitempty"` // Roll log file at fix minutes // For example []int{0, 30} will roll file at xx:00 and xx:30 each hour // Invalid value are ignored with a warning on stderr // See https://github.com/DeRuina/timberjack#%EF%B8%8F-rotation-notes--warnings for caveats RollAtMinutes []int `json:"roll_minutes,omitempty"` // Roll log file at fix time // For example []string{"00:00", "12:00"} will roll file at 00:00 and 12:00 each day // Invalid value are ignored with a warning on stderr // See https://github.com/DeRuina/timberjack#%EF%B8%8F-rotation-notes--warnings for caveats RollAt []string `json:"roll_at,omitempty"` // Whether to compress rolled files. // Default: true. // Deprecated: Use RollCompression instead, setting it to "none". RollCompress *bool `json:"roll_gzip,omitempty"` // RollCompression selects the compression algorithm for rolled files. // Accepted values: "none", "gzip", "zstd". // Default: gzip RollCompression string `json:"roll_compression,omitempty"` // Whether to use local timestamps in rolled filenames. // Default: false RollLocalTime bool `json:"roll_local_time,omitempty"` // The maximum number of rolled log files to keep. // Default: 10 RollKeep int `json:"roll_keep,omitempty"` // How many days to keep rolled log files. Default: 90 RollKeepDays int `json:"roll_keep_days,omitempty"` // Rotated file will have format --.log // Optional. If unset or invalid, defaults to 2006-01-02T15-04-05.000 (with fallback warning) // must be a Go time compatible format, see https://pkg.go.dev/time#pkg-constants BackupTimeFormat string `json:"backup_time_format,omitempty"` } // CaddyModule returns the Caddy module information. 
func (FileWriter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.writers.file",
		New: func() caddy.Module { return new(FileWriter) },
	}
}

// Provision sets up the module
func (fw *FileWriter) Provision(ctx caddy.Context) error {
	// Replace placeholder in filename
	repl := caddy.NewReplacer()
	filename, err := repl.ReplaceOrErr(fw.Filename, true, true)
	if err != nil {
		return fmt.Errorf("invalid filename for log file: %v", err)
	}

	fw.Filename = filename
	return nil
}

// String returns the absolute path to the log file if it can be
// resolved; otherwise the configured filename as-is.
func (fw FileWriter) String() string {
	fpath, err := caddy.FastAbs(fw.Filename)
	if err == nil {
		return fpath
	}
	return fw.Filename
}

// WriterKey returns a unique key representing this fw.
func (fw FileWriter) WriterKey() string {
	return "file:" + fw.Filename
}

// OpenWriter opens a new file writer.
func (fw FileWriter) OpenWriter() (io.WriteCloser, error) {
	// mode to use if the file does not exist yet; 0600 by default
	modeIfCreating := os.FileMode(fw.Mode)
	if modeIfCreating == 0 {
		modeIfCreating = 0o600
	}

	// roll log files as a sensible default to avoid disk space exhaustion
	roll := fw.Roll == nil || *fw.Roll

	// Ensure directory exists before opening the file.
	dirPath := filepath.Dir(fw.Filename)
	switch strings.ToLower(strings.TrimSpace(fw.DirMode)) {
	case "", "0":
		// Preserve current behavior: locked-down directories by default.
		if err := os.MkdirAll(dirPath, 0o700); err != nil {
			return nil, err
		}
	case "inherit":
		// copy the nearest existing parent dir's perms (r→x normalized)
		if err := mkdirAllInherit(dirPath); err != nil {
			return nil, err
		}
	case "from_file":
		// derive dir perms from the file mode (r→x normalized)
		if err := mkdirAllFromFile(dirPath, os.FileMode(fw.Mode)); err != nil {
			return nil, err
		}
	default:
		// explicit octal mode string, e.g. "0755"
		dm, err := parseFileMode(fw.DirMode)
		if err != nil {
			return nil, fmt.Errorf("dir_mode: %w", err)
		}
		if err := os.MkdirAll(dirPath, dm); err != nil {
			return nil, err
		}
	}

	// create/open the file
	file, err := os.OpenFile(fw.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, modeIfCreating)
	if err != nil {
		return nil, err
	}
	// NOTE: the Stat error is deliberately not checked here; it only
	// matters (and is checked) below when a mode is configured.
	info, err := file.Stat()
	if roll {
		file.Close() // timberjack will reopen it on its own
	}

	// Ensure already existing files have the right mode, since OpenFile will not set the mode in such case.
	if configuredMode := os.FileMode(fw.Mode); configuredMode != 0 {
		if err != nil {
			return nil, fmt.Errorf("unable to stat log file to see if we need to set permissions: %v", err)
		}
		// only chmod if the configured mode is different
		if info.Mode()&os.ModePerm != configuredMode&os.ModePerm {
			if err = os.Chmod(fw.Filename, configuredMode); err != nil {
				return nil, err
			}
		}
	}

	// if not rolling, then the plain file handle is all we need
	if !roll {
		return file, nil
	}

	// otherwise, return a rolling log; fill in defaults for zero values
	if fw.RollSizeMB == 0 {
		fw.RollSizeMB = 100
	}
	if fw.RollCompress == nil {
		compress := true
		fw.RollCompress = &compress
	}
	if fw.RollKeep == 0 {
		fw.RollKeep = 10
	}
	if fw.RollKeepDays == 0 {
		fw.RollKeepDays = 90
	}

	// Determine compression algorithm to use. Priority:
	// 1) explicit RollCompression (none|gzip|zstd)
	// 2) if RollCompress is unset or true -> gzip
	// 3) if RollCompress is false -> none
	var compression string
	if fw.RollCompression != "" {
		compression = strings.ToLower(strings.TrimSpace(fw.RollCompression))
		if compression != "none" && compression != "gzip" && compression != "zstd" {
			return nil, fmt.Errorf("invalid roll_compression: %s", fw.RollCompression)
		}
	} else {
		if fw.RollCompress == nil || *fw.RollCompress {
			compression = "gzip"
		} else {
			compression = "none"
		}
	}

	return &timberjack.Logger{
		Filename:         fw.Filename,
		MaxSize:          fw.RollSizeMB,
		MaxAge:           fw.RollKeepDays,
		MaxBackups:       fw.RollKeep,
		LocalTime:        fw.RollLocalTime,
		Compression:      compression,
		RotationInterval: fw.RollInterval,
		RotateAtMinutes:  fw.RollAtMinutes,
		RotateAt:         fw.RollAt,
		BackupTimeFormat: fw.BackupTimeFormat,
		FileMode:         os.FileMode(fw.Mode),
	}, nil
}

// normalizeDirPerm ensures that read bits also have execute bits set,
// so a readable directory is also traversable (e.g. 0644 → 0755).
func normalizeDirPerm(p os.FileMode) os.FileMode {
	if p&0o400 != 0 {
		p |= 0o100
	}
	if p&0o040 != 0 {
		p |= 0o010
	}
	if p&0o004 != 0 {
		p |= 0o001
	}
	return p
}

// mkdirAllInherit creates missing dirs using the nearest existing parent's
// permissions, normalized with r→x.
func mkdirAllInherit(dir string) error {
	// already exists as a directory: nothing to do
	if fi, err := os.Stat(dir); err == nil && fi.IsDir() {
		return nil
	}
	// walk upward until an existing path component (or the root) is found
	cur := dir
	var parent string
	for {
		next := filepath.Dir(cur)
		if next == cur {
			// reached filesystem root
			parent = next
			break
		}
		if fi, err := os.Stat(next); err == nil {
			if !fi.IsDir() {
				return fmt.Errorf("path component %s exists and is not a directory", next)
			}
			parent = next
			break
		}
		cur = next
	}
	// fall back to 0700 if the parent's perms cannot be read
	perm := os.FileMode(0o700)
	if fi, err := os.Stat(parent); err == nil && fi.IsDir() {
		perm = fi.Mode().Perm()
	}
	perm = normalizeDirPerm(perm)
	return os.MkdirAll(dir, perm)
}

// mkdirAllFromFile creates missing dirs using the file's mode (with r→x) so
// 0644 → 0755, 0600 → 0700, etc.
// NOTE(review): the parameter name `fileMode` shadows the package-level
// fileMode type within this function; consider renaming it.
func mkdirAllFromFile(dir string, fileMode os.FileMode) error {
	if fi, err := os.Stat(dir); err == nil && fi.IsDir() {
		return nil
	}
	perm := normalizeDirPerm(fileMode.Perm()) | 0o200 // ensure owner write on dir so files can be created
	return os.MkdirAll(dir, perm)
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
//	file <filename> {
//	    mode               <mode>
//	    dir_mode           <mode|inherit|from_file>
//	    roll_disabled
//	    roll_size          <size>
//	    roll_uncompressed
//	    roll_compression   <none|gzip|zstd>
//	    roll_local_time
//	    roll_keep          <num>
//	    roll_keep_for      <duration>
//	    roll_interval      <duration>
//	    roll_minutes       <minutes...>
//	    roll_at            <times...>
//	    backup_time_format <format>
//	}
//
// The roll_size value has megabyte resolution.
// Fractional values are rounded up to the next whole megabyte (MiB).
//
// By default, compression is enabled, but can be turned off by setting
// the roll_uncompressed option.
//
// The roll_keep_for duration has day resolution.
// Fractional values are rounded up to the next whole number of days.
//
// If any of the mode, roll_size, roll_keep, or roll_keep_for subdirectives are
// omitted or set to a zero value, then Caddy's default value for that
// subdirective is used.
func (fw *FileWriter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume writer name
	// exactly one filename argument is required
	if !d.NextArg() {
		return d.ArgErr()
	}
	fw.Filename = d.Val()
	if d.NextArg() {
		return d.ArgErr()
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "mode":
			var modeStr string
			if !d.AllArgs(&modeStr) {
				return d.ArgErr()
			}
			mode, err := parseFileMode(modeStr)
			if err != nil {
				return d.Errf("parsing mode: %v", err)
			}
			fw.Mode = fileMode(mode)

		case "dir_mode":
			var val string
			if !d.AllArgs(&val) {
				return d.ArgErr()
			}
			val = strings.TrimSpace(val)
			switch strings.ToLower(val) {
			case "inherit", "from_file":
				fw.DirMode = val
			default:
				// anything else must parse as an octal mode string
				if _, err := parseFileMode(val); err != nil {
					return d.Errf("parsing dir_mode: %v", err)
				}
				fw.DirMode = val
			}

		case "roll_disabled":
			var f bool
			fw.Roll = &f
			if d.NextArg() {
				return d.ArgErr()
			}

		case "roll_size":
			var sizeStr string
			if !d.AllArgs(&sizeStr) {
				return d.ArgErr()
			}
			size, err := humanize.ParseBytes(sizeStr)
			if err != nil {
				return d.Errf("parsing size: %v", err)
			}
			// round up to the next whole MiB
			fw.RollSizeMB = int(math.Ceil(float64(size) / humanize.MiByte))

		case "roll_uncompressed":
			var f bool
			fw.RollCompress = &f
			if d.NextArg() {
				return d.ArgErr()
			}

		case "roll_compression":
			var comp string
			if !d.AllArgs(&comp) {
				return d.ArgErr()
			}
			comp = strings.ToLower(strings.TrimSpace(comp))
			switch comp {
			case "none", "gzip", "zstd":
				fw.RollCompression = comp
			default:
				return d.Errf("parsing roll_compression: must be 'none', 'gzip' or 'zstd'")
			}

		case "roll_local_time":
			fw.RollLocalTime = true
			if d.NextArg() {
				return d.ArgErr()
			}

		case "roll_keep":
			var keepStr string
			if !d.AllArgs(&keepStr) {
				return d.ArgErr()
			}
			keep, err := strconv.Atoi(keepStr)
			if err != nil {
				return d.Errf("parsing roll_keep number: %v", err)
			}
			fw.RollKeep = keep

		case "roll_keep_for":
			var keepForStr string
			if !d.AllArgs(&keepForStr) {
				return d.ArgErr()
			}
			keepFor, err := caddy.ParseDuration(keepForStr)
			if err != nil {
				return d.Errf("parsing roll_keep_for duration: %v", err)
			}
			if keepFor < 0 {
				return d.Errf("negative roll_keep_for duration: %v", keepFor)
			}
			// round up to the next whole day
			fw.RollKeepDays = int(math.Ceil(keepFor.Hours() / 24))

		case "roll_interval":
			var durationStr string
			if !d.AllArgs(&durationStr) {
				return d.ArgErr()
			}
			duration, err := time.ParseDuration(durationStr)
			if err != nil {
				return d.Errf("parsing roll_interval duration: %v", err)
			}
			fw.RollInterval = duration

		case "roll_minutes":
			// Accept either a single comma-separated argument or
			// multiple space-separated arguments. Collect all
			// remaining args on the line and split on commas.
			args := d.RemainingArgs()
			if len(args) == 0 {
				return d.ArgErr()
			}
			var minutes []int
			for _, arg := range args {
				parts := strings.SplitSeq(arg, ",")
				for p := range parts {
					ms := strings.TrimSpace(p)
					if ms == "" {
						return d.Errf("parsing roll_minutes: empty value")
					}
					m, err := strconv.Atoi(ms)
					if err != nil {
						return d.Errf("parsing roll_minutes number: %v", err)
					}
					minutes = append(minutes, m)
				}
			}
			fw.RollAtMinutes = minutes

		case "roll_at":
			// Accept either a single comma-separated argument or
			// multiple space-separated arguments. Collect all
			// remaining args on the line and split on commas.
			args := d.RemainingArgs()
			if len(args) == 0 {
				return d.ArgErr()
			}
			var times []string
			for _, arg := range args {
				parts := strings.SplitSeq(arg, ",")
				for p := range parts {
					ts := strings.TrimSpace(p)
					if ts == "" {
						return d.Errf("parsing roll_at: empty value")
					}
					times = append(times, ts)
				}
			}
			fw.RollAt = times

		case "backup_time_format":
			var format string
			if !d.AllArgs(&format) {
				return d.ArgErr()
			}
			fw.BackupTimeFormat = format

		default:
			return d.Errf("unrecognized subdirective '%s'", d.Val())
		}
	}
	return nil
}

// Interface guards
var (
	_ caddy.Provisioner     = (*FileWriter)(nil)
	_ caddy.WriterOpener    = (*FileWriter)(nil)
	_ caddyfile.Unmarshaler = (*FileWriter)(nil)
)



================================================
FILE: modules/logging/filewriter_test.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows

package logging

import (
	"encoding/json"
	"os"
	"path"
	"path/filepath"
	"syscall"
	"testing"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// TestFileCreationMode verifies that newly created log files get the
// configured (or default 0600) permission bits, with and without rolling.
func TestFileCreationMode(t *testing.T) {
	on := true
	off := false

	tests := []struct {
		name     string
		fw       FileWriter
		wantMode os.FileMode
	}{
		{
			name: "default mode no roll",
			fw: FileWriter{
				Roll: &off,
			},
			wantMode: 0o600,
		},
		{
			name: "default mode roll",
			fw: FileWriter{
				Roll: &on,
			},
			wantMode: 0o600,
		},
		{
			name: "custom mode no roll",
			fw: FileWriter{
				Roll: &off,
				Mode: 0o666,
			},
			wantMode: 0o666,
		},
		{
			name: "custom mode roll",
			fw: FileWriter{
				Roll: &on,
				Mode: 0o666,
			},
			wantMode: 0o666,
		},
	}

	// neutralize the process umask so the created mode is observable
	m := syscall.Umask(0o000)
	defer syscall.Umask(m)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dir, err := os.MkdirTemp("", "caddytest")
			if err != nil {
				t.Fatalf("failed to create tempdir: %v", err)
			}
			defer os.RemoveAll(dir)
			fpath := filepath.Join(dir, "test.log")
			tt.fw.Filename = fpath

			logger, err := tt.fw.OpenWriter()
			if err != nil {
				t.Fatalf("failed to create file: %v", err)
			}
			defer logger.Close()

			st, err := os.Stat(fpath)
			if err != nil {
				t.Fatalf("failed to check file permissions: %v", err)
			}

			if st.Mode() != tt.wantMode {
				t.Errorf("%s: file mode is %v, want %v", tt.name, st.Mode(), tt.wantMode)
			}
		})
	}
}

// TestFileRotationPreserveMode forces a rotation and checks that both the
// live file and the rotated backup keep the configured mode.
func TestFileRotationPreserveMode(t *testing.T) {
	m := syscall.Umask(0o000)
	defer syscall.Umask(m)

	dir, err := os.MkdirTemp("", "caddytest")
	if err != nil {
		t.Fatalf("failed to create tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	fpath := path.Join(dir, "test.log")

	roll := true
	mode := fileMode(0o640)
	fw := FileWriter{
		Filename:   fpath,
		Mode:       mode,
		Roll:       &roll,
		RollSizeMB: 1,
	}

	logger, err := fw.OpenWriter()
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	defer logger.Close()

	// two writes that together cross the 1 MB threshold trigger a roll
	b := make([]byte, 1024*1024-1000)
	logger.Write(b)
	logger.Write(b[0:2000])

	files, err := os.ReadDir(dir)
	if err != nil {
		t.Fatalf("failed to read temporary log dir: %v", err)
	}

	// We might get 2 or 3 files depending on the race between compressed log file generation,
	// removal of the non compressed file and reading the directory.
	// Ordering of the files are [ test-*.log test-*.log.gz test.log ]
	if len(files) < 2 || len(files) > 3 {
		t.Log("got files: ", files)
		t.Fatalf("got %v files want 2", len(files))
	}

	wantPattern := "test-*-*-*-*-*.*.log"
	test_date_log := files[0]
	if m, _ := path.Match(wantPattern, test_date_log.Name()); m != true {
		t.Fatalf("got %v filename want %v", test_date_log.Name(), wantPattern)
	}

	st, err := os.Stat(path.Join(dir, test_date_log.Name()))
	if err != nil {
		t.Fatalf("failed to check file permissions: %v", err)
	}

	if st.Mode() != os.FileMode(mode) {
		t.Errorf("file mode is %v, want %v", st.Mode(), mode)
	}

	test_dot_log := files[len(files)-1]
	if test_dot_log.Name() != "test.log" {
		t.Fatalf("got %v filename want test.log", test_dot_log.Name())
	}

	st, err = os.Stat(path.Join(dir, test_dot_log.Name()))
	if err != nil {
		t.Fatalf("failed to check file permissions: %v", err)
	}

	if st.Mode() != os.FileMode(mode) {
		t.Errorf("file mode is %v, want %v", st.Mode(), mode)
	}
}

// TestFileModeConfig checks Caddyfile parsing of the mode subdirective,
// including short (1-3 digit) octal forms.
// NOTE(review): the raw-string Caddyfile fixtures lost their original line
// breaks during extraction; they are reconstructed here.
func TestFileModeConfig(t *testing.T) {
	tests := []struct {
		name    string
		d       *caddyfile.Dispenser
		fw      FileWriter
		wantErr bool
	}{
		{
			name: "set mode",
			d: caddyfile.NewTestDispenser(`
file test.log {
	mode 0666
}
`),
			fw: FileWriter{
				Mode: 0o666,
			},
			wantErr: false,
		},
		{
			name: "set mode 3 digits",
			d: caddyfile.NewTestDispenser(`
file test.log {
	mode 666
}
`),
			fw: FileWriter{
				Mode: 0o666,
			},
			wantErr: false,
		},
		{
			name: "set mode 2 digits",
			d: caddyfile.NewTestDispenser(`
file test.log {
	mode 66
}
`),
			fw: FileWriter{
				Mode: 0o066,
			},
			wantErr: false,
		},
		{
			name: "set mode 1 digits",
			d: caddyfile.NewTestDispenser(`
file test.log {
	mode 6
}
`),
			fw: FileWriter{
				Mode: 0o006,
			},
			wantErr: false,
		},
		{
			name: "invalid mode",
			d: caddyfile.NewTestDispenser(`
file test.log {
	mode foobar
}
`),
			fw:      FileWriter{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fw := &FileWriter{}
			if err := fw.UnmarshalCaddyfile(tt.d); (err != nil) != tt.wantErr {
				t.Fatalf("UnmarshalCaddyfile() error = %v, want %v", err, tt.wantErr)
			}
			if fw.Mode != tt.fw.Mode {
				t.Errorf("got mode %v, want %v", fw.Mode, tt.fw.Mode)
			}
		})
	}
}

// TestFileModeJSON checks JSON decoding of the mode field, including
// rejection of non-octal and non-string values.
func TestFileModeJSON(t *testing.T) {
	tests := []struct {
		name    string
		config  string
		fw      FileWriter
		wantErr bool
	}{
		{
			name: "set mode",
			config: `
			{
				"mode": "0666"
			}
			`,
			fw: FileWriter{
				Mode: 0o666,
			},
			wantErr: false,
		},
		{
			name: "set mode invalid value",
			config: `
			{
				"mode": "0x666"
			}
			`,
			fw:      FileWriter{},
			wantErr: true,
		},
		{
			name: "set mode invalid string",
			config: `
			{
				"mode": 777
			}
			`,
			fw:      FileWriter{},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fw := &FileWriter{}
			if err := json.Unmarshal([]byte(tt.config), fw); (err != nil) != tt.wantErr {
				t.Fatalf("UnmarshalJSON() error = %v, want %v", err, tt.wantErr)
			}
			if fw.Mode != tt.fw.Mode {
				t.Errorf("got mode %v, want %v", fw.Mode, tt.fw.Mode)
			}
		})
	}
}

// TestFileModeToJSON checks JSON encoding of the mode field, which must
// always be a quoted 4-digit octal string.
func TestFileModeToJSON(t *testing.T) {
	tests := []struct {
		name    string
		mode    fileMode
		want    string
		wantErr bool
	}{
		{
			name:    "none zero",
			mode:    0o644,
			want:    `"0644"`,
			wantErr: false,
		},
		{
			name:    "zero mode",
			mode:    0,
			want:    `"0000"`,
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var b []byte
			var err error

			if b, err = json.Marshal(&tt.mode); (err != nil) != tt.wantErr {
				t.Fatalf("MarshalJSON() error = %v, want %v", err, tt.wantErr)
			}

			got := string(b[:])

			if got != tt.want {
				t.Errorf("got mode %v, want %v", got, tt.want)
			}
		})
	}
}

// TestFileModeModification verifies that opening a pre-existing file with a
// different configured mode chmods it to the configured mode.
func TestFileModeModification(t *testing.T) {
	m := syscall.Umask(0o000)
	defer syscall.Umask(m)

	dir, err := os.MkdirTemp("", "caddytest")
	if err != nil {
		t.Fatalf("failed to create tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	fpath := path.Join(dir, "test.log")
	// pre-create the file with a stricter mode than the writer's config
	f_tmp, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(0o600))
	if err != nil {
		t.Fatalf("failed to create test file: %v", err)
	}
	f_tmp.Close()

	fw := FileWriter{
		Mode:     0o666,
		Filename: fpath,
	}

	logger, err := fw.OpenWriter()
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	defer logger.Close()

	st, err := os.Stat(fpath)
	if err != nil {
		t.Fatalf("failed to check file permissions: %v", err)
	}

	want := os.FileMode(fw.Mode)
	if st.Mode() != want {
		t.Errorf("file mode is %v, want %v", st.Mode(), want)
	}
}

// TestDirMode_Inherit: created intermediate dirs copy the nearest existing
// parent's permissions.
func TestDirMode_Inherit(t *testing.T) {
	m := syscall.Umask(0)
	defer syscall.Umask(m)

	parent := t.TempDir()
	if err := os.Chmod(parent, 0o755); err != nil {
		t.Fatal(err)
	}

	targetDir := filepath.Join(parent, "a", "b")
	fw := &FileWriter{
		Filename: filepath.Join(targetDir, "test.log"),
		DirMode:  "inherit",
		Mode:     0o640,
		Roll:     func() *bool { f := false; return &f }(),
	}

	w, err := fw.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w.Close()

	st, err := os.Stat(targetDir)
	if err != nil {
		t.Fatal(err)
	}
	if got := st.Mode().Perm(); got != 0o755 {
		t.Fatalf("dir perm = %o, want 0755", got)
	}
}

// TestDirMode_FromFile: directory perms are derived from the file mode with
// r→x normalization (0644 → 0755, 0600 → 0700).
func TestDirMode_FromFile(t *testing.T) {
	m := syscall.Umask(0)
	defer syscall.Umask(m)

	base := t.TempDir()

	dir1 := filepath.Join(base, "logs1")
	fw1 := &FileWriter{
		Filename: filepath.Join(dir1, "app.log"),
		DirMode:  "from_file",
		Mode:     0o644, // => dir 0755
		Roll:     func() *bool { f := false; return &f }(),
	}
	w1, err := fw1.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w1.Close()
	st1, err := os.Stat(dir1)
	if err != nil {
		t.Fatal(err)
	}
	if got := st1.Mode().Perm(); got != 0o755 {
		t.Fatalf("dir perm = %o, want 0755", got)
	}

	dir2 := filepath.Join(base, "logs2")
	fw2 := &FileWriter{
		Filename: filepath.Join(dir2, "app.log"),
		DirMode:  "from_file",
		Mode:     0o600, // => dir 0700
		Roll:     func() *bool { f := false; return &f }(),
	}
	w2, err := fw2.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w2.Close()
	st2, err := os.Stat(dir2)
	if err != nil {
		t.Fatal(err)
	}
	if got := st2.Mode().Perm(); got != 0o700 {
		t.Fatalf("dir perm = %o, want 0700", got)
	}
}

// TestDirMode_ExplicitOctal: an explicit octal dir_mode is applied verbatim.
func TestDirMode_ExplicitOctal(t *testing.T) {
	m := syscall.Umask(0)
	defer syscall.Umask(m)

	base := t.TempDir()
	dest := filepath.Join(base, "logs3")
	fw := &FileWriter{
		Filename: filepath.Join(dest, "app.log"),
		DirMode:  "0750",
		Mode:     0o640,
		Roll:     func() *bool { f := false; return &f }(),
	}
	w, err := fw.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w.Close()
	st, err := os.Stat(dest)
	if err != nil {
		t.Fatal(err)
	}
	if got := st.Mode().Perm(); got != 0o750 {
		t.Fatalf("dir perm = %o, want 0750", got)
	}
}

// TestDirMode_Default0700: unset dir_mode keeps the historical 0700 default.
func TestDirMode_Default0700(t *testing.T) {
	m := syscall.Umask(0)
	defer syscall.Umask(m)

	base := t.TempDir()
	dest := filepath.Join(base, "logs4")
	fw := &FileWriter{
		Filename: filepath.Join(dest, "app.log"),
		Mode:     0o640,
		Roll:     func() *bool { f := false; return &f }(),
	}
	w, err := fw.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w.Close()
	st, err := os.Stat(dest)
	if err != nil {
		t.Fatal(err)
	}
	if got := st.Mode().Perm(); got != 0o700 {
		t.Fatalf("dir perm = %o, want 0700", got)
	}
}

// TestDirMode_UmaskInteraction: sanity-check dir creation under a typical umask.
func TestDirMode_UmaskInteraction(t *testing.T) {
	_ = syscall.Umask(0o022) // typical umask; restore after
	defer syscall.Umask(0)

	base := t.TempDir()
	dest := filepath.Join(base, "logs5")
	fw := &FileWriter{
		Filename: filepath.Join(dest, "app.log"),
		DirMode:  "0755",
		Mode:     0o644,
		Roll:     func() *bool { f := false; return &f }(),
	}
	w, err := fw.OpenWriter()
	if err != nil {
		t.Fatal(err)
	}
	_ = w.Close()
	st, err := os.Stat(dest)
	if err != nil {
		t.Fatal(err)
	}
	// 0755 &^ 0022 still 0755 for dirs; this just sanity-checks we didn't get stricter unexpectedly
	if got := st.Mode().Perm(); got != 0o755 {
		t.Fatalf("dir perm = %o, want 0755 (considering umask)", got)
	}
}

func TestCaddyfile_DirMode_Inherit(t *testing.T) {
	d := caddyfile.NewTestDispenser(`
file /var/log/app.log {
	dir_mode inherit
	mode 0640
}`)
	var fw FileWriter
	if err := fw.UnmarshalCaddyfile(d); err != nil {
		t.Fatal(err)
	}
	if fw.DirMode != "inherit" {
		t.Fatalf("got %q", fw.DirMode)
	}
	if fw.Mode != 0o640 {
		t.Fatalf("mode = %o", fw.Mode)
	}
}

func TestCaddyfile_DirMode_FromFile(t *testing.T) {
	d := caddyfile.NewTestDispenser(`
file /var/log/app.log {
	dir_mode from_file
	mode 0600
}`)
	var fw FileWriter
	if err := fw.UnmarshalCaddyfile(d); err != nil {
		t.Fatal(err)
	}
	if fw.DirMode != "from_file" {
		t.Fatalf("got %q", fw.DirMode)
	}
	if fw.Mode != 0o600 {
		t.Fatalf("mode = %o", fw.Mode)
	}
}

func TestCaddyfile_DirMode_Octal(t *testing.T) {
	d := caddyfile.NewTestDispenser(`
file /var/log/app.log {
	dir_mode 0755
}`)
	var fw FileWriter
	if err := fw.UnmarshalCaddyfile(d); err != nil {
		t.Fatal(err)
	}
	if fw.DirMode != "0755" {
		t.Fatalf("got %q", fw.DirMode)
	}
}

func TestCaddyfile_DirMode_Invalid(t *testing.T) {
	d := caddyfile.NewTestDispenser(`
file /var/log/app.log {
	dir_mode nope
}`)
	var fw FileWriter
	if err := fw.UnmarshalCaddyfile(d); err == nil {
		t.Fatal("expected error for invalid dir_mode")
	}
}



================================================
FILE: modules/logging/filewriter_test_windows.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows

package logging

import (
	"os"
	"path"
	"testing"
)

// Windows relies on ACLs instead of unix permissions model.
// Go allows to open files with a particular mode but it is limited to read or write.
// See https://cs.opensource.google/go/go/+/refs/tags/go1.22.3:src/syscall/syscall_windows.go;l=708.
// This is pretty restrictive and has little interest for log files and thus we just test that log files are
// opened with R/W permissions by default on Windows too.
// TestFileCreationMode (Windows variant) only checks that the file is
// created readable/writable by the user, since Windows uses ACLs.
func TestFileCreationMode(t *testing.T) {
	dir, err := os.MkdirTemp("", "caddytest")
	if err != nil {
		t.Fatalf("failed to create tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	fw := &FileWriter{
		Filename: path.Join(dir, "test.log"),
	}

	logger, err := fw.OpenWriter()
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	defer logger.Close()

	st, err := os.Stat(fw.Filename)
	if err != nil {
		t.Fatalf("failed to check file permissions: %v", err)
	}

	if st.Mode().Perm()&0o600 != 0o600 {
		t.Fatalf("file mode is %v, want rw for user", st.Mode().Perm())
	}
}

// TestDirMode_Windows_CreateSucceeds verifies that every dir_mode variant
// still creates the directory and file on Windows (perms are not asserted).
func TestDirMode_Windows_CreateSucceeds(t *testing.T) {
	dir, err := os.MkdirTemp("", "caddytest")
	if err != nil {
		t.Fatalf("failed to create tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	tests := []struct {
		name    string
		dirMode string
	}{
		{"inherit", "inherit"},
		{"from_file", "from_file"},
		{"octal", "0755"},
		{"default", ""},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			subdir := path.Join(dir, "logs-"+tt.name)
			fw := &FileWriter{
				Filename: path.Join(subdir, "test.log"),
				DirMode:  tt.dirMode,
				Mode:     0o600,
			}
			w, err := fw.OpenWriter()
			if err != nil {
				t.Fatalf("failed to open writer: %v", err)
			}
			defer w.Close()
			if _, err := os.Stat(fw.Filename); err != nil {
				t.Fatalf("expected file to exist: %v", err)
			}
		})
	}
}



================================================
FILE: modules/logging/filterencoder.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. package logging import ( "encoding/json" "fmt" "os" "time" "go.uber.org/zap" "go.uber.org/zap/buffer" "go.uber.org/zap/zapcore" "golang.org/x/term" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(FilterEncoder{}) } // FilterEncoder can filter (manipulate) fields on // log entries before they are actually encoded by // an underlying encoder. type FilterEncoder struct { // The underlying encoder that actually encodes the // log entries. If not specified, defaults to "json", // unless the output is a terminal, in which case // it defaults to "console". WrappedRaw json.RawMessage `json:"wrap,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"` // A map of field names to their filters. Note that this // is not a module map; the keys are field names. // // Nested fields can be referenced by representing a // layer of nesting with `>`. In other words, for an // object like `{"a":{"b":0}}`, the inner field can // be referenced as `a>b`. // // The following fields are fundamental to the log and // cannot be filtered because they are added by the // underlying logging library as special cases: ts, // level, logger, and msg. FieldsRaw map[string]json.RawMessage `json:"fields,omitempty" caddy:"namespace=caddy.logging.encoders.filter inline_key=filter"` wrapped zapcore.Encoder Fields map[string]LogFieldFilter `json:"-"` // used to keep keys unique across nested objects keyPrefix string wrappedIsDefault bool ctx caddy.Context } // CaddyModule returns the Caddy module information. func (FilterEncoder) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.logging.encoders.filter", New: func() caddy.Module { return new(FilterEncoder) }, } } // Provision sets up the encoder. 
// Provision loads the wrapped encoder (defaulting to JSON when none is
// configured) and the per-field filter modules.
func (fe *FilterEncoder) Provision(ctx caddy.Context) error {
	fe.ctx = ctx

	if fe.WrappedRaw == nil {
		// if wrap is not specified, default to JSON
		fe.wrapped = &JSONEncoder{}
		if p, ok := fe.wrapped.(caddy.Provisioner); ok {
			if err := p.Provision(ctx); err != nil {
				return fmt.Errorf("provisioning fallback encoder module: %v", err)
			}
		}
		// remember this so ConfigureDefaultFormat may replace it
		// with a console encoder if output goes to a terminal
		fe.wrappedIsDefault = true
	} else {
		// set up wrapped encoder
		val, err := ctx.LoadModule(fe, "WrappedRaw")
		if err != nil {
			return fmt.Errorf("loading fallback encoder module: %v", err)
		}
		fe.wrapped = val.(zapcore.Encoder)
	}

	// set up each field filter
	if fe.Fields == nil {
		fe.Fields = make(map[string]LogFieldFilter)
	}

	vals, err := ctx.LoadModule(fe, "FieldsRaw")
	if err != nil {
		return fmt.Errorf("loading log filter modules: %v", err)
	}
	for fieldName, modIface := range vals.(map[string]any) {
		fe.Fields[fieldName] = modIface.(LogFieldFilter)
	}

	return nil
}

// ConfigureDefaultFormat will set the default format to "console"
// if the writer is a terminal. If already configured as a filter
// encoder, it passes through the writer so a deeply nested filter
// encoder can configure its own default format.
func (fe *FilterEncoder) ConfigureDefaultFormat(wo caddy.WriterOpener) error {
	if !fe.wrappedIsDefault {
		// an explicit encoder was configured; delegate so a nested
		// filter encoder can still pick its own default
		if cfd, ok := fe.wrapped.(caddy.ConfiguresFormatterDefault); ok {
			return cfd.ConfigureDefaultFormat(wo)
		}
		return nil
	}

	if caddy.IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
		fe.wrapped = &ConsoleEncoder{}
		if p, ok := fe.wrapped.(caddy.Provisioner); ok {
			if err := p.Provision(fe.ctx); err != nil {
				return fmt.Errorf("provisioning fallback encoder module: %v", err)
			}
		}
	}
	return nil
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Syntax:
//
//	filter {
//	    wrap <encode_module> ...
//	    fields {
//	        <field> <filter> ...
//	    }
//	    <field> <filter> ...
//	}
func (fe *FilterEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume encoder name

	// Track regexp filters for automatic merging
	regexpFilters := make(map[string][]*RegexpFilter)

	// parse a field
	parseField := func() error {
		if fe.FieldsRaw == nil {
			fe.FieldsRaw = make(map[string]json.RawMessage)
		}
		field := d.Val()
		if !d.NextArg() {
			return d.ArgErr()
		}
		filterName := d.Val()
		moduleID := "caddy.logging.encoders.filter." + filterName
		unm, err := caddyfile.UnmarshalModule(d, moduleID)
		if err != nil {
			return err
		}
		filter, ok := unm.(LogFieldFilter)
		if !ok {
			return d.Errf("module %s (%T) is not a logging.LogFieldFilter", moduleID, unm)
		}

		// Special handling for regexp filters to support multiple instances
		if regexpFilter, isRegexp := filter.(*RegexpFilter); isRegexp {
			regexpFilters[field] = append(regexpFilters[field], regexpFilter)
			return nil // Don't set FieldsRaw yet, we'll merge them later
		}

		// Check if we're trying to add a non-regexp filter to a field that already has regexp filters
		if _, hasRegexpFilters := regexpFilters[field]; hasRegexpFilters {
			return d.Errf("cannot mix regexp filters with other filter types for field %s", field)
		}

		// Check if field already has a filter and it's not regexp-related
		if _, exists := fe.FieldsRaw[field]; exists {
			return d.Errf("field %s already has a filter; multiple non-regexp filters per field are not supported", field)
		}

		fe.FieldsRaw[field] = caddyconfig.JSONModuleObject(filter, "filter", filterName, nil)
		return nil
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "wrap":
			if !d.NextArg() {
				return d.ArgErr()
			}
			moduleName := d.Val()
			moduleID := "caddy.logging.encoders." + moduleName
			unm, err := caddyfile.UnmarshalModule(d, moduleID)
			if err != nil {
				return err
			}
			enc, ok := unm.(zapcore.Encoder)
			if !ok {
				return d.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
			}
			fe.WrappedRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, nil)

		case "fields":
			for nesting := d.Nesting(); d.NextBlock(nesting); {
				err := parseField()
				if err != nil {
					return err
				}
			}

		default:
			// if unknown, assume it's a field so that
			// the config can be flat
			err := parseField()
			if err != nil {
				return err
			}
		}
	}

	// After parsing all fields, merge multiple regexp filters into MultiRegexpFilter
	for field, filters := range regexpFilters {
		if len(filters) == 1 {
			// Single regexp filter, use the original RegexpFilter
			fe.FieldsRaw[field] = caddyconfig.JSONModuleObject(filters[0], "filter", "regexp", nil)
		} else {
			// Multiple regexp filters, merge into MultiRegexpFilter
			multiFilter := &MultiRegexpFilter{}
			for _, regexpFilter := range filters {
				err := multiFilter.AddOperation(regexpFilter.RawRegexp, regexpFilter.Value)
				if err != nil {
					return fmt.Errorf("adding regexp operation for field %s: %v", field, err)
				}
			}
			fe.FieldsRaw[field] = caddyconfig.JSONModuleObject(multiFilter, "filter", "multi_regexp", nil)
		}
	}

	return nil
}

// AddArray is part of the zapcore.ObjectEncoder interface.
// Array elements do not get filtered.
func (fe FilterEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
	if filter, ok := fe.Fields[fe.keyPrefix+key]; ok {
		filter.Filter(zap.Array(key, marshaler)).AddTo(fe.wrapped)
		return nil
	}
	return fe.wrapped.AddArray(key, marshaler)
}

// AddObject is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error {
	if fe.filtered(key, marshaler) {
		return nil
	}
	// extend the prefix so nested fields can be addressed as "outer>inner"
	fe.keyPrefix += key + ">"
	return fe.wrapped.AddObject(key, logObjectMarshalerWrapper{
		enc:   fe,
		marsh: marshaler,
	})
}

// AddBinary is part of the zapcore.ObjectEncoder interface.
// AddBinary is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddBinary(key string, value []byte) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddBinary(key, value)
	}
}

// AddByteString is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddByteString(key string, value []byte) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddByteString(key, value)
	}
}

// AddBool is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddBool(key string, value bool) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddBool(key, value)
	}
}

// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddComplex128(key string, value complex128) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddComplex128(key, value)
	}
}

// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddComplex64(key string, value complex64) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddComplex64(key, value)
	}
}

// AddDuration is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddDuration(key string, value time.Duration) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddDuration(key, value)
	}
}

// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddFloat64(key string, value float64) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddFloat64(key, value)
	}
}

// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddFloat32(key string, value float32) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddFloat32(key, value)
	}
}

// AddInt is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt(key string, value int) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddInt(key, value)
	}
}

// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt64(key string, value int64) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddInt64(key, value)
	}
}

// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt32(key string, value int32) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddInt32(key, value)
	}
}

// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt16(key string, value int16) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddInt16(key, value)
	}
}

// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt8(key string, value int8) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddInt8(key, value)
	}
}

// AddString is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddString(key, value string) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddString(key, value)
	}
}

// AddTime is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddTime(key string, value time.Time) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddTime(key, value)
	}
}

// AddUint is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint(key string, value uint) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUint(key, value)
	}
}

// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint64(key string, value uint64) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUint64(key, value)
	}
}

// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint32(key string, value uint32) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUint32(key, value)
	}
}

// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint16(key string, value uint16) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUint16(key, value)
	}
}

// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint8(key string, value uint8) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUint8(key, value)
	}
}

// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUintptr(key string, value uintptr) {
	if !fe.filtered(key, value) {
		fe.wrapped.AddUintptr(key, value)
	}
}

// AddReflected is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddReflected(key string, value any) error {
	if !fe.filtered(key, value) {
		return fe.wrapped.AddReflected(key, value)
	}
	return nil
}

// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) OpenNamespace(key string) {
	fe.wrapped.OpenNamespace(key)
}

// Clone is part of the zapcore.Encoder interface.
// We don't use it as of Oct 2019 (v2 beta 7), I'm not
// really sure what it'd be useful for in our case.
func (fe FilterEncoder) Clone() zapcore.Encoder {
	return FilterEncoder{
		Fields:    fe.Fields,
		wrapped:   fe.wrapped.Clone(),
		keyPrefix: fe.keyPrefix,
	}
}

// EncodeEntry partially implements the zapcore.Encoder interface.
func (fe FilterEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
	// without this clone and storing it to fe.wrapped, fields
	// from subsequent log entries get appended to previous
	// ones, and I'm not 100% sure why; see end of
	// https://github.com/uber-go/zap/issues/750
	fe.wrapped = fe.wrapped.Clone()
	for _, field := range fields {
		// route each field through fe so filters apply
		field.AddTo(fe)
	}
	return fe.wrapped.EncodeEntry(ent, nil)
}

// filtered returns true if the field was filtered.
// If true is returned, the field was filtered and
// added to the underlying encoder (so do not do
// that again). If false was returned, the field has
// not yet been added to the underlying encoder.
func (fe FilterEncoder) filtered(key string, value any) bool {
	filter, ok := fe.Fields[fe.keyPrefix+key]
	if !ok {
		return false
	}
	filter.Filter(zap.Any(key, value)).AddTo(fe.wrapped)
	return true
}

// logObjectMarshalerWrapper allows us to recursively
// filter fields of objects as they get encoded.
type logObjectMarshalerWrapper struct {
	// enc carries the FilterEncoder (with its extended keyPrefix)
	// into the nested object's marshaling.
	enc   FilterEncoder
	marsh zapcore.ObjectMarshaler
}

// MarshalLogObject implements the zapcore.ObjectMarshaler interface.
// It ignores the encoder it is given and marshals into the wrapped
// FilterEncoder instead, so nested fields are filtered too.
func (mom logObjectMarshalerWrapper) MarshalLogObject(_ zapcore.ObjectEncoder) error {
	return mom.marsh.MarshalLogObject(mom.enc)
}

// Interface guards
var (
	_ zapcore.Encoder                  = (*FilterEncoder)(nil)
	_ zapcore.ObjectMarshaler          = (*logObjectMarshalerWrapper)(nil)
	_ caddyfile.Unmarshaler            = (*FilterEncoder)(nil)
	_ caddy.ConfiguresFormatterDefault = (*FilterEncoder)(nil)
)

================================================
FILE: modules/logging/filters.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"strings"

	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(DeleteFilter{})
	caddy.RegisterModule(HashFilter{})
	caddy.RegisterModule(ReplaceFilter{})
	caddy.RegisterModule(IPMaskFilter{})
	caddy.RegisterModule(QueryFilter{})
	caddy.RegisterModule(CookieFilter{})
	caddy.RegisterModule(RegexpFilter{})
	caddy.RegisterModule(RenameFilter{})
	caddy.RegisterModule(MultiRegexpFilter{})
}

// LogFieldFilter can filter (or manipulate)
// a field in a log entry.
// LogFieldFilter can filter (or manipulate)
// a field in a log entry: it receives one
// zapcore.Field and returns the field to encode.
type LogFieldFilter interface {
	Filter(zapcore.Field) zapcore.Field
}

// DeleteFilter is a Caddy log field filter that
// deletes the field.
type DeleteFilter struct{}

// CaddyModule returns the Caddy module information.
func (DeleteFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.delete",
		New: func() caddy.Module { return new(DeleteFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// The delete filter takes no configuration.
func (DeleteFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	return nil
}

// Filter marks the field as skipped so the encoder drops it.
func (DeleteFilter) Filter(in zapcore.Field) zapcore.Field {
	out := in
	out.Type = zapcore.SkipType
	return out
}

// hash returns the first 4 bytes of the SHA-256 hash of the given data as hexadecimal
func hash(s string) string {
	digest := sha256.Sum256([]byte(s))
	return fmt.Sprintf("%.4x", digest)
}

// HashFilter is a Caddy log field filter that
// replaces the field with the initial 4 bytes
// of the SHA-256 hash of the content. Operates
// on string fields, or on arrays of strings
// where each string is hashed.
type HashFilter struct{}

// CaddyModule returns the Caddy module information.
func (HashFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.hash",
		New: func() caddy.Module { return new(HashFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// The hash filter takes no configuration.
func (f *HashFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	return nil
}

// Filter replaces the field's content with its hash. A string
// array is hashed element-wise; any other field is treated as
// a plain string.
func (f *HashFilter) Filter(in zapcore.Field) zapcore.Field {
	arr, isArray := in.Interface.(caddyhttp.LoggableStringArray)
	if !isArray {
		in.String = hash(in.String)
		return in
	}
	hashed := make(caddyhttp.LoggableStringArray, len(arr))
	for idx := range arr {
		hashed[idx] = hash(arr[idx])
	}
	in.Interface = hashed
	return in
}

// ReplaceFilter is a Caddy log field filter that
// replaces the field with the indicated string.
type ReplaceFilter struct {
	// Value is the replacement string for the filtered field.
	Value string `json:"value,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (ReplaceFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.replace",
		New: func() caddy.Module { return new(ReplaceFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Accepts an optional single argument: the replacement value.
func (f *ReplaceFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name
	if d.NextArg() {
		f.Value = d.Val()
	}
	return nil
}

// Filter filters the input field with the replacement value.
func (f *ReplaceFilter) Filter(in zapcore.Field) zapcore.Field {
	in.Type = zapcore.StringType
	in.String = f.Value
	return in
}

// IPMaskFilter is a Caddy log field filter that
// masks IP addresses in a string, or in an array
// of strings. The string may be a comma separated
// list of IP addresses, where all of the values
// will be masked.
type IPMaskFilter struct {
	// The IPv4 mask, as a subnet size CIDR.
	IPv4MaskRaw int `json:"ipv4_cidr,omitempty"`

	// The IPv6 mask, as a subnet size CIDR.
	IPv6MaskRaw int `json:"ipv6_cidr,omitempty"`

	// v4Mask and v6Mask are computed from the raw CIDR sizes in Provision.
	v4Mask net.IPMask
	v6Mask net.IPMask
}

// CaddyModule returns the Caddy module information.
func (IPMaskFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.ip_mask",
		New: func() caddy.Module { return new(IPMaskFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// The CIDR sizes may be given as up to two inline arguments
// (IPv4 then IPv6), or via "ipv4"/"ipv6" subdirectives.
func (m *IPMaskFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name

	args := d.RemainingArgs()
	if len(args) > 2 {
		return d.Errf("too many arguments")
	}
	if len(args) > 0 {
		val, err := strconv.Atoi(args[0])
		if err != nil {
			return d.Errf("error parsing %s: %v", args[0], err)
		}
		m.IPv4MaskRaw = val

		if len(args) > 1 {
			val, err := strconv.Atoi(args[1])
			if err != nil {
				return d.Errf("error parsing %s: %v", args[1], err)
			}
			m.IPv6MaskRaw = val
		}
	}

	for d.NextBlock(0) {
		switch d.Val() {
		case "ipv4":
			if !d.NextArg() {
				return d.ArgErr()
			}
			val, err := strconv.Atoi(d.Val())
			if err != nil {
				return d.Errf("error parsing %s: %v", d.Val(), err)
			}
			m.IPv4MaskRaw = val

		case "ipv6":
			if !d.NextArg() {
				return d.ArgErr()
			}
			val, err := strconv.Atoi(d.Val())
			if err != nil {
				return d.Errf("error parsing %s: %v", d.Val(), err)
			}
			m.IPv6MaskRaw = val

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}
	}
	return nil
}

// Provision parses m's IP masks, from integers.
func (m *IPMaskFilter) Provision(ctx caddy.Context) error {
	parseRawToMask := func(rawField int, bitLen int) net.IPMask {
		if rawField == 0 {
			// 0 means "not configured"; a nil mask leaves addresses as-is
			return nil
		}

		// we assume the int is a subnet size CIDR
		// e.g. "16" being equivalent to masking the last
		// two bytes of an ipv4 address, like "255.255.0.0"
		return net.CIDRMask(rawField, bitLen)
	}

	m.v4Mask = parseRawToMask(m.IPv4MaskRaw, 32)
	m.v6Mask = parseRawToMask(m.IPv6MaskRaw, 128)

	return nil
}

// Filter filters the input field.
// Filter filters the input field. String arrays are masked
// element-wise; any other field is treated as a plain string.
func (m IPMaskFilter) Filter(in zapcore.Field) zapcore.Field {
	if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
		newArray := make(caddyhttp.LoggableStringArray, len(array))
		for i, s := range array {
			newArray[i] = m.mask(s)
		}
		in.Interface = newArray
	} else {
		in.String = m.mask(in.String)
	}
	return in
}

// mask applies the configured masks to every comma-separated
// value in s. Values that are not IP addresses (with or without
// a port) are passed through unchanged.
func (m IPMaskFilter) mask(s string) string {
	parts := make([]string, 0)
	for value := range strings.SplitSeq(s, ",") {
		value = strings.TrimSpace(value)
		host, port, err := net.SplitHostPort(value)
		if err != nil {
			host = value // assume whole thing was IP address
		}
		ipAddr := net.ParseIP(host)
		if ipAddr == nil {
			// not an IP; keep the original value verbatim
			parts = append(parts, value)
			continue
		}
		mask := m.v4Mask
		if ipAddr.To4() == nil {
			mask = m.v6Mask
		}
		masked := ipAddr.Mask(mask)
		if port == "" {
			parts = append(parts, masked.String())
			continue
		}

		parts = append(parts, net.JoinHostPort(masked.String(), port))
	}
	return strings.Join(parts, ", ")
}

// filterAction enumerates what a query/cookie filter action does.
type filterAction string

const (
	// Replace value(s).
	replaceAction filterAction = "replace"

	// Hash value(s).
	hashAction filterAction = "hash"

	// Delete.
	deleteAction filterAction = "delete"
)

// IsValid reports whether a is one of the supported action types.
func (a filterAction) IsValid() error {
	switch a {
	case replaceAction, deleteAction, hashAction:
		return nil
	}

	return errors.New("invalid action type")
}

type queryFilterAction struct {
	// `replace` to replace the value(s) associated with the parameter(s), `hash` to replace them with the 4 initial bytes of the SHA-256 of their content or `delete` to remove them entirely.
	Type filterAction `json:"type"`

	// The name of the query parameter.
	Parameter string `json:"parameter"`

	// The value to use as replacement if the action is `replace`.
	Value string `json:"value,omitempty"`
}

// QueryFilter is a Caddy log field filter that filters
// query parameters from a URL.
//
// This filter updates the logged URL string to remove, replace or hash
// query parameters containing sensitive data.
// For instance, it can be
// used to redact any kind of secrets which were passed as query parameters,
// such as OAuth access tokens, session IDs, magic link tokens, etc.
type QueryFilter struct {
	// A list of actions to apply to the query parameters of the URL.
	Actions []queryFilterAction `json:"actions"`
}

// Validate checks that action types are correct.
func (f *QueryFilter) Validate() error {
	for _, a := range f.Actions {
		if err := a.Type.IsValid(); err != nil {
			return err
		}
	}
	return nil
}

// CaddyModule returns the Caddy module information.
func (QueryFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.query",
		New: func() caddy.Module { return new(QueryFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Each subdirective is an action: "replace <param> <value>",
// "hash <param>", or "delete <param>".
func (m *QueryFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name
	for d.NextBlock(0) {
		qfa := queryFilterAction{}
		switch d.Val() {
		case "replace":
			if !d.NextArg() {
				return d.ArgErr()
			}

			qfa.Type = replaceAction
			qfa.Parameter = d.Val()

			if !d.NextArg() {
				return d.ArgErr()
			}
			qfa.Value = d.Val()

		case "hash":
			if !d.NextArg() {
				return d.ArgErr()
			}

			qfa.Type = hashAction
			qfa.Parameter = d.Val()

		case "delete":
			if !d.NextArg() {
				return d.ArgErr()
			}

			qfa.Type = deleteAction
			qfa.Parameter = d.Val()

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}

		m.Actions = append(m.Actions, qfa)
	}
	return nil
}

// Filter filters the input field.
func (m QueryFilter) Filter(in zapcore.Field) zapcore.Field { if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok { newArray := make(caddyhttp.LoggableStringArray, len(array)) for i, s := range array { newArray[i] = m.processQueryString(s) } in.Interface = newArray } else { in.String = m.processQueryString(in.String) } return in } func (m QueryFilter) processQueryString(s string) string { u, err := url.Parse(s) if err != nil { return s } q := u.Query() for _, a := range m.Actions { switch a.Type { case replaceAction: for i := range q[a.Parameter] { q[a.Parameter][i] = a.Value } case hashAction: for i := range q[a.Parameter] { q[a.Parameter][i] = hash(a.Value) } case deleteAction: q.Del(a.Parameter) } } u.RawQuery = q.Encode() return u.String() } type cookieFilterAction struct { // `replace` to replace the value of the cookie, `hash` to replace it with the 4 initial bytes of the SHA-256 of its content or `delete` to remove it entirely. Type filterAction `json:"type"` // The name of the cookie. Name string `json:"name"` // The value to use as replacement if the action is `replace`. Value string `json:"value,omitempty"` } // CookieFilter is a Caddy log field filter that filters // cookies. // // This filter updates the logged HTTP header string // to remove, replace or hash cookies containing sensitive data. For instance, // it can be used to redact any kind of secrets, such as session IDs. // // If several actions are configured for the same cookie name, only the first // will be applied. type CookieFilter struct { // A list of actions to apply to the cookies. Actions []cookieFilterAction `json:"actions"` } // Validate checks that action types are correct. func (f *CookieFilter) Validate() error { for _, a := range f.Actions { if err := a.Type.IsValid(); err != nil { return err } } return nil } // CaddyModule returns the Caddy module information. 
func (CookieFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.cookie",
		New: func() caddy.Module { return new(CookieFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Each subdirective is an action: "replace <name> <value>",
// "hash <name>", or "delete <name>".
func (m *CookieFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name
	for d.NextBlock(0) {
		cfa := cookieFilterAction{}
		switch d.Val() {
		case "replace":
			if !d.NextArg() {
				return d.ArgErr()
			}

			cfa.Type = replaceAction
			cfa.Name = d.Val()

			if !d.NextArg() {
				return d.ArgErr()
			}
			cfa.Value = d.Val()

		case "hash":
			if !d.NextArg() {
				return d.ArgErr()
			}

			cfa.Type = hashAction
			cfa.Name = d.Val()

		case "delete":
			if !d.NextArg() {
				return d.ArgErr()
			}

			cfa.Type = deleteAction
			cfa.Name = d.Val()

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}

		m.Actions = append(m.Actions, cfa)
	}
	return nil
}

// Filter filters the input field. It expects the field to carry a
// LoggableStringArray of Cookie header values; anything else is
// returned unchanged.
func (m CookieFilter) Filter(in zapcore.Field) zapcore.Field {
	cookiesSlice, ok := in.Interface.(caddyhttp.LoggableStringArray)
	if !ok {
		return in
	}

	// using a dummy Request to make use of the Cookies() function to parse it
	originRequest := http.Request{Header: http.Header{"Cookie": cookiesSlice}}
	cookies := originRequest.Cookies()

	// rebuild the Cookie header from the (possibly modified) cookies
	transformedRequest := http.Request{Header: make(http.Header)}

OUTER:
	for _, c := range cookies {
		for _, a := range m.Actions {
			if c.Name != a.Name {
				continue
			}

			// only the first matching action applies to a cookie
			switch a.Type {
			case replaceAction:
				c.Value = a.Value
				transformedRequest.AddCookie(c)
				continue OUTER

			case hashAction:
				c.Value = hash(c.Value)
				transformedRequest.AddCookie(c)
				continue OUTER

			case deleteAction:
				// skip AddCookie entirely to drop the cookie
				continue OUTER
			}
		}

		// no action matched; keep the cookie as-is
		transformedRequest.AddCookie(c)
	}

	in.Interface = caddyhttp.LoggableStringArray(transformedRequest.Header["Cookie"])

	return in
}

// RegexpFilter is a Caddy log field filter that
// replaces the field matching the provided regexp
// with the indicated string. If the field is an
// array of strings, each of them will have the
// regexp replacement applied.
type RegexpFilter struct { // The regular expression pattern defining what to replace. RawRegexp string `json:"regexp,omitempty"` // The value to use as replacement Value string `json:"value,omitempty"` regexp *regexp.Regexp } // CaddyModule returns the Caddy module information. func (RegexpFilter) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.logging.encoders.filter.regexp", New: func() caddy.Module { return new(RegexpFilter) }, } } // UnmarshalCaddyfile sets up the module from Caddyfile tokens. func (f *RegexpFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { d.Next() // consume filter name if d.NextArg() { f.RawRegexp = d.Val() } if d.NextArg() { f.Value = d.Val() } return nil } // Provision compiles m's regexp. func (m *RegexpFilter) Provision(ctx caddy.Context) error { r, err := regexp.Compile(m.RawRegexp) if err != nil { return err } m.regexp = r return nil } // Filter filters the input field with the replacement value if it matches the regexp. func (f *RegexpFilter) Filter(in zapcore.Field) zapcore.Field { if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok { newArray := make(caddyhttp.LoggableStringArray, len(array)) for i, s := range array { newArray[i] = f.regexp.ReplaceAllString(s, f.Value) } in.Interface = newArray } else { in.String = f.regexp.ReplaceAllString(in.String, f.Value) } return in } // regexpFilterOperation represents a single regexp operation // within a MultiRegexpFilter. type regexpFilterOperation struct { // The regular expression pattern defining what to replace. RawRegexp string `json:"regexp,omitempty"` // The value to use as replacement Value string `json:"value,omitempty"` regexp *regexp.Regexp } // MultiRegexpFilter is a Caddy log field filter that // can apply multiple regular expression replacements to // the same field. This filter processes operations in the // order they are defined, applying each regexp replacement // sequentially to the result of the previous operation. 
//
// This allows users to define multiple regexp filters for
// the same field without them overwriting each other.
//
// Security considerations:
// - Uses Go's regexp package (RE2 engine) which is safe from ReDoS attacks
// - Validates all patterns during provisioning
// - Limits the maximum number of operations to prevent resource exhaustion
// - Sanitizes input to prevent injection attacks
type MultiRegexpFilter struct {
	// A list of regexp operations to apply in sequence.
	// Maximum of 50 operations allowed for security and performance.
	Operations []regexpFilterOperation `json:"operations"`
}

// Security constants
const (
	maxRegexpOperations = 50   // Maximum operations to prevent resource exhaustion
	maxPatternLength    = 1000 // Maximum pattern length to prevent abuse
)

// CaddyModule returns the Caddy module information.
func (MultiRegexpFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.multi_regexp",
		New: func() caddy.Module { return new(MultiRegexpFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// Syntax:
//
//	multi_regexp {
//	    regexp <pattern> <replacement>
//	    regexp <pattern> <replacement>
//	    ...
//	}
func (f *MultiRegexpFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name

	for d.NextBlock(0) {
		switch d.Val() {
		case "regexp":
			// Security check: limit number of operations
			if len(f.Operations) >= maxRegexpOperations {
				return d.Errf("too many regexp operations (maximum %d allowed)", maxRegexpOperations)
			}

			op := regexpFilterOperation{}
			if !d.NextArg() {
				return d.ArgErr()
			}
			op.RawRegexp = d.Val()

			// Security validation: check pattern length
			if len(op.RawRegexp) > maxPatternLength {
				return d.Errf("regexp pattern too long (maximum %d characters)", maxPatternLength)
			}

			// Security validation: basic pattern validation
			if op.RawRegexp == "" {
				return d.Errf("regexp pattern cannot be empty")
			}

			if !d.NextArg() {
				return d.ArgErr()
			}
			op.Value = d.Val()

			f.Operations = append(f.Operations, op)

		default:
			return d.Errf("unrecognized subdirective %s", d.Val())
		}
	}

	// Security check: ensure at least one operation is defined
	if len(f.Operations) == 0 {
		return d.Err("multi_regexp filter requires at least one regexp operation")
	}

	return nil
}

// Provision compiles all regexp patterns with security validation.
// Provision compiles every operation's pattern, re-checking the
// count, length, and non-empty constraints (JSON configs bypass
// UnmarshalCaddyfile, so the checks are repeated here).
func (f *MultiRegexpFilter) Provision(ctx caddy.Context) error {
	// Security check: validate operation count
	if len(f.Operations) > maxRegexpOperations {
		return fmt.Errorf("too many regexp operations: %d (maximum %d allowed)", len(f.Operations), maxRegexpOperations)
	}

	if len(f.Operations) == 0 {
		return fmt.Errorf("multi_regexp filter requires at least one operation")
	}

	for i := range f.Operations {
		// Security validation: pattern length check
		if len(f.Operations[i].RawRegexp) > maxPatternLength {
			return fmt.Errorf("regexp pattern %d too long: %d characters (maximum %d)", i, len(f.Operations[i].RawRegexp), maxPatternLength)
		}

		// Security validation: empty pattern check
		if f.Operations[i].RawRegexp == "" {
			return fmt.Errorf("regexp pattern %d cannot be empty", i)
		}

		// Compile and validate the pattern (uses RE2 engine - safe from ReDoS)
		r, err := regexp.Compile(f.Operations[i].RawRegexp)
		if err != nil {
			return fmt.Errorf("compiling regexp pattern %d (%s): %v", i, f.Operations[i].RawRegexp, err)
		}
		f.Operations[i].regexp = r
	}

	return nil
}

// Validate ensures the filter is properly configured with security checks.
// It also verifies that Provision has already compiled each pattern.
func (f *MultiRegexpFilter) Validate() error {
	if len(f.Operations) == 0 {
		return fmt.Errorf("multi_regexp filter requires at least one operation")
	}

	if len(f.Operations) > maxRegexpOperations {
		return fmt.Errorf("too many regexp operations: %d (maximum %d allowed)", len(f.Operations), maxRegexpOperations)
	}

	for i, op := range f.Operations {
		if op.RawRegexp == "" {
			return fmt.Errorf("regexp pattern %d cannot be empty", i)
		}
		if len(op.RawRegexp) > maxPatternLength {
			return fmt.Errorf("regexp pattern %d too long: %d characters (maximum %d)", i, len(op.RawRegexp), maxPatternLength)
		}
		if op.regexp == nil {
			return fmt.Errorf("regexp pattern %d not compiled (call Provision first)", i)
		}
	}

	return nil
}

// Filter applies all regexp operations sequentially to the input field.
// Input is sanitized and validated for security.
// Filter applies all regexp operations sequentially to the input
// field. String arrays are processed element-wise; any other field
// is treated as a plain string.
func (f *MultiRegexpFilter) Filter(in zapcore.Field) zapcore.Field {
	if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
		newArray := make(caddyhttp.LoggableStringArray, len(array))
		for i, s := range array {
			newArray[i] = f.processString(s)
		}
		in.Interface = newArray
	} else {
		in.String = f.processString(in.String)
	}
	return in
}

// processString applies all regexp operations to a single string with input validation.
func (f *MultiRegexpFilter) processString(s string) string {
	// Security: validate input string length to prevent resource exhaustion
	const maxInputLength = 1000000 // 1MB max input size
	if len(s) > maxInputLength {
		// Log warning but continue processing (truncated)
		s = s[:maxInputLength]
	}

	result := s
	for _, op := range f.Operations {
		// Each regexp operation is applied sequentially
		// Using RE2 engine which is safe from ReDoS attacks
		result = op.regexp.ReplaceAllString(result, op.Value)

		// Ensure result doesn't exceed max length after each operation
		if len(result) > maxInputLength {
			result = result[:maxInputLength]
		}
	}

	return result
}

// AddOperation adds a single regexp operation to the filter with validation.
// This is used when merging multiple RegexpFilter instances.
// Note: the pattern is only stored here; compilation happens in Provision.
func (f *MultiRegexpFilter) AddOperation(rawRegexp, value string) error {
	// Security checks
	if len(f.Operations) >= maxRegexpOperations {
		return fmt.Errorf("cannot add operation: maximum %d operations allowed", maxRegexpOperations)
	}

	if rawRegexp == "" {
		return fmt.Errorf("regexp pattern cannot be empty")
	}

	if len(rawRegexp) > maxPatternLength {
		return fmt.Errorf("regexp pattern too long: %d characters (maximum %d)", len(rawRegexp), maxPatternLength)
	}

	f.Operations = append(f.Operations, regexpFilterOperation{
		RawRegexp: rawRegexp,
		Value:     value,
	})

	return nil
}

// RenameFilter is a Caddy log field filter that
// renames the field's key with the indicated name.
type RenameFilter struct {
	// Name is the replacement key for the filtered field.
	Name string `json:"name,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (RenameFilter) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "caddy.logging.encoders.filter.rename",
		New: func() caddy.Module { return new(RenameFilter) },
	}
}

// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
// An absent argument leaves Name empty (the field key becomes "").
func (f *RenameFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume filter name
	if d.NextArg() {
		f.Name = d.Val()
	}
	return nil
}

// Filter renames the input field with the replacement name.
func (f *RenameFilter) Filter(in zapcore.Field) zapcore.Field {
	in.Key = f.Name
	return in
}

// Interface guards
var (
	_ LogFieldFilter = (*DeleteFilter)(nil)
	_ LogFieldFilter = (*HashFilter)(nil)
	_ LogFieldFilter = (*ReplaceFilter)(nil)
	_ LogFieldFilter = (*IPMaskFilter)(nil)
	_ LogFieldFilter = (*QueryFilter)(nil)
	_ LogFieldFilter = (*CookieFilter)(nil)
	_ LogFieldFilter = (*RegexpFilter)(nil)
	_ LogFieldFilter = (*RenameFilter)(nil)
	_ LogFieldFilter = (*MultiRegexpFilter)(nil)

	_ caddyfile.Unmarshaler = (*DeleteFilter)(nil)
	_ caddyfile.Unmarshaler = (*HashFilter)(nil)
	_ caddyfile.Unmarshaler = (*ReplaceFilter)(nil)
	_ caddyfile.Unmarshaler = (*IPMaskFilter)(nil)
	_ caddyfile.Unmarshaler = (*QueryFilter)(nil)
	_ caddyfile.Unmarshaler = (*CookieFilter)(nil)
	_ caddyfile.Unmarshaler = (*RegexpFilter)(nil)
	_ caddyfile.Unmarshaler = (*RenameFilter)(nil)
	_ caddyfile.Unmarshaler = (*MultiRegexpFilter)(nil)

	_ caddy.Provisioner = (*IPMaskFilter)(nil)
	_ caddy.Provisioner = (*RegexpFilter)(nil)
	_ caddy.Provisioner = (*MultiRegexpFilter)(nil)

	_ caddy.Validator = (*QueryFilter)(nil)
	_ caddy.Validator = (*MultiRegexpFilter)(nil)
)

================================================
FILE: modules/logging/filters_test.go
================================================

package logging

import (
	"fmt"
	"strings"
	"testing"

	"go.uber.org/zap/zapcore"

	"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp" ) func TestIPMaskSingleValue(t *testing.T) { f := IPMaskFilter{IPv4MaskRaw: 16, IPv6MaskRaw: 32} f.Provision(caddy.Context{}) out := f.Filter(zapcore.Field{String: "255.255.255.255"}) if out.String != "255.255.0.0" { t.Fatalf("field has not been filtered: %s", out.String) } out = f.Filter(zapcore.Field{String: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}) if out.String != "ffff:ffff::" { t.Fatalf("field has not been filtered: %s", out.String) } out = f.Filter(zapcore.Field{String: "not-an-ip"}) if out.String != "not-an-ip" { t.Fatalf("field has been filtered: %s", out.String) } } func TestIPMaskCommaValue(t *testing.T) { f := IPMaskFilter{IPv4MaskRaw: 16, IPv6MaskRaw: 32} f.Provision(caddy.Context{}) out := f.Filter(zapcore.Field{String: "255.255.255.255, 244.244.244.244"}) if out.String != "255.255.0.0, 244.244.0.0" { t.Fatalf("field has not been filtered: %s", out.String) } out = f.Filter(zapcore.Field{String: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff, ff00:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}) if out.String != "ffff:ffff::, ff00:ffff::" { t.Fatalf("field has not been filtered: %s", out.String) } out = f.Filter(zapcore.Field{String: "not-an-ip, 255.255.255.255"}) if out.String != "not-an-ip, 255.255.0.0" { t.Fatalf("field has not been filtered: %s", out.String) } } func TestIPMaskMultiValue(t *testing.T) { f := IPMaskFilter{IPv4MaskRaw: 16, IPv6MaskRaw: 32} f.Provision(caddy.Context{}) out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{ "255.255.255.255", "244.244.244.244", }}) arr, ok := out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Integer) } if arr[0] != "255.255.0.0" { t.Fatalf("field entry 0 has not been filtered: %s", arr[0]) } if arr[1] != "244.244.0.0" { t.Fatalf("field entry 1 has not been filtered: %s", arr[1]) } out = f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", 
"ff00:ffff:ffff:ffff:ffff:ffff:ffff:ffff", }}) arr, ok = out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Integer) } if arr[0] != "ffff:ffff::" { t.Fatalf("field entry 0 has not been filtered: %s", arr[0]) } if arr[1] != "ff00:ffff::" { t.Fatalf("field entry 1 has not been filtered: %s", arr[1]) } } func TestQueryFilterSingleValue(t *testing.T) { f := QueryFilter{[]queryFilterAction{ {replaceAction, "foo", "REDACTED"}, {replaceAction, "notexist", "REDACTED"}, {deleteAction, "bar", ""}, {deleteAction, "notexist", ""}, {hashAction, "hash", ""}, }} if f.Validate() != nil { t.Fatalf("the filter must be valid") } out := f.Filter(zapcore.Field{String: "/path?foo=a&foo=b&bar=c&bar=d&baz=e&hash=hashed"}) if out.String != "/path?baz=e&foo=REDACTED&foo=REDACTED&hash=e3b0c442" { t.Fatalf("query parameters have not been filtered: %s", out.String) } } func TestQueryFilterMultiValue(t *testing.T) { f := QueryFilter{ Actions: []queryFilterAction{ {Type: replaceAction, Parameter: "foo", Value: "REDACTED"}, {Type: replaceAction, Parameter: "notexist", Value: "REDACTED"}, {Type: deleteAction, Parameter: "bar"}, {Type: deleteAction, Parameter: "notexist"}, {Type: hashAction, Parameter: "hash"}, }, } if f.Validate() != nil { t.Fatalf("the filter must be valid") } out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{ "/path1?foo=a&foo=b&bar=c&bar=d&baz=e&hash=hashed", "/path2?foo=c&foo=d&bar=e&bar=f&baz=g&hash=hashed", }}) arr, ok := out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Interface) } expected1 := "/path1?baz=e&foo=REDACTED&foo=REDACTED&hash=e3b0c442" expected2 := "/path2?baz=g&foo=REDACTED&foo=REDACTED&hash=e3b0c442" if arr[0] != expected1 { t.Fatalf("query parameters in entry 0 have not been filtered correctly: got %s, expected %s", arr[0], expected1) } if arr[1] != expected2 { t.Fatalf("query parameters in entry 1 have not been filtered correctly: got %s, expected 
%s", arr[1], expected2) } } func TestValidateQueryFilter(t *testing.T) { f := QueryFilter{[]queryFilterAction{ {}, }} if f.Validate() == nil { t.Fatalf("empty action type must be invalid") } f = QueryFilter{[]queryFilterAction{ {Type: "foo"}, }} if f.Validate() == nil { t.Fatalf("unknown action type must be invalid") } } func TestCookieFilter(t *testing.T) { f := CookieFilter{[]cookieFilterAction{ {replaceAction, "foo", "REDACTED"}, {deleteAction, "bar", ""}, {hashAction, "hash", ""}, }} out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{ "foo=a; foo=b; bar=c; bar=d; baz=e; hash=hashed", }}) outval := out.Interface.(caddyhttp.LoggableStringArray) expected := caddyhttp.LoggableStringArray{ "foo=REDACTED; foo=REDACTED; baz=e; hash=1a06df82", } if outval[0] != expected[0] { t.Fatalf("cookies have not been filtered: %s", out.String) } } func TestValidateCookieFilter(t *testing.T) { f := CookieFilter{[]cookieFilterAction{ {}, }} if f.Validate() == nil { t.Fatalf("empty action type must be invalid") } f = CookieFilter{[]cookieFilterAction{ {Type: "foo"}, }} if f.Validate() == nil { t.Fatalf("unknown action type must be invalid") } } func TestRegexpFilterSingleValue(t *testing.T) { f := RegexpFilter{RawRegexp: `secret`, Value: "REDACTED"} f.Provision(caddy.Context{}) out := f.Filter(zapcore.Field{String: "foo-secret-bar"}) if out.String != "foo-REDACTED-bar" { t.Fatalf("field has not been filtered: %s", out.String) } } func TestRegexpFilterMultiValue(t *testing.T) { f := RegexpFilter{RawRegexp: `secret`, Value: "REDACTED"} f.Provision(caddy.Context{}) out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{"foo-secret-bar", "bar-secret-foo"}}) arr, ok := out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Integer) } if arr[0] != "foo-REDACTED-bar" { t.Fatalf("field entry 0 has not been filtered: %s", arr[0]) } if arr[1] != "bar-REDACTED-foo" { t.Fatalf("field entry 1 has not been filtered: %s", 
arr[1]) } } func TestHashFilterSingleValue(t *testing.T) { f := HashFilter{} out := f.Filter(zapcore.Field{String: "foo"}) if out.String != "2c26b46b" { t.Fatalf("field has not been filtered: %s", out.String) } } func TestHashFilterMultiValue(t *testing.T) { f := HashFilter{} out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{"foo", "bar"}}) arr, ok := out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Integer) } if arr[0] != "2c26b46b" { t.Fatalf("field entry 0 has not been filtered: %s", arr[0]) } if arr[1] != "fcde2b2e" { t.Fatalf("field entry 1 has not been filtered: %s", arr[1]) } } func TestMultiRegexpFilterSingleOperation(t *testing.T) { f := MultiRegexpFilter{ Operations: []regexpFilterOperation{ {RawRegexp: `secret`, Value: "REDACTED"}, }, } err := f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } out := f.Filter(zapcore.Field{String: "foo-secret-bar"}) if out.String != "foo-REDACTED-bar" { t.Fatalf("field has not been filtered: %s", out.String) } } func TestMultiRegexpFilterMultipleOperations(t *testing.T) { f := MultiRegexpFilter{ Operations: []regexpFilterOperation{ {RawRegexp: `secret`, Value: "REDACTED"}, {RawRegexp: `password`, Value: "HIDDEN"}, {RawRegexp: `token`, Value: "XXX"}, }, } err := f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } // Test sequential application out := f.Filter(zapcore.Field{String: "my-secret-password-token-data"}) expected := "my-REDACTED-HIDDEN-XXX-data" if out.String != expected { t.Fatalf("field has not been filtered correctly: got %s, expected %s", out.String, expected) } } func TestMultiRegexpFilterMultiValue(t *testing.T) { f := MultiRegexpFilter{ Operations: []regexpFilterOperation{ {RawRegexp: `secret`, Value: "REDACTED"}, {RawRegexp: `\d+`, Value: "NUM"}, }, } err := f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", 
err) } out := f.Filter(zapcore.Field{Interface: caddyhttp.LoggableStringArray{ "foo-secret-123", "bar-secret-456", }}) arr, ok := out.Interface.(caddyhttp.LoggableStringArray) if !ok { t.Fatalf("field is wrong type: %T", out.Interface) } if arr[0] != "foo-REDACTED-NUM" { t.Fatalf("field entry 0 has not been filtered: %s", arr[0]) } if arr[1] != "bar-REDACTED-NUM" { t.Fatalf("field entry 1 has not been filtered: %s", arr[1]) } } func TestMultiRegexpFilterAddOperation(t *testing.T) { f := MultiRegexpFilter{} err := f.AddOperation("secret", "REDACTED") if err != nil { t.Fatalf("unexpected error adding operation: %v", err) } err = f.AddOperation("password", "HIDDEN") if err != nil { t.Fatalf("unexpected error adding operation: %v", err) } err = f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } if len(f.Operations) != 2 { t.Fatalf("expected 2 operations, got %d", len(f.Operations)) } out := f.Filter(zapcore.Field{String: "my-secret-password"}) expected := "my-REDACTED-HIDDEN" if out.String != expected { t.Fatalf("field has not been filtered correctly: got %s, expected %s", out.String, expected) } } func TestMultiRegexpFilterSecurityLimits(t *testing.T) { f := MultiRegexpFilter{} // Test maximum operations limit for i := 0; i < 51; i++ { err := f.AddOperation(fmt.Sprintf("pattern%d", i), "replacement") if i < 50 { if err != nil { t.Fatalf("unexpected error adding operation %d: %v", i, err) } } else { if err == nil { t.Fatalf("expected error when adding operation %d (exceeds limit)", i) } } } // Test empty pattern validation f2 := MultiRegexpFilter{} err := f2.AddOperation("", "replacement") if err == nil { t.Fatalf("expected error for empty pattern") } // Test pattern length limit f3 := MultiRegexpFilter{} longPattern := strings.Repeat("a", 1001) err = f3.AddOperation(longPattern, "replacement") if err == nil { t.Fatalf("expected error for pattern exceeding length limit") } } func TestMultiRegexpFilterValidation(t 
*testing.T) { // Test validation with empty operations f := MultiRegexpFilter{} err := f.Validate() if err == nil { t.Fatalf("expected validation error for empty operations") } // Test validation with valid operations err = f.AddOperation("valid", "replacement") if err != nil { t.Fatalf("unexpected error adding operation: %v", err) } err = f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } err = f.Validate() if err != nil { t.Fatalf("unexpected validation error: %v", err) } } func TestMultiRegexpFilterInputSizeLimit(t *testing.T) { f := MultiRegexpFilter{ Operations: []regexpFilterOperation{ {RawRegexp: `test`, Value: "REPLACED"}, }, } err := f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } // Test with very large input (should be truncated) largeInput := strings.Repeat("test", 300000) // Creates ~1.2MB string out := f.Filter(zapcore.Field{String: largeInput}) // The input should be truncated to 1MB and still processed if len(out.String) > 1000000 { t.Fatalf("output string not truncated: length %d", len(out.String)) } // Should still contain replacements within the truncated portion if !strings.Contains(out.String, "REPLACED") { t.Fatalf("replacements not applied to truncated input") } } func TestMultiRegexpFilterOverlappingPatterns(t *testing.T) { f := MultiRegexpFilter{ Operations: []regexpFilterOperation{ {RawRegexp: `secret.*password`, Value: "SENSITIVE"}, {RawRegexp: `password`, Value: "HIDDEN"}, }, } err := f.Provision(caddy.Context{}) if err != nil { t.Fatalf("unexpected error provisioning: %v", err) } // The first pattern should match and replace the entire "secret...password" portion // Then the second pattern should not find "password" anymore since it was already replaced out := f.Filter(zapcore.Field{String: "my-secret-data-password-end"}) expected := "my-SENSITIVE-end" if out.String != expected { t.Fatalf("field has not been filtered correctly: got %s, 
expected %s", out.String, expected) } } ================================================ FILE: modules/logging/netwriter.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging import ( "fmt" "io" "net" "os" "sync" "time" "github.com/caddyserver/caddy/v2" "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" ) func init() { caddy.RegisterModule(NetWriter{}) } // NetWriter implements a log writer that outputs to a network socket. If // the socket goes down, it will dump logs to stderr while it attempts to // reconnect. type NetWriter struct { // The address of the network socket to which to connect. Address string `json:"address,omitempty"` // The timeout to wait while connecting to the socket. DialTimeout caddy.Duration `json:"dial_timeout,omitempty"` // If enabled, allow connections errors when first opening the // writer. The error and subsequent log entries will be reported // to stderr instead until a connection can be re-established. SoftStart bool `json:"soft_start,omitempty"` addr caddy.NetworkAddress } // CaddyModule returns the Caddy module information. func (NetWriter) CaddyModule() caddy.ModuleInfo { return caddy.ModuleInfo{ ID: "caddy.logging.writers.net", New: func() caddy.Module { return new(NetWriter) }, } } // Provision sets up the module. 
// Provision expands placeholders in the address and parses it into
// nw.addr. Exactly one port is required, and the dial timeout must be
// non-negative.
func (nw *NetWriter) Provision(ctx caddy.Context) error {
	repl := caddy.NewReplacer()
	address, err := repl.ReplaceOrErr(nw.Address, true, true)
	if err != nil {
		return fmt.Errorf("invalid host in address: %v", err)
	}

	nw.addr, err = caddy.ParseNetworkAddress(address)
	if err != nil {
		return fmt.Errorf("parsing network address '%s': %v", address, err)
	}

	if nw.addr.PortRangeSize() != 1 {
		return fmt.Errorf("multiple ports not supported")
	}

	if nw.DialTimeout < 0 {
		return fmt.Errorf("timeout cannot be less than 0")
	}

	return nil
}

func (nw NetWriter) String() string {
	return nw.addr.String()
}

// WriterKey returns a unique key representing this nw.
func (nw NetWriter) WriterKey() string {
	return nw.addr.String()
}

// OpenWriter opens a new network connection. With SoftStart enabled, a
// failed initial dial is reported to stderr instead of aborting config
// load; the returned writer will keep retrying (see redialerConn).
func (nw NetWriter) OpenWriter() (io.WriteCloser, error) {
	reconn := &redialerConn{
		nw:      nw,
		timeout: time.Duration(nw.DialTimeout),
	}
	conn, err := reconn.dial()
	if err != nil {
		if !nw.SoftStart {
			return nil, err
		}
		// don't block config load if remote is down or some other external problem;
		// we can dump logs to stderr for now (see issue #5520)
		fmt.Fprintf(os.Stderr, "[ERROR] net log writer failed to connect: %v (will retry connection and print errors here in the meantime)\n", err)
	}
	reconn.connMu.Lock()
	reconn.Conn = conn
	reconn.connMu.Unlock()
	return reconn, nil
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	net <address> {
//	    dial_timeout <duration>
//	    soft_start
//	}
func (nw *NetWriter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume writer name
	if !d.NextArg() {
		return d.ArgErr()
	}
	nw.Address = d.Val()
	if d.NextArg() {
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "dial_timeout":
			if !d.NextArg() {
				return d.ArgErr()
			}
			timeout, err := caddy.ParseDuration(d.Val())
			if err != nil {
				return d.Errf("invalid duration: %s", d.Val())
			}
			if d.NextArg() {
				return d.ArgErr()
			}
			nw.DialTimeout = caddy.Duration(timeout)
		case "soft_start":
			if d.NextArg() {
				return d.ArgErr()
			}
			nw.SoftStart = true
		default:
			return d.Errf("unrecognized subdirective '%s'", d.Val())
		}
	}
	return nil
}

// redialerConn wraps an underlying Conn so that if any
// writes fail, the connection is redialed and the write
// is retried.
type redialerConn struct {
	net.Conn
	connMu     sync.RWMutex // guards the embedded Conn
	nw         NetWriter
	timeout    time.Duration
	lastRedial time.Time // only touched while connMu is write-locked
}

// Write wraps the underlying Conn.Write method, but if that fails,
// it will re-dial the connection anew and try writing again.
// Redials are rate-limited to one every 10 seconds; in between,
// failed writes fall back to stderr so no log entry is lost.
func (reconn *redialerConn) Write(b []byte) (n int, err error) {
	// Fast path: try the current connection under a read lock.
	reconn.connMu.RLock()
	conn := reconn.Conn
	reconn.connMu.RUnlock()
	if conn != nil {
		if n, err = conn.Write(b); err == nil {
			return n, err
		}
	}

	// problem with the connection - lock it and try to fix it
	reconn.connMu.Lock()
	defer reconn.connMu.Unlock()

	// if multiple concurrent writes failed on the same broken conn, then
	// one of them might have already re-dialed by now; try writing again
	if reconn.Conn != nil {
		if n, err = reconn.Conn.Write(b); err == nil {
			return n, err
		}
	}

	// there's still a problem, so try to re-attempt dialing the socket
	// if some time has passed in which the issue could have potentially
	// been resolved - we don't want to block at every single log
	// emission (!) - see discussion in #4111
	if time.Since(reconn.lastRedial) > 10*time.Second {
		reconn.lastRedial = time.Now()
		conn2, err2 := reconn.dial()
		if err2 != nil {
			// logger socket still offline; instead of discarding the log, dump it to stderr
			os.Stderr.Write(b)
			return n, err
		}
		if n, err = conn2.Write(b); err == nil {
			// new conn works; close the broken one and swap it in
			if reconn.Conn != nil {
				reconn.Conn.Close()
			}
			reconn.Conn = conn2
		}
	} else {
		// last redial attempt was too recent; just dump to stderr for now
		os.Stderr.Write(b)
	}

	return n, err
}

// dial opens a fresh connection to the configured address.
func (reconn *redialerConn) dial() (net.Conn, error) {
	return net.DialTimeout(reconn.nw.addr.Network, reconn.nw.addr.JoinHostPort(0), reconn.timeout)
}

// Interface guards
var (
	_ caddy.Provisioner     = (*NetWriter)(nil)
	_ caddy.WriterOpener    = (*NetWriter)(nil)
	_ caddyfile.Unmarshaler = (*NetWriter)(nil)
)

================================================
FILE: modules/logging/nopencoder.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logging

import (
	"time"

	"go.uber.org/zap/buffer"
	"go.uber.org/zap/zapcore"
)

// nopEncoder is a zapcore.Encoder that does nothing.
type nopEncoder struct{}

// AddArray is part of the zapcore.ObjectEncoder interface.
// Array elements do not get filtered.
func (nopEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { return nil }

// AddObject is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error { return nil }

// AddBinary is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddBinary(key string, value []byte) {}

// AddByteString is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddByteString(key string, value []byte) {}

// AddBool is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddBool(key string, value bool) {}

// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddComplex128(key string, value complex128) {}

// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddComplex64(key string, value complex64) {}

// AddDuration is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddDuration(key string, value time.Duration) {}

// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddFloat64(key string, value float64) {}

// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddFloat32(key string, value float32) {}

// AddInt is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt(key string, value int) {}

// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt64(key string, value int64) {}

// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt32(key string, value int32) {}

// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt16(key string, value int16) {}

// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt8(key string, value int8) {}

// AddString is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddString(key, value string) {}

// AddTime is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddTime(key string, value time.Time) {}

// AddUint is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint(key string, value uint) {}

// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint64(key string, value uint64) {}

// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint32(key string, value uint32) {}

// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint16(key string, value uint16) {}

// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint8(key string, value uint8) {}

// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUintptr(key string, value uintptr) {}

// AddReflected is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddReflected(key string, value any) error { return nil }

// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) OpenNamespace(key string) {}

// Clone is part of the zapcore.ObjectEncoder interface.
// We don't use it as of Oct 2019 (v2 beta 7), I'm not
// really sure what it'd be useful for in our case.
func (ne nopEncoder) Clone() zapcore.Encoder { return ne }

// EncodeEntry partially implements the zapcore.Encoder interface.
// It returns an empty pooled buffer, i.e. it encodes nothing.
func (nopEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
	return bufferpool.Get(), nil
}

// Interface guard
var _ zapcore.Encoder = (*nopEncoder)(nil)

================================================
FILE: modules/metrics/adminmetrics.go
================================================

// Copyright 2020 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"errors"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/caddyserver/caddy/v2"
)

func init() {
	caddy.RegisterModule(AdminMetrics{})
}

// AdminMetrics is a module that serves a metrics endpoint so that any gathered
// metrics can be exposed for scraping. This module is not configurable, and
// is permanently mounted to the admin API endpoint at "/metrics".
// See the Metrics module for a configurable endpoint that is usable if the
// Admin API is disabled.
type AdminMetrics struct {
	registry *prometheus.Registry

	metricsHandler http.Handler
}

// CaddyModule returns the Caddy module information.
func (AdminMetrics) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "admin.api.metrics",
		New: func() caddy.Module { return new(AdminMetrics) },
	}
}

// Provision retrieves the shared metrics registry from the context and
// builds the HTTP handler that serves it (no logger, no OpenMetrics).
func (m *AdminMetrics) Provision(ctx caddy.Context) error {
	m.registry = ctx.GetMetricsRegistry()
	if m.registry == nil {
		return errors.New("no metrics registry found")
	}
	m.metricsHandler = createMetricsHandler(nil, false, m.registry)
	return nil
}

// Routes returns a route for the /metrics endpoint.
func (m *AdminMetrics) Routes() []caddy.AdminRoute {
	return []caddy.AdminRoute{{Pattern: "/metrics", Handler: caddy.AdminHandlerFunc(m.serveHTTP)}}
}

// serveHTTP delegates to the prepared Prometheus handler.
func (m *AdminMetrics) serveHTTP(w http.ResponseWriter, r *http.Request) error {
	m.metricsHandler.ServeHTTP(w, r)
	return nil
}

// Interface guards
var (
	_ caddy.Provisioner = (*AdminMetrics)(nil)
	_ caddy.AdminRouter = (*AdminMetrics)(nil)
)

================================================
FILE: modules/metrics/metrics.go
================================================

// Copyright 2020 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"errors"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	caddy.RegisterModule(Metrics{})
	httpcaddyfile.RegisterHandlerDirective("metrics", parseCaddyfile)
}

// Metrics is a module that serves a /metrics endpoint so that any gathered
// metrics can be exposed for scraping. This module is configurable by end-users
// unlike AdminMetrics.
type Metrics struct {
	metricsHandler http.Handler

	// Disable OpenMetrics negotiation, enabled by default. May be necessary if
	// the produced metrics cannot be parsed by the service scraping metrics.
	DisableOpenMetrics bool `json:"disable_openmetrics,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (Metrics) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "http.handlers.metrics",
		New: func() caddy.Module { return new(Metrics) },
	}
}

// zapLogger adapts a *zap.Logger to promhttp.Logger.
type zapLogger struct {
	zl *zap.Logger
}

func (l *zapLogger) Println(v ...any) {
	l.zl.Sugar().Error(v...)
}

// Provision sets up m.
func (m *Metrics) Provision(ctx caddy.Context) error {
	log := ctx.Logger()
	registry := ctx.GetMetricsRegistry()
	if registry == nil {
		return errors.New("no metrics registry found")
	}
	m.metricsHandler = createMetricsHandler(&zapLogger{log}, !m.DisableOpenMetrics, registry)
	return nil
}

// parseCaddyfile builds a Metrics handler from Caddyfile tokens.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	var m Metrics
	err := m.UnmarshalCaddyfile(h.Dispenser)
	return m, err
}

// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
//	metrics [<matcher>] {
//	    disable_openmetrics
//	}
func (m *Metrics) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	d.Next() // consume directive name
	args := d.RemainingArgs()
	if len(args) > 0 {
		return d.ArgErr()
	}
	for d.NextBlock(0) {
		switch d.Val() {
		case "disable_openmetrics":
			m.DisableOpenMetrics = true
		default:
			return d.Errf("unrecognized subdirective %q", d.Val())
		}
	}
	return nil
}

func (m Metrics) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	m.metricsHandler.ServeHTTP(w, r)
	return nil
}

// Interface guards
var (
	_ caddy.Provisioner           = (*Metrics)(nil)
	_ caddyhttp.MiddlewareHandler = (*Metrics)(nil)
	_ caddyfile.Unmarshaler       = (*Metrics)(nil)
)

// createMetricsHandler wraps the registry in an instrumented promhttp handler.
func createMetricsHandler(logger promhttp.Logger, enableOpenMetrics bool, registry *prometheus.Registry) http.Handler {
	return promhttp.InstrumentMetricHandler(registry,
		promhttp.HandlerFor(registry, promhttp.HandlerOpts{
			// will only log errors if logger is non-nil
			ErrorLog: logger,

			// Allow OpenMetrics format to be negotiated - largely compatible,
			// except quantile/le label values always have a decimal.
			EnableOpenMetrics: enableOpenMetrics,
		}),
	)
}

================================================
FILE: modules/metrics/metrics_test.go
================================================

package metrics

import (
	"testing"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

// TestMetricsUnmarshalCaddyfile covers argument/subdirective handling.
func TestMetricsUnmarshalCaddyfile(t *testing.T) {
	m := &Metrics{}
	d := caddyfile.NewTestDispenser(`metrics bogus`)
	err := m.UnmarshalCaddyfile(d)
	if err == nil {
		t.Errorf("expected error")
	}

	m = &Metrics{}
	d = caddyfile.NewTestDispenser(`metrics`)
	err = m.UnmarshalCaddyfile(d)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if m.DisableOpenMetrics {
		t.Errorf("DisableOpenMetrics should've been false: %v", m.DisableOpenMetrics)
	}

	m = &Metrics{}
	d = caddyfile.NewTestDispenser(`metrics { disable_openmetrics }`)
	err = m.UnmarshalCaddyfile(d)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if !m.DisableOpenMetrics {
		t.Errorf("DisableOpenMetrics should've been true: %v", m.DisableOpenMetrics)
	}

	m = &Metrics{}
	d = caddyfile.NewTestDispenser(`metrics { bogus }`)
	err = m.UnmarshalCaddyfile(d)
	if err == nil {
		t.Errorf("expected error: %v", err)
	}
}

================================================
FILE: modules/standard/imports.go
================================================

package standard

import (
	// standard Caddy modules
	_ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	_ "github.com/caddyserver/caddy/v2/modules/caddyevents"
	_ "github.com/caddyserver/caddy/v2/modules/caddyevents/eventsconfig"
	_ "github.com/caddyserver/caddy/v2/modules/caddyfs"
	_ "github.com/caddyserver/caddy/v2/modules/caddyhttp/standard"
	_ "github.com/caddyserver/caddy/v2/modules/caddypki"
	_ "github.com/caddyserver/caddy/v2/modules/caddypki/acmeserver"
	_ "github.com/caddyserver/caddy/v2/modules/caddytls"
	_ "github.com/caddyserver/caddy/v2/modules/caddytls/distributedstek"
	_ "github.com/caddyserver/caddy/v2/modules/caddytls/standardstek"
	_
"github.com/caddyserver/caddy/v2/modules/filestorage" _ "github.com/caddyserver/caddy/v2/modules/logging" _ "github.com/caddyserver/caddy/v2/modules/metrics" ) ================================================ FILE: modules.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "bytes" "encoding/json" "fmt" "net/http" "net/url" "reflect" "sort" "strings" "sync" ) // Module is a type that is used as a Caddy module. In // addition to this interface, most modules will implement // some interface expected by their host module in order // to be useful. To learn which interface(s) to implement, // see the documentation for the host module. At a bare // minimum, this interface, when implemented, only provides // the module's ID and constructor function. // // Modules will often implement additional interfaces // including Provisioner, Validator, and CleanerUpper. // If a module implements these interfaces, their // methods are called during the module's lifespan. // // When a module is loaded by a host module, the following // happens: 1) ModuleInfo.New() is called to get a new // instance of the module. 2) The module's configuration is // unmarshaled into that instance. 3) If the module is a // Provisioner, the Provision() method is called. 4) If the // module is a Validator, the Validate() method is called. 
// 5) The module will probably be type-asserted from
// 'any' to some other, more useful interface expected
// by the host module. For example, HTTP handler modules are
// type-asserted as caddyhttp.MiddlewareHandler values.
// 6) When a module's containing Context is canceled, if it is
// a CleanerUpper, its Cleanup() method is called.
type Module interface {
	// This method indicates that the type is a Caddy
	// module. The returned ModuleInfo must have both
	// a name and a constructor function. This method
	// must not have any side-effects.
	CaddyModule() ModuleInfo
}

// ModuleInfo represents a registered Caddy module.
type ModuleInfo struct {
	// ID is the "full name" of the module. It
	// must be unique and properly namespaced.
	ID ModuleID

	// New returns a pointer to a new, empty
	// instance of the module's type. This
	// method must not have any side-effects,
	// and no other initialization should
	// occur within it. Any initialization
	// of the returned value should be done
	// in a Provision() method (see the
	// Provisioner interface).
	New func() Module
}

// ModuleID is a string that uniquely identifies a Caddy module. A
// module ID is lightly structured. It consists of dot-separated
// labels which form a simple hierarchy from left to right. The last
// label is the module name, and the labels before that constitute
// the namespace (or scope).
//
// Thus, a module ID has the form: <namespace>.<name>
//
// An ID with no dot has the empty namespace, which is appropriate
// for app modules (these are "top-level" modules that Caddy core
// loads and runs).
//
// Module IDs should be lowercase and use underscores (_) instead of
// spaces.
//
// Examples of valid IDs:
//   - http
//   - http.handlers.file_server
//   - caddy.logging.encoders.json
type ModuleID string

// Namespace returns the namespace (or scope) portion of a module ID,
// which is all but the last label of the ID. If the ID has only one
// label, then the namespace is empty.
func (id ModuleID) Namespace() string { lastDot := strings.LastIndex(string(id), ".") if lastDot < 0 { return "" } return string(id)[:lastDot] } // Name returns the Name (last element) of a module ID. func (id ModuleID) Name() string { if id == "" { return "" } parts := strings.Split(string(id), ".") return parts[len(parts)-1] } func (mi ModuleInfo) String() string { return string(mi.ID) } // ModuleMap is a map that can contain multiple modules, // where the map key is the module's name. (The namespace // is usually read from an associated field's struct tag.) // Because the module's name is given as the key in a // module map, the name does not have to be given in the // json.RawMessage. type ModuleMap map[string]json.RawMessage // RegisterModule registers a module by receiving a // plain/empty value of the module. For registration to // be properly recorded, this should be called in the // init phase of runtime. Typically, the module package // will do this as a side-effect of being imported. // This function panics if the module's info is // incomplete or invalid, or if the module is already // registered. func RegisterModule(instance Module) { mod := instance.CaddyModule() if mod.ID == "" { panic("module ID missing") } if mod.ID == "caddy" || mod.ID == "admin" { panic(fmt.Sprintf("module ID '%s' is reserved", mod.ID)) } if mod.New == nil { panic("missing ModuleInfo.New") } if val := mod.New(); val == nil { panic("ModuleInfo.New must return a non-nil module instance") } modulesMu.Lock() defer modulesMu.Unlock() if _, ok := modules[string(mod.ID)]; ok { panic(fmt.Sprintf("module already registered: %s", mod.ID)) } modules[string(mod.ID)] = mod } // GetModule returns module information from its ID (full name). 
func GetModule(name string) (ModuleInfo, error) { modulesMu.RLock() defer modulesMu.RUnlock() m, ok := modules[name] if !ok { return ModuleInfo{}, fmt.Errorf("module not registered: %s", name) } return m, nil } // GetModuleName returns a module's name (the last label of its ID) // from an instance of its value. If the value is not a module, an // empty string will be returned. func GetModuleName(instance any) string { var name string if mod, ok := instance.(Module); ok { name = mod.CaddyModule().ID.Name() } return name } // GetModuleID returns a module's ID from an instance of its value. // If the value is not a module, an empty string will be returned. func GetModuleID(instance any) string { var id string if mod, ok := instance.(Module); ok { id = string(mod.CaddyModule().ID) } return id } // GetModules returns all modules in the given scope/namespace. // For example, a scope of "foo" returns modules named "foo.bar", // "foo.loo", but not "bar", "foo.bar.loo", etc. An empty scope // returns top-level modules, for example "foo" or "bar". Partial // scopes are not matched (i.e. scope "foo.ba" does not match // name "foo.bar"). // // Because modules are registered to a map under the hood, the // returned slice will be sorted to keep it deterministic. 
func GetModules(scope string) []ModuleInfo { modulesMu.RLock() defer modulesMu.RUnlock() scopeParts := strings.Split(scope, ".") // handle the special case of an empty scope, which // should match only the top-level modules if scope == "" { scopeParts = []string{} } var mods []ModuleInfo iterateModules: for id, m := range modules { modParts := strings.Split(id, ".") // match only the next level of nesting if len(modParts) != len(scopeParts)+1 { continue } // specified parts must be exact matches for i := range scopeParts { if modParts[i] != scopeParts[i] { continue iterateModules } } mods = append(mods, m) } // make return value deterministic sort.Slice(mods, func(i, j int) bool { return mods[i].ID < mods[j].ID }) return mods } // Modules returns the names of all registered modules // in ascending lexicographical order. func Modules() []string { modulesMu.RLock() defer modulesMu.RUnlock() names := make([]string, 0, len(modules)) for name := range modules { names = append(names, name) } sort.Strings(names) return names } // getModuleNameInline loads the string value from raw of moduleNameKey, // where raw must be a JSON encoding of a map. It returns that value, // along with the result of removing that key from raw. 
func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) { var tmp map[string]any err := json.Unmarshal(raw, &tmp) if err != nil { return "", nil, err } moduleName, ok := tmp[moduleNameKey].(string) if !ok || moduleName == "" { return "", nil, fmt.Errorf("module name not specified with key '%s' in %+v", moduleNameKey, tmp) } // remove key from the object, otherwise decoding it later // will yield an error because the struct won't recognize it // (this is only needed because we strictly enforce that // all keys are recognized when loading modules) delete(tmp, moduleNameKey) result, err := json.Marshal(tmp) if err != nil { return "", nil, fmt.Errorf("re-encoding module configuration: %v", err) } return moduleName, result, nil } // Provisioner is implemented by modules which may need to perform // some additional "setup" steps immediately after being loaded. // Provisioning should be fast (imperceptible running time). If // any side-effects result in the execution of this function (e.g. // creating global state, any other allocations which require // garbage collection, opening files, starting goroutines etc.), // be sure to clean up properly by implementing the CleanerUpper // interface to avoid leaking resources. type Provisioner interface { Provision(Context) error } // Validator is implemented by modules which can verify that their // configurations are valid. This method will be called after // Provision() (if implemented). Validation should always be fast // (imperceptible running time) and an error must be returned if // the module's configuration is invalid. type Validator interface { Validate() error } // CleanerUpper is implemented by modules which may have side-effects // such as opened files, spawned goroutines, or allocated some sort // of non-stack state when they were provisioned. This method should // deallocate/cleanup those resources to prevent memory leaks. Cleanup // should be fast and efficient. 
Cleanup should work even if Provision // returns an error, to allow cleaning up from partial provisionings. type CleanerUpper interface { Cleanup() error } // ParseStructTag parses a caddy struct tag into its keys and values. // It is very simple. The expected syntax is: // `caddy:"key1=val1 key2=val2 ..."` func ParseStructTag(tag string) (map[string]string, error) { results := make(map[string]string) pairs := strings.Split(tag, " ") for i, pair := range pairs { if pair == "" { continue } before, after, isCut := strings.Cut(pair, "=") if !isCut { return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i) } results[before] = after } return results, nil } // StrictUnmarshalJSON is like json.Unmarshal but returns an error // if any of the fields are unrecognized. Useful when decoding // module configurations, where you want to be more sure they're // correct. func StrictUnmarshalJSON(data []byte, v any) error { dec := json.NewDecoder(bytes.NewReader(data)) dec.DisallowUnknownFields() err := dec.Decode(v) if jsonErr, ok := err.(*json.SyntaxError); ok { return fmt.Errorf("%w, at offset %d", jsonErr, jsonErr.Offset) } return err } var JSONRawMessageType = reflect.TypeFor[json.RawMessage]() // isJSONRawMessage returns true if the type is encoding/json.RawMessage. func isJSONRawMessage(typ reflect.Type) bool { return typ == JSONRawMessageType } // isModuleMapType returns true if the type is map[string]json.RawMessage. // It assumes that the string key is the module name, but this is not // always the case. To know for sure, this function must return true, but // also the struct tag where this type appears must NOT define an inline_key // attribute, which would mean that the module names appear inline with the // values, not in the key. 
func isModuleMapType(typ reflect.Type) bool {
	return typ.Kind() == reflect.Map &&
		typ.Key().Kind() == reflect.String &&
		isJSONRawMessage(typ.Elem())
}

// ProxyFuncProducer is implemented by modules which produce a
// function that returns a URL to use as network proxy. Modules
// in the namespace `caddy.network_proxy` must implement this
// interface.
type ProxyFuncProducer interface {
	ProxyFunc() func(*http.Request) (*url.URL, error)
}

var (
	// modules is the process-wide registry of registered modules,
	// keyed by full module ID; guarded by modulesMu.
	modules   = make(map[string]ModuleInfo)
	modulesMu sync.RWMutex
)

================================================
FILE: modules_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy

import (
	"reflect"
	"testing"
)

// TestGetModules verifies namespace-scoped lookup and the
// deterministic (sorted) ordering of GetModules results.
func TestGetModules(t *testing.T) {
	// install a fixed registry so results are predictable
	// (NOTE: this replaces the process-wide module registry)
	modulesMu.Lock()
	modules = map[string]ModuleInfo{
		"a":      {ID: "a"},
		"a.b":    {ID: "a.b"},
		"a.b.c":  {ID: "a.b.c"},
		"a.b.cd": {ID: "a.b.cd"},
		"a.c":    {ID: "a.c"},
		"a.d":    {ID: "a.d"},
		"b":      {ID: "b"},
		"b.a":    {ID: "b.a"},
		"b.b":    {ID: "b.b"},
		"b.a.c":  {ID: "b.a.c"},
		"c":      {ID: "c"},
	}
	modulesMu.Unlock()

	for i, tc := range []struct {
		input  string
		expect []ModuleInfo
	}{
		{
			input: "",
			expect: []ModuleInfo{
				{ID: "a"},
				{ID: "b"},
				{ID: "c"},
			},
		},
		{
			input: "a",
			expect: []ModuleInfo{
				{ID: "a.b"},
				{ID: "a.c"},
				{ID: "a.d"},
			},
		},
		{
			input: "a.b",
			expect: []ModuleInfo{
				{ID: "a.b.c"},
				{ID: "a.b.cd"},
			},
		},
		{
			// scope with no children: expect is the nil slice
			input: "a.b.c",
		},
		{
			input: "b",
			expect: []ModuleInfo{
				{ID: "b.a"},
				{ID: "b.b"},
			},
		},
		{
			// unknown scope: expect is the nil slice
			input: "asdf",
		},
	} {
		actual := GetModules(tc.input)
		if !reflect.DeepEqual(actual, tc.expect) {
			t.Errorf("Test %d: Expected %v but got %v", i, tc.expect, actual)
		}
	}
}

// TestModuleID checks the Namespace/Name decomposition of module IDs.
func TestModuleID(t *testing.T) {
	for i, tc := range []struct {
		input           ModuleID
		expectNamespace string
		expectName      string
	}{
		{
			input:           "foo",
			expectNamespace: "",
			expectName:      "foo",
		},
		{
			input:           "foo.bar",
			expectNamespace: "foo",
			expectName:      "bar",
		},
		{
			input:           "a.b.c",
			expectNamespace: "a.b",
			expectName:      "c",
		},
	} {
		actualNamespace := tc.input.Namespace()
		if actualNamespace != tc.expectNamespace {
			t.Errorf("Test %d: Expected namespace '%s' but got '%s'", i, tc.expectNamespace, actualNamespace)
		}
		actualName := tc.input.Name()
		if actualName != tc.expectName {
			t.Errorf("Test %d: Expected name '%s' but got '%s'", i, tc.expectName, actualName)
		}
	}
}

================================================
FILE: notify/notify_linux.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package notify provides facilities for notifying process managers
// of state changes, mainly for when running as a system service.
package notify

import (
	"fmt"
	"net"
	"os"
	"strings"
)

// The documentation about this IPC protocol is available here:
// https://www.freedesktop.org/software/systemd/man/sd_notify.html

// sdNotify sends payload as a single datagram to systemd's
// notification socket. It is a no-op (returning nil) when the
// process was not started under systemd's notify supervision
// (i.e. NOTIFY_SOCKET was not set; see socketPath below).
func sdNotify(payload string) error {
	if socketPath == "" {
		return nil
	}

	socketAddr := &net.UnixAddr{
		Name: socketPath,
		Net:  "unixgram",
	}

	// a fresh datagram connection per notification; these are
	// infrequent (startup/reload/stop), so no need to cache it
	conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
	if err != nil {
		return err
	}
	defer conn.Close()

	_, err = conn.Write([]byte(payload))
	return err
}

// Ready notifies systemd that caddy has finished its
// initialization routines.
func Ready() error {
	return sdNotify("READY=1")
}

// Reloading notifies systemd that caddy is reloading its config.
func Reloading() error {
	return sdNotify("RELOADING=1")
}

// Stopping notifies systemd that caddy is stopping.
func Stopping() error {
	return sdNotify("STOPPING=1")
}

// Status sends systemd an updated status message.
func Status(msg string) error {
	return sdNotify("STATUS=" + msg)
}

// Error is like Status, but sends systemd an error message
// instead, with an optional errno-style error number.
func Error(err error, errno int) error { collapsedErr := strings.ReplaceAll(err.Error(), "\n", " ") msg := fmt.Sprintf("STATUS=%s", collapsedErr) if errno > 0 { msg += fmt.Sprintf("\nERRNO=%d", errno) } return sdNotify(msg) } var socketPath, _ = os.LookupEnv("NOTIFY_SOCKET") ================================================ FILE: notify/notify_other.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !linux && !windows package notify func Ready() error { return nil } func Reloading() error { return nil } func Stopping() error { return nil } func Status(_ string) error { return nil } func Error(_ error, _ int) error { return nil } ================================================ FILE: notify/notify_windows.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package notify

import (
	"log"
	"strings"

	"golang.org/x/sys/windows/svc"
)

// globalStatus stores the Windows service status channel; it can be
// used to notify the SCM (service control manager) of caddy's state.
// A nil channel means caddy is not running as a Windows service, in
// which case the notification functions below are no-ops.
var globalStatus chan<- svc.Status

// SetGlobalStatus assigns the channel through which status updates
// will be sent to the SCM. This is typically provided by the service
// handler when the service starts.
func SetGlobalStatus(status chan<- svc.Status) {
	globalStatus = status
}

// Ready notifies the SCM that the service is fully running and ready
// to accept stop or shutdown control requests.
func Ready() error {
	if globalStatus != nil {
		globalStatus <- svc.Status{
			State:   svc.Running,
			Accepts: svc.AcceptStop | svc.AcceptShutdown,
		}
	}
	return nil
}

// Reloading notifies the SCM that the service is entering a transitional
// state. (StartPending is reused here: the SCM has no dedicated
// "reloading" state.)
func Reloading() error {
	if globalStatus != nil {
		globalStatus <- svc.Status{State: svc.StartPending}
	}
	return nil
}

// Stopping notifies the SCM that the service is in the process of stopping.
// This allows Windows to track the shutdown transition properly.
func Stopping() error {
	if globalStatus != nil {
		globalStatus <- svc.Status{State: svc.StopPending}
	}
	return nil
}

// Status sends an arbitrary service state to the SCM based on a string
// identifier of [svc.State].
// The unknown states will be logged.
func Status(name string) error { if globalStatus == nil { return nil } var state svc.State var accepts svc.Accepted accepts = 0 switch strings.ToLower(name) { case "stopped": state = svc.Stopped case "start_pending": state = svc.StartPending case "stop_pending": state = svc.StopPending case "running": state = svc.Running accepts = svc.AcceptStop | svc.AcceptShutdown case "continue_pending": state = svc.ContinuePending case "pause_pending": state = svc.PausePending case "paused": state = svc.Paused accepts = svc.AcceptStop | svc.AcceptShutdown | svc.AcceptPauseAndContinue default: log.Printf("unknown state: %s", name) return nil } globalStatus <- svc.Status{State: state, Accepts: accepts} return nil } // Error notifies the SCM that the service is stopping due to a failure, // including a service-specific exit code. func Error(err error, code int) error { if globalStatus != nil { globalStatus <- svc.Status{ State: svc.StopPending, ServiceSpecificExitCode: uint32(code), } } return nil } ================================================ FILE: replacer.go ================================================ // Copyright 2015 Matthew Holt and The Caddy Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "bytes" "fmt" "io" "net/http" "os" "path/filepath" "runtime" "strconv" "strings" "sync" "time" "go.uber.org/zap" ) // NewReplacer returns a new Replacer. 
func NewReplacer() *Replacer { rep := &Replacer{ static: make(map[string]any), mapMutex: &sync.RWMutex{}, } rep.providers = []replacementProvider{ globalDefaultReplacementProvider{}, fileReplacementProvider{}, ReplacerFunc(rep.fromStatic), } return rep } // NewEmptyReplacer returns a new Replacer, // without the global default replacements. func NewEmptyReplacer() *Replacer { rep := &Replacer{ static: make(map[string]any), mapMutex: &sync.RWMutex{}, } rep.providers = []replacementProvider{ ReplacerFunc(rep.fromStatic), } return rep } // Replacer can replace values in strings. // A default/empty Replacer is not valid; // use NewReplacer to make one. type Replacer struct { providers []replacementProvider static map[string]any mapMutex *sync.RWMutex } // WithoutFile returns a copy of the current Replacer // without support for the {file.*} placeholder, which // may be unsafe in some contexts. // // EXPERIMENTAL: Subject to change or removal. func (r *Replacer) WithoutFile() *Replacer { rep := &Replacer{static: r.static} for _, v := range r.providers { if _, ok := v.(fileReplacementProvider); ok { continue } rep.providers = append(rep.providers, v) } return rep } // Map adds mapFunc to the list of value providers. // mapFunc will be executed only at replace-time. func (r *Replacer) Map(mapFunc ReplacerFunc) { r.providers = append(r.providers, mapFunc) } // Set sets a custom variable to a static value. func (r *Replacer) Set(variable string, value any) { r.mapMutex.Lock() r.static[variable] = value r.mapMutex.Unlock() } // Get gets a value from the replacer. It returns // the value and whether the variable was known. func (r *Replacer) Get(variable string) (any, bool) { for _, mapFunc := range r.providers { if val, ok := mapFunc.replace(variable); ok { return val, true } } return nil, false } // GetString is the same as Get, but coerces the value to a // string representation as efficiently as possible. 
func (r *Replacer) GetString(variable string) (string, bool) { s, found := r.Get(variable) return ToString(s), found } // Delete removes a variable with a static value // that was created using Set. func (r *Replacer) Delete(variable string) { r.mapMutex.Lock() delete(r.static, variable) r.mapMutex.Unlock() } // fromStatic provides values from r.static. func (r *Replacer) fromStatic(key string) (any, bool) { r.mapMutex.RLock() defer r.mapMutex.RUnlock() val, ok := r.static[key] return val, ok } // ReplaceOrErr is like ReplaceAll, but any placeholders // that are empty or not recognized will cause an error to // be returned. func (r *Replacer) ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error) { return r.replace(input, "", false, errOnEmpty, errOnUnknown, nil) } // ReplaceKnown is like ReplaceAll but only replaces // placeholders that are known (recognized). Unrecognized // placeholders will remain in the output. func (r *Replacer) ReplaceKnown(input, empty string) string { out, _ := r.replace(input, empty, false, false, false, nil) return out } // ReplaceAll efficiently replaces placeholders in input with // their values. All placeholders are replaced in the output // whether they are recognized or not. Values that are empty // string will be substituted with empty. func (r *Replacer) ReplaceAll(input, empty string) string { out, _ := r.replace(input, empty, true, false, false, nil) return out } // ReplaceFunc is the same as ReplaceAll, but calls f for every // replacement to be made, in case f wants to change or inspect // the replacement. 
func (r *Replacer) ReplaceFunc(input string, f ReplacementFunc) (string, error) {
	return r.replace(input, "", true, false, false, f)
}

// replace is the core placeholder-substitution loop. Parameters:
//   - empty: the string substituted for placeholders whose value is "".
//   - treatUnknownAsEmpty: replace unrecognized placeholders with empty
//     (otherwise they are left in the output verbatim).
//   - errOnEmpty / errOnUnknown: return an error instead.
//   - f: optional hook that may transform each replacement value.
func (r *Replacer) replace(input, empty string,
	treatUnknownAsEmpty, errOnEmpty, errOnUnknown bool,
	f ReplacementFunc,
) (string, error) {
	// fast path: no brace characters at all means no placeholders
	if !strings.Contains(input, string(phOpen)) && !strings.Contains(input, string(phClose)) {
		return input, nil
	}

	var sb strings.Builder

	// it is reasonable to assume that the output
	// will be approximately as long as the input
	sb.Grow(len(input))

	// iterate the input to find each placeholder
	var lastWriteCursor int

	// fail fast if too many placeholders are unclosed
	var unclosedCount int

scan:
	for i := 0; i < len(input); i++ {
		// check for escaped braces
		if i > 0 && input[i-1] == phEscape && (input[i] == phClose || input[i] == phOpen) {
			// emit everything before the escape character, then
			// resume at the brace so it is copied out literally
			sb.WriteString(input[lastWriteCursor : i-1])
			lastWriteCursor = i
			continue
		}

		if input[i] != phOpen {
			continue
		}

		// our iterator is now on an unescaped open brace (start of placeholder)

		// too many unclosed placeholders in absolutely ridiculous input can be extremely slow (issue #4170)
		if unclosedCount > 100 {
			return "", fmt.Errorf("too many unclosed placeholders")
		}

		// find the end of the placeholder
		end := strings.Index(input[i:], string(phClose)) + i
		if end < i {
			// no closing brace anywhere after i
			unclosedCount++
			continue
		}

		// if necessary look for the first closing brace that is not escaped
		for end > 0 && end < len(input)-1 && input[end-1] == phEscape {
			nextEnd := strings.Index(input[end+1:], string(phClose))
			if nextEnd < 0 {
				unclosedCount++
				continue scan
			}
			end += nextEnd + 1
		}

		// write the substring from the last cursor to this point
		sb.WriteString(input[lastWriteCursor:i])

		// trim opening bracket
		key := input[i+1 : end]

		// try to get a value for this key, handle empty values accordingly
		val, found := r.Get(key)
		if !found {
			// placeholder is unknown (unrecognized); handle accordingly
			if errOnUnknown {
				return "", fmt.Errorf("unrecognized placeholder %s%s%s", string(phOpen), key, string(phClose))
			} else if !treatUnknownAsEmpty {
				// if treatUnknownAsEmpty is true, we'll handle an empty
				// val later; so only continue otherwise
				lastWriteCursor = i
				continue
			}
		}

		// apply any transformations
		if f != nil {
			var err error
			val, err = f(key, val)
			if err != nil {
				return "", err
			}
		}

		// convert val to a string as efficiently as possible
		valStr := ToString(val)

		// write the value; if it's empty, either return
		// an error or write a default value
		if valStr == "" {
			if errOnEmpty {
				return "", fmt.Errorf("evaluated placeholder %s%s%s is empty", string(phOpen), key, string(phClose))
			} else if empty != "" {
				sb.WriteString(empty)
			}
		} else {
			sb.WriteString(valStr)
		}

		// advance cursor to end of placeholder
		i = end
		lastWriteCursor = i + 1
	}

	// flush any unwritten remainder
	sb.WriteString(input[lastWriteCursor:])

	return sb.String(), nil
}

// ToString returns val as a string, as efficiently as possible.
// EXPERIMENTAL: may be changed or removed later.
func ToString(val any) string {
	switch v := val.(type) {
	case nil:
		return ""
	case string:
		return v
	case fmt.Stringer:
		return v.String()
	case error:
		return v.Error()
	case byte:
		return string(v)
	case []byte:
		return string(v)
	case []rune:
		return string(v)
	case int:
		return strconv.Itoa(v)
	case int32:
		return strconv.Itoa(int(v))
	case int64:
		return strconv.Itoa(int(v))
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case bool:
		if v {
			return "true"
		}
		return "false"
	default:
		// fallback for any other type; uses reflection via fmt
		return fmt.Sprintf("%+v", v)
	}
}

// ReplacerFunc is a function that returns a replacement for the
// given key along with true if the function is able to service
// that key (even if the value is blank). If the function does
// not recognize the key, false should be returned.
type ReplacerFunc func(key string) (any, bool) func (f ReplacerFunc) replace(key string) (any, bool) { return f(key) } // replacementProvider is a type that can provide replacements // for placeholders. Allows for type assertion to determine // which type of provider it is. type replacementProvider interface { replace(key string) (any, bool) } // fileReplacementProvider handles {file.*} replacements, // reading a file from disk and replacing with its contents. type fileReplacementProvider struct{} func (f fileReplacementProvider) replace(key string) (any, bool) { if !strings.HasPrefix(key, filePrefix) { return nil, false } filename := key[len(filePrefix):] maxSize := 1024 * 1024 body, err := readFileIntoBuffer(filename, maxSize) if err != nil { wd, _ := os.Getwd() Log().Error("placeholder: failed to read file", zap.String("file", filename), zap.String("working_dir", wd), zap.Error(err)) return nil, true } body = bytes.TrimSuffix(body, []byte("\n")) body = bytes.TrimSuffix(body, []byte("\r")) return string(body), true } // globalDefaultReplacementProvider handles replacements // that can be used in any context, such as system variables, // time, or environment variables. type globalDefaultReplacementProvider struct{} func (f globalDefaultReplacementProvider) replace(key string) (any, bool) { // check environment variable const envPrefix = "env." 
if strings.HasPrefix(key, envPrefix) { return os.Getenv(key[len(envPrefix):]), true } switch key { case "system.hostname": // OK if there is an error; just return empty string name, _ := os.Hostname() return name, true case "system.slash": return string(filepath.Separator), true case "system.os": return runtime.GOOS, true case "system.wd": // OK if there is an error; just return empty string wd, _ := os.Getwd() return wd, true case "system.arch": return runtime.GOARCH, true case "time.now": return nowFunc(), true case "time.now.http": // According to the comment for http.TimeFormat, the timezone must be in UTC // to generate the correct format. // https://github.com/caddyserver/caddy/issues/5773 return nowFunc().UTC().Format(http.TimeFormat), true case "time.now.common_log": return nowFunc().Format("02/Jan/2006:15:04:05 -0700"), true case "time.now.year": return strconv.Itoa(nowFunc().Year()), true case "time.now.unix": return strconv.FormatInt(nowFunc().Unix(), 10), true case "time.now.unix_ms": return strconv.FormatInt(nowFunc().UnixNano()/int64(time.Millisecond), 10), true } return nil, false } // readFileIntoBuffer reads the file at filePath into a size limited buffer. func readFileIntoBuffer(filename string, size int) ([]byte, error) { file, err := os.Open(filename) if err != nil { return nil, err } defer file.Close() buffer := make([]byte, size) n, err := file.Read(buffer) if err != nil && err != io.EOF { return nil, err } // slice the buffer to the actual size return buffer[:n], nil } // ReplacementFunc is a function that is called when a // replacement is being performed. It receives the // variable (i.e. placeholder name) and the value that // will be the replacement, and returns the value that // will actually be the replacement, or an error. Note // that errors are sometimes ignored by replacers. type ReplacementFunc func(variable string, val any) (any, error) // nowFunc is a variable so tests can change it // in order to obtain a deterministic time. 
var nowFunc = time.Now

// ReplacerCtxKey is the context key for a replacer.
const ReplacerCtxKey CtxKey = "replacer"

// phOpen and phClose delimit a placeholder; phEscape escapes a
// literal brace so it is not treated as a placeholder delimiter.
const phOpen, phClose, phEscape = '{', '}', '\\'

// filePrefix is the placeholder key prefix ({file.<path>}) handled
// by fileReplacementProvider.
const filePrefix = "file."

================================================
FILE: replacer_fuzz.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build gofuzz

package caddy

// FuzzReplacer feeds arbitrary bytes through the replacer (including
// nested and split-input variants) to surface panics or pathological
// parsing behavior; a constant score of 0 lets the fuzzer guide itself.
func FuzzReplacer(data []byte) (score int) {
	NewReplacer().ReplaceAll(string(data), "")
	NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), "")
	NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), NewReplacer().ReplaceAll(string(data), ""))
	NewReplacer().ReplaceAll(string(data[:len(data)/2]), string(data[len(data)/2:]))
	return 0
}

================================================
FILE: replacer_test.go
================================================

// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "fmt" "os" "path/filepath" "runtime" "sync" "testing" ) func TestReplacer(t *testing.T) { type testCase struct { input, expect, empty string } rep := testReplacer() // ReplaceAll for i, tc := range []testCase{ { input: "{", expect: "{", }, { input: `\{`, expect: `{`, }, { input: "foo{", expect: "foo{", }, { input: `foo\{`, expect: `foo{`, }, { input: "foo{bar", expect: "foo{bar", }, { input: `foo\{bar`, expect: `foo{bar`, }, { input: "foo{bar}", expect: "foo", }, { input: `foo\{bar\}`, expect: `foo{bar}`, }, { input: "}", expect: "}", }, { input: `\}`, expect: `}`, }, { input: "{}", expect: "", }, { input: `\{\}`, expect: `{}`, }, { input: `{"json": "object"}`, expect: "", }, { input: `\{"json": "object"}`, expect: `{"json": "object"}`, }, { input: `\{"json": "object"\}`, expect: `{"json": "object"}`, }, { input: `\{"json": "object{bar}"\}`, expect: `{"json": "object"}`, }, { input: `\{"json": \{"nested": "object"\}\}`, expect: `{"json": {"nested": "object"}}`, }, { input: `\{"json": \{"nested": "{bar}"\}\}`, expect: `{"json": {"nested": ""}}`, }, { input: `pre \{"json": \{"nested": "{bar}"\}\}`, expect: `pre {"json": {"nested": ""}}`, }, { input: `\{"json": \{"nested": "{bar}"\}\} post`, expect: `{"json": {"nested": ""}} post`, }, { input: `pre \{"json": \{"nested": "{bar}"\}\} post`, expect: `pre {"json": {"nested": ""}} post`, }, { input: `{{`, expect: "{{", }, { input: `{{}`, expect: "", }, { input: `{"json": "object"\}`, expect: "", }, { input: `{unknown}`, empty: "-", expect: "-", }, { input: `back\slashes`, expect: `back\slashes`, }, { input: `double back\\slashes`, expect: `double back\\slashes`, }, { input: `placeholder {with \{ brace} in name`, expect: `placeholder in name`, }, { input: `placeholder {with \} brace} in name`, expect: `placeholder in name`, }, { input: `placeholder {with \} \} braces} in name`, expect: `placeholder 
in name`,
		},
		{
			input:  `\{'group':'default','max_age':3600,'endpoints':[\{'url':'https://some.domain.local/a/d/g'\}],'include_subdomains':true\}`,
			expect: `{'group':'default','max_age':3600,'endpoints':[{'url':'https://some.domain.local/a/d/g'}],'include_subdomains':true}`,
		},
		{
			input:  `{}{}{}{\\\\}\\\\`,
			expect: `{\\\}\\\\`,
		},
		{
			input:  string([]byte{0x26, 0x00, 0x83, 0x7B, 0x84, 0x07, 0x5C, 0x7D, 0x84}),
			expect: string([]byte{0x26, 0x00, 0x83, 0x7B, 0x84, 0x07, 0x7D, 0x84}),
		},
		{
			input:  `\\}`,
			expect: `\}`,
		},
	} {
		actual := rep.ReplaceAll(tc.input, tc.empty)
		if actual != tc.expect {
			t.Errorf("Test %d: '%s': expected '%s' but got '%s'",
				i, tc.input, tc.expect, actual)
		}
	}
}

// TestReplacerSet verifies that Set stores each value in the static map
// and that setting new keys does not clobber previously-set ones.
func TestReplacerSet(t *testing.T) {
	rep := testReplacer()

	for _, tc := range []struct {
		variable string
		value    any
	}{
		{
			variable: "test1",
			value:    "val1",
		},
		{
			variable: "asdf",
			value:    "123",
		},
		{
			variable: "numbers",
			value:    123.456,
		},
		{
			variable: "äöü",
			value:    "öö_äü",
		},
		{
			variable: "with space",
			value:    "space value",
		},
		{
			variable: "1",
			value:    "test-123",
		},
		{
			variable: "mySuper_IP",
			value:    "1.2.3.4",
		},
		{
			variable: "testEmpty",
			value:    "",
		},
	} {
		rep.Set(tc.variable, tc.value)

		// test if key is added
		if val, ok := rep.static[tc.variable]; ok {
			if val != tc.value {
				t.Errorf("Expected value '%s' for key '%s' got '%s'", tc.value, tc.variable, val)
			}
		} else {
			t.Errorf("Expected existing key '%s' found nothing", tc.variable)
		}
	}

	// test if all keys are still there (by length)
	// FIX: the failure message previously hard-coded 7 while the check
	// compared against 8; report the same value the check actually uses.
	const expectedLen = 8
	length := len(rep.static)
	if length != expectedLen {
		t.Errorf("Expected length '%v' got '%v'", expectedLen, length)
	}
}

// TestReplacerReplaceKnown verifies that ReplaceKnown substitutes known
// placeholders (from any provider) and leaves unknown ones untouched.
func TestReplacerReplaceKnown(t *testing.T) {
	rep := Replacer{
		mapMutex: &sync.RWMutex{},
		providers: []replacementProvider{
			// split our possible vars to two functions (to test if both functions are called)
			ReplacerFunc(func(key string) (val any, ok bool) {
				switch key {
				case "test1":
					return "val1", true
				case "asdf":
					return "123", true
				case "äöü":
					return "öö_äü", true
				case "with space":
					return "space value", true
default: return "NOOO", false } }), ReplacerFunc(func(key string) (val any, ok bool) { switch key { case "1": return "test-123", true case "mySuper_IP": return "1.2.3.4", true case "testEmpty": return "", true default: return "NOOO", false } }), }, } for _, tc := range []struct { testInput string expected string }{ { // test vars without space testInput: "{test1}{asdf}{äöü}{1}{with space}{mySuper_IP}", expected: "val1123öö_äütest-123space value1.2.3.4", }, { // test vars with space testInput: "{test1} {asdf} {äöü} {1} {with space} {mySuper_IP} ", expected: "val1 123 öö_äü test-123 space value 1.2.3.4 ", }, { // test with empty val testInput: "{test1} {testEmpty} {asdf} {1} ", expected: "val1 EMPTY 123 test-123 ", }, { // test vars with not finished placeholders testInput: "{te{test1}{as{{df{1}", expected: "{teval1{as{{dftest-123", }, { // test with non existing vars testInput: "{test1} {nope} {1} ", expected: "val1 {nope} test-123 ", }, } { actual := rep.ReplaceKnown(tc.testInput, "EMPTY") // test if all are replaced as expected if actual != tc.expected { t.Errorf("Expected '%s' got '%s' for '%s'", tc.expected, actual, tc.testInput) } } } func TestReplacerDelete(t *testing.T) { rep := Replacer{ mapMutex: &sync.RWMutex{}, static: map[string]any{ "key1": "val1", "key2": "val2", "key3": "val3", "key4": "val4", }, } startLen := len(rep.static) toDel := []string{ "key2", "key4", } for _, key := range toDel { rep.Delete(key) // test if key is removed from static map if _, ok := rep.static[key]; ok { t.Errorf("Expected '%s' to be removed. 
It is still in static map.", key) } } // check if static slice is smaller expected := startLen - len(toDel) actual := len(rep.static) if len(rep.static) != expected { t.Errorf("Expected length '%v' got length '%v'", expected, actual) } } func TestReplacerMap(t *testing.T) { rep := testReplacer() for i, tc := range []ReplacerFunc{ func(key string) (val any, ok bool) { return "", false }, func(key string) (val any, ok bool) { return "", false }, } { rep.Map(tc) // test if function (which listens on specific key) is added by checking length if len(rep.providers) == i+1 { // check if the last function is the one we just added pTc := fmt.Sprintf("%p", tc) pRep := fmt.Sprintf("%p", rep.providers[i]) if pRep != pTc { t.Errorf("Expected func pointer '%s' got '%s'", pTc, pRep) } } else { t.Errorf("Expected providers length '%v' got length '%v'", i+1, len(rep.providers)) } } } func TestReplacerNew(t *testing.T) { repl := NewReplacer() if len(repl.providers) != 3 { t.Errorf("Expected providers length '%v' got length '%v'", 3, len(repl.providers)) } // test if default global replacements are added as the first provider hostname, _ := os.Hostname() wd, _ := os.Getwd() os.Setenv("CADDY_REPLACER_TEST", "envtest") defer os.Setenv("CADDY_REPLACER_TEST", "") for _, tc := range []struct { variable string value string }{ { variable: "system.hostname", value: hostname, }, { variable: "system.slash", value: string(filepath.Separator), }, { variable: "system.os", value: runtime.GOOS, }, { variable: "system.arch", value: runtime.GOARCH, }, { variable: "system.wd", value: wd, }, { variable: "env.CADDY_REPLACER_TEST", value: "envtest", }, } { if val, ok := repl.providers[0].replace(tc.variable); ok { if val != tc.value { t.Errorf("Expected value '%s' for key '%s' got '%s'", tc.value, tc.variable, val) } } else { t.Errorf("Expected key '%s' to be recognized by first provider", tc.variable) } } // test if file provider is added as the second provider for _, tc := range []struct { variable 
string value string }{ { variable: "file.caddytest/integration/testdata/foo.txt", value: "foo", }, { variable: "file.caddytest/integration/testdata/foo_with_trailing_newline.txt", value: "foo", }, { variable: "file.caddytest/integration/testdata/foo_with_multiple_trailing_newlines.txt", value: "foo" + getEOL(), }, } { if val, ok := repl.providers[1].replace(tc.variable); ok { if val != tc.value { t.Errorf("Expected value '%s' for key '%s' got '%s'", tc.value, tc.variable, val) } } else { t.Errorf("Expected key '%s' to be recognized by second provider", tc.variable) } } } func getEOL() string { if os.PathSeparator == '\\' { return "\r\n" // Windows EOL } return "\n" // Unix and modern macOS EOL } func TestReplacerNewWithoutFile(t *testing.T) { repl := NewReplacer().WithoutFile() for _, tc := range []struct { variable string value string notFound bool }{ { variable: "file.caddytest/integration/testdata/foo.txt", notFound: true, }, { variable: "system.os", value: runtime.GOOS, }, } { if val, ok := repl.Get(tc.variable); ok && !tc.notFound { if val != tc.value { t.Errorf("Expected value '%s' for key '%s' got '%s'", tc.value, tc.variable, val) } } else if !tc.notFound { t.Errorf("Expected key '%s' to be recognized", tc.variable) } } } func BenchmarkReplacer(b *testing.B) { type testCase struct { name, input, empty string } rep := testReplacer() rep.Set("str", "a string") rep.Set("int", 123.456) for _, bm := range []testCase{ { name: "no placeholder", input: `simple string`, }, { name: "string replacement", input: `str={str}`, }, { name: "int replacement", input: `int={int}`, }, { name: "placeholder", input: `{"json": "object"}`, }, { name: "escaped placeholder", input: `\{"json": \{"nested": "{bar}"\}\}`, }, } { b.Run(bm.name, func(b *testing.B) { for b.Loop() { rep.ReplaceAll(bm.input, bm.empty) } }) } } func testReplacer() Replacer { return Replacer{ providers: make([]replacementProvider, 0), static: make(map[string]any), mapMutex: &sync.RWMutex{}, } } 
================================================
FILE: service_windows.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"os"
	"path/filepath"

	"golang.org/x/sys/windows/svc"

	"github.com/caddyserver/caddy/v2/notify"
)

// init hooks this process up as a Windows service control handler, but
// only when it is actually running as a Windows service; in a regular
// console session it does nothing.
func init() {
	isService, err := svc.IsWindowsService()
	if err != nil || !isService {
		return
	}

	// Windows services always start in the system32 directory, try to
	// switch into the directory where the caddy executable is.
	execPath, err := os.Executable()
	if err == nil {
		_ = os.Chdir(filepath.Dir(execPath))
	}

	// Run the service control loop in the background so init can return.
	go func() {
		_ = svc.Run("", runner{})
	}()
}

// runner is the handler passed to svc.Run; it receives service
// control requests from Windows.
type runner struct{}

// Execute is the service control loop: it publishes the status channel
// for the notify package, reports StartPending, then answers control
// requests until a Stop/Shutdown request triggers a graceful exit.
func (runner) Execute(args []string, request <-chan svc.ChangeRequest, status chan<- svc.Status) (bool, uint32) {
	notify.SetGlobalStatus(status)
	status <- svc.Status{State: svc.StartPending}

	for {
		req := <-request
		switch req.Cmd {
		case svc.Interrogate:
			// Echo the current status back to the service manager.
			status <- req.CurrentStatus
		case svc.Stop, svc.Shutdown:
			status <- svc.Status{State: svc.StopPending}
			// Reuse the same graceful-exit path as an interrupt signal.
			exitProcessFromSignal("SIGINT")
			return false, 0
		}
	}
}


================================================
FILE: sigtrap.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "context" "os" "os/signal" "go.uber.org/zap" ) // TrapSignals create signal/interrupt handlers as best it can for the // current OS. This is a rather invasive function to call in a Go program // that captures signals already, so in that case it would be better to // implement these handlers yourself. func TrapSignals() { trapSignalsCrossPlatform() trapSignalsPosix() } // trapSignalsCrossPlatform captures SIGINT or interrupt (depending // on the OS), which initiates a graceful shutdown. A second SIGINT // or interrupt will forcefully exit the process immediately. func trapSignalsCrossPlatform() { go func() { shutdown := make(chan os.Signal, 1) signal.Notify(shutdown, os.Interrupt) for i := 0; true; i++ { <-shutdown if i > 0 { Log().Warn("force quit", zap.String("signal", "SIGINT")) os.Exit(ExitCodeForceQuit) } Log().Info("shutting down", zap.String("signal", "SIGINT")) go exitProcessFromSignal("SIGINT") } }() } // exitProcessFromSignal exits the process from a system signal. func exitProcessFromSignal(sigName string) { logger := Log().With(zap.String("signal", sigName)) exitProcess(context.TODO(), logger) } // Exit codes. Generally, you should NOT // automatically restart the process if the // exit code is ExitCodeFailedStartup (1). 
const (
	ExitCodeSuccess       = iota // 0: clean exit
	ExitCodeFailedStartup        // 1: see note above about not auto-restarting
	ExitCodeForceQuit            // 2: used when a signal forces an immediate exit
	ExitCodeFailedQuit           // 3
)


================================================
FILE: sigtrap_nonposix.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build windows || plan9 || nacl || js

package caddy

// trapSignalsPosix is a no-op on platforms without POSIX signals.
func trapSignalsPosix() {}


================================================
FILE: sigtrap_posix.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !plan9 && !nacl && !js

package caddy

import (
	"context"
	"errors"
	"os"
	"os/signal"
	"syscall"

	"github.com/caddyserver/certmagic"
	"go.uber.org/zap"
)

// trapSignalsPosix captures POSIX-only signals.
func trapSignalsPosix() {
	// Ignore all SIGPIPE signals to prevent weird issues with systemd: https://github.com/dunglas/frankenphp/issues/1020
	// Docker/Moby has a similar hack: https://github.com/moby/moby/blob/d828b032a87606ae34267e349bf7f7ccb1f6495a/cmd/dockerd/docker.go#L87-L90
	signal.Ignore(syscall.SIGPIPE)

	go func() {
		sigchan := make(chan os.Signal, 1)
		signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2)

		for sig := range sigchan {
			switch sig {
			case syscall.SIGQUIT:
				// Immediate exit (no graceful shutdown of apps), but still
				// release any locks this instance holds.
				Log().Info("quitting process immediately", zap.String("signal", "SIGQUIT"))
				certmagic.CleanUpOwnLocks(context.TODO(), Log()) // try to clean up locks anyway, it's important
				os.Exit(ExitCodeForceQuit)

			case syscall.SIGTERM:
				// Graceful shutdown: stop apps first, then terminate.
				Log().Info("shutting down apps, then terminating", zap.String("signal", "SIGTERM"))
				exitProcessFromSignal("SIGTERM")

			case syscall.SIGUSR1:
				logger := Log().With(zap.String("signal", "SIGUSR1"))

				// If we know the last source config file/adapter (set when starting
				// via `caddy run --config --adapter`), attempt
				// to reload from that source. Otherwise, ignore the signal.
				file, adapter, reloadCallback := getLastConfig()
				if file == "" {
					logger.Info("last config unknown, ignored SIGUSR1")
					break
				}
				logger = logger.With(
					zap.String("file", file),
					zap.String("adapter", adapter))
				if reloadCallback == nil {
					logger.Warn("no reload helper available, ignored SIGUSR1")
					break
				}
				logger.Info("reloading config from last-known source")
				if err := reloadCallback(file, adapter); errors.Is(err, errReloadFromSourceUnavailable) {
					// No reload helper available (likely not started via caddy run).
					logger.Warn("reload from source unavailable in this process; ignored SIGUSR1")
				} else if err != nil {
					logger.Error("failed to reload config from file", zap.Error(err))
				} else {
					logger.Info("successfully reloaded config from file")
				}

			case syscall.SIGUSR2:
				Log().Info("not implemented", zap.String("signal", "SIGUSR2"))

			case syscall.SIGHUP:
				// ignore; this signal is sometimes sent outside of the user's control
				Log().Info("not implemented", zap.String("signal", "SIGHUP"))
			}
		}
	}()
}


================================================
FILE: storage.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"os"
	"path/filepath"
	"runtime"

	"github.com/caddyserver/certmagic"
	"go.uber.org/zap"
)

// StorageConverter is a type that can convert itself
// to a valid, usable certmagic.Storage value. (The
// value might be short-lived.) This interface allows
// us to adapt any CertMagic storage implementation
// into a consistent API for Caddy configuration.
type StorageConverter interface {
	CertMagicStorage() (certmagic.Storage, error)
}

// HomeDir returns the best guess of the current user's home
// directory from environment variables. If unknown, "." (the
// current directory) is returned instead, except GOOS=android,
// which returns "/sdcard".
func HomeDir() string { home := homeDirUnsafe() if home == "" && runtime.GOOS == "android" { home = "/sdcard" } if home == "" { home = "." } return home } // homeDirUnsafe is a low-level function that returns // the user's home directory from environment // variables. Careful: if it cannot be determined, an // empty string is returned. If not accounting for // that case, use HomeDir() instead; otherwise you // may end up using the root of the file system. func homeDirUnsafe() string { home := os.Getenv("HOME") if home == "" && runtime.GOOS == "windows" { drive := os.Getenv("HOMEDRIVE") path := os.Getenv("HOMEPATH") home = drive + path if drive == "" || path == "" { home = os.Getenv("USERPROFILE") } } if home == "" && runtime.GOOS == "plan9" { home = os.Getenv("home") } return home } // AppConfigDir returns the directory where to store user's config. // // If XDG_CONFIG_HOME is set, it returns: $XDG_CONFIG_HOME/caddy. // Otherwise, os.UserConfigDir() is used; if successful, it appends // "Caddy" (Windows & Mac) or "caddy" (every other OS) to the path. // If it returns an error, the fallback path "./caddy" is returned. // // The config directory is not guaranteed to be different from // AppDataDir(). // // Unlike os.UserConfigDir(), this function prefers the // XDG_CONFIG_HOME env var on all platforms, not just Unix. // // Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html func AppConfigDir() string { if basedir := os.Getenv("XDG_CONFIG_HOME"); basedir != "" { return filepath.Join(basedir, "caddy") } basedir, err := os.UserConfigDir() if err != nil { Log().Warn("unable to determine directory for user configuration; falling back to current directory", zap.Error(err)) return "./caddy" } subdir := "caddy" switch runtime.GOOS { case "windows", "darwin": subdir = "Caddy" } return filepath.Join(basedir, subdir) } // AppDataDir returns a directory path that is suitable for storing // application data on disk. 
It uses the environment for finding the // best place to store data, and appends a "caddy" or "Caddy" (depending // on OS and environment) subdirectory. // // For a base directory path: // If XDG_DATA_HOME is set, it returns: $XDG_DATA_HOME/caddy; otherwise, // on Windows it returns: %AppData%/Caddy, // on Mac: $HOME/Library/Application Support/Caddy, // on Plan9: $home/lib/caddy, // on Android: $HOME/caddy, // and on everything else: $HOME/.local/share/caddy. // // If a data directory cannot be determined, it returns "./caddy" // (this is not ideal, and the environment should be fixed). // // The data directory is not guaranteed to be different from AppConfigDir(). // // Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html func AppDataDir() string { if basedir := os.Getenv("XDG_DATA_HOME"); basedir != "" { return filepath.Join(basedir, "caddy") } switch runtime.GOOS { case "windows": appData := os.Getenv("AppData") if appData != "" { return filepath.Join(appData, "Caddy") } case "darwin": home := homeDirUnsafe() if home != "" { return filepath.Join(home, "Library", "Application Support", "Caddy") } case "plan9": home := homeDirUnsafe() if home != "" { return filepath.Join(home, "lib", "caddy") } case "android": home := homeDirUnsafe() if home != "" { return filepath.Join(home, "caddy") } default: home := homeDirUnsafe() if home != "" { return filepath.Join(home, ".local", "share", "caddy") } } return "./caddy" } // ConfigAutosavePath is the default path to which the last config will be persisted. var ConfigAutosavePath = filepath.Join(AppConfigDir(), "autosave.json") // DefaultStorage is Caddy's default storage module. 
var DefaultStorage = &certmagic.FileStorage{Path: AppDataDir()} // file-system storage rooted at the app data dir


================================================
FILE: usagepool.go
================================================
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package caddy

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// UsagePool is a thread-safe map that pools values
// based on usage (reference counting). Values are
// only inserted if they do not already exist. There
// are two ways to add values to the pool:
//
// 1. LoadOrStore will increment usage and store the
// value immediately if it does not already exist.
// 2. LoadOrNew will atomically check for existence
// and construct the value immediately if it does
// not already exist, or increment the usage
// otherwise, then store that value in the pool.
// When the constructed value is finally deleted
// from the pool (when its usage reaches 0), it
// will be cleaned up by calling Destruct().
//
// The use of LoadOrNew allows values to be created
// and reused and finally cleaned up only once, even
// though they may have many references throughout
// their lifespan. This is helpful, for example, when
// sharing thread-safe io.Writers that you only want
// to open and close once.
//
// There is no way to overwrite existing keys in the
// pool without first deleting it as many times as it
// was stored. Deleting too many times will panic.
// The implementation does not use a sync.Pool because
// UsagePool needs additional atomicity to run the
// constructor functions when creating a new value when
// LoadOrNew is used. (We could probably use sync.Pool
// but we'd still have to layer our own additional locks
// on top.)
//
// An empty UsagePool is NOT safe to use; always call
// NewUsagePool() to make a new one.
type UsagePool struct {
	sync.RWMutex // guards the pool map itself (not the pooled values)
	pool         map[any]*usagePoolVal
}

// NewUsagePool returns a new usage pool that is ready to use.
func NewUsagePool() *UsagePool {
	return &UsagePool{
		pool: make(map[any]*usagePoolVal),
	}
}

// LoadOrNew loads the value associated with key from the pool if it
// already exists. If the key doesn't exist, it will call construct
// to create a new value and then stores that in the pool. An error
// is only returned if the constructor returns an error. The loaded
// or constructed value is returned. The loaded return value is true
// if the value already existed and was loaded, or false if it was
// newly constructed.
func (up *UsagePool) LoadOrNew(key any, construct Constructor) (value any, loaded bool, err error) {
	var upv *usagePoolVal
	up.Lock()
	upv, loaded = up.pool[key]
	if loaded {
		// Existing entry: count this reference, then read the value (or
		// the stored constructor error) under the value's own read lock.
		atomic.AddInt32(&upv.refs, 1)
		up.Unlock()
		upv.RLock()
		value = upv.value
		err = upv.err
		upv.RUnlock()
	} else {
		// New entry: insert a write-locked placeholder first so the pool
		// lock can be released while the constructor runs; concurrent
		// readers of this key block on upv's lock until construction ends.
		upv = &usagePoolVal{refs: 1}
		upv.Lock()
		up.pool[key] = upv
		up.Unlock()
		value, err = construct()
		if err == nil {
			upv.value = value
		} else {
			upv.err = err
			up.Lock()
			// this *should* be safe, I think, because we have a
			// write lock on upv, but we might also need to ensure
			// that upv.err is nil before doing this, since we
			// released the write lock on up during construct...
			// but then again it's also after midnight...
			delete(up.pool, key)
			up.Unlock()
		}
		upv.Unlock()
	}
	return value, loaded, err
}

// LoadOrStore loads the value associated with key from the pool if it
// already exists, or stores it if it does not exist. It returns the
// value that was either loaded or stored, and true if the value already
// existed and was loaded, false if the value didn't exist and was stored.
func (up *UsagePool) LoadOrStore(key, val any) (value any, loaded bool) {
	var upv *usagePoolVal
	up.Lock()
	upv, loaded = up.pool[key]
	if loaded {
		atomic.AddInt32(&upv.refs, 1)
		up.Unlock()
		upv.Lock()
		if upv.err == nil {
			value = upv.value
		} else {
			// A previous LoadOrNew construction failed for this key;
			// replace the error with the caller's value.
			// NOTE(review): the named return `value` is not assigned in
			// this branch, so nil is returned even though val was stored
			// — confirm this is intended.
			upv.value = val
			upv.err = nil
		}
		upv.Unlock()
	} else {
		upv = &usagePoolVal{refs: 1, value: val}
		up.pool[key] = upv
		up.Unlock()
		value = val
	}
	return value, loaded
}

// Range iterates the pool similarly to how sync.Map.Range() does:
// it calls f for every key in the pool, and if f returns false,
// iteration is stopped. Ranging does not affect usage counts.
//
// This method is somewhat naive and acquires a read lock on the
// entire pool during iteration, so do your best to make f() really
// fast, m'kay?
func (up *UsagePool) Range(f func(key, value any) bool) {
	up.RLock()
	defer up.RUnlock()
	for key, upv := range up.pool {
		upv.RLock()
		// Skip entries whose construction failed.
		if upv.err != nil {
			upv.RUnlock()
			continue
		}
		val := upv.value
		upv.RUnlock()
		if !f(key, val) {
			break
		}
	}
}

// Delete decrements the usage count for key and removes the
// value from the underlying map if the usage is 0. It returns
// true if the usage count reached 0 and the value was deleted.
// It panics if the usage count drops below 0; always call
// Delete precisely as many times as LoadOrStore.
func (up *UsagePool) Delete(key any) (deleted bool, err error) {
	up.Lock()
	upv, ok := up.pool[key]
	if !ok {
		up.Unlock()
		return false, nil
	}
	refs := atomic.AddInt32(&upv.refs, -1)
	if refs == 0 {
		// Last reference: remove the entry, then run the value's
		// destructor (if it has one) outside the pool lock.
		delete(up.pool, key)
		up.Unlock()
		upv.RLock()
		val := upv.value
		upv.RUnlock()
		if destructor, ok := val.(Destructor); ok {
			err = destructor.Destruct()
		}
		deleted = true
	} else {
		up.Unlock()
		if refs < 0 {
			panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)",
				upv.value, upv.refs))
		}
	}
	return deleted, err
}

// References returns the number of references (count of usages) to a
// key in the pool, and true if the key exists, or false otherwise.
func (up *UsagePool) References(key any) (int, bool) {
	up.RLock()
	upv, loaded := up.pool[key]
	up.RUnlock()
	if loaded {
		// I wonder if it'd be safer to read this value during
		// our lock on the UsagePool... guess we'll see...
		refs := atomic.LoadInt32(&upv.refs)
		return int(refs), true
	}
	return 0, false
}

// Constructor is a function that returns a new value
// that can destruct itself when it is no longer needed.
type Constructor func() (Destructor, error)

// Destructor is a value that can clean itself up when
// it is deallocated.
type Destructor interface {
	Destruct() error
}

// usagePoolVal is a single pooled value along with its reference
// count, any construction error, and a lock guarding value/err.
type usagePoolVal struct {
	refs  int32 // accessed atomically; must be 64-bit aligned for 32-bit systems
	value any
	err   error
	sync.RWMutex
}