diff --git a/CODEOWNERS b/CODEOWNERS index 0cfa49acea6f..7c22f6e5ab36 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -103,17 +103,17 @@ extensions/filters/common/original_src @klarose @mattklein123 # attribute context /*/extensions/filters/common/expr @kyessenov @yangminzhu @tyxia # webassembly access logger extensions -/*/extensions/access_loggers/wasm @mpwarres @lizan @UNOWNED +/*/extensions/access_loggers/wasm @mpwarres @kyessenov @lizan # webassembly bootstrap extensions -/*/extensions/bootstrap/wasm @mpwarres @lizan @UNOWNED +/*/extensions/bootstrap/wasm @mpwarres @kyessenov @lizan # webassembly http extensions -/*/extensions/filters/http/wasm @mpwarres @lizan @UNOWNED +/*/extensions/filters/http/wasm @mpwarres @kyessenov @lizan # webassembly network extensions -/*/extensions/filters/network/wasm @mpwarres @lizan @UNOWNED +/*/extensions/filters/network/wasm @mpwarres @kyessenov @lizan # webassembly common extension -/*/extensions/common/wasm @mpwarres @lizan @UNOWNED +/*/extensions/common/wasm @mpwarres @kyessenov @lizan # webassembly runtimes -/*/extensions/wasm_runtime/ @mpwarres @lizan @UNOWNED +/*/extensions/wasm_runtime/ @mpwarres @kyessenov @lizan # common matcher /*/extensions/common/matcher @mattklein123 @yangminzhu /*/extensions/common/proxy_protocol @alyssawilk @wez470 @@ -137,7 +137,7 @@ extensions/filters/common/original_src @klarose @mattklein123 /*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz /*/extensions/stat_sinks/open_telemetry @ohadvano @mattklein123 # webassembly stat-sink extensions -/*/extensions/stat_sinks/wasm @mpwarres @lizan @UNOWNED +/*/extensions/stat_sinks/wasm @mpwarres @kyessenov @lizan /*/extensions/resource_monitors/injected_resource @eziskind @yanavlasov /*/extensions/resource_monitors/common @eziskind @yanavlasov @nezdolik /*/extensions/resource_monitors/fixed_heap @eziskind @yanavlasov @nezdolik diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 
a7738a68bc45..b0cb3c9b84fa 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -144,11 +144,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "buf", project_desc = "A new way of working with Protocol Buffers.", # Used for breaking change detection in API protobufs project_url = "https://buf.build", - version = "1.42.0", - sha256 = "412c8bdc2a4361f796df59735eb8b8f1cb85f7bfa91f443e471bf0b090d7c6c2", + version = "1.45.0", + sha256 = "deebd48a6bf85b073d7c7800c17b330376487e86852d4905c76a205b6fd795d4", strip_prefix = "buf", urls = ["https://github.com/bufbuild/buf/releases/download/v{version}/buf-Linux-x86_64.tar.gz"], - release_date = "2024-09-18", + release_date = "2024-10-08", use_category = ["api"], license = "Apache-2.0", license_url = "https://github.com/bufbuild/buf/blob/v{version}/LICENSE", diff --git a/bazel/BUILD b/bazel/BUILD index 011b1c88a334..e46a88f255f2 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -14,7 +14,6 @@ envoy_package() exports_files([ "gen_sh_test_runner.sh", - "generate_release_hash.sh", "sh_test_wrapper.sh", "test_for_benchmark_wrapper.sh", "repository_locations.bzl", diff --git a/bazel/generate_release_hash.sh b/bazel/generate_release_hash.sh deleted file mode 100755 index d866c26a0038..000000000000 --- a/bazel/generate_release_hash.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -e -o pipefail - -ENVOY_SRCDIR="${1}" - -if [[ ! 
-e "$ENVOY_SRCDIR" ]]; then - echo "Unable to find Envoy src dir: ${ENVOY_SRCDIR}" >&2 - exit 1 -fi - -git -C "$ENVOY_SRCDIR" fetch --tags - -git -C "$ENVOY_SRCDIR" tag --list 'v[0-9]*.[0-9]*.[0-9]*' \ - | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' \ - | sort -u \ - | sha256sum diff --git a/bazel/protobuf.patch b/bazel/protobuf.patch index c07b5ce9d83f..fe88b6f41d77 100644 --- a/bazel/protobuf.patch +++ b/bazel/protobuf.patch @@ -1,11 +1,11 @@ diff --git a/BUILD.bazel b/BUILD.bazel -index 0f6e41e3a..c0d2bbccf 100644 +index 42175b5ed..8b313e0f6 100644 --- a/BUILD.bazel +++ b/BUILD.bazel -@@ -454,14 +454,79 @@ cc_library( +@@ -222,14 +222,79 @@ alias( visibility = ["//visibility:public"], ) - + +# Envoy: Patch + cc_binary( @@ -16,7 +16,7 @@ index 0f6e41e3a..c0d2bbccf 100644 visibility = ["//visibility:public"], deps = ["//src/google/protobuf/compiler:protoc_lib"], ) - + +# Lifted from `rules_proto` +config_setting( + name = "linux-aarch_64", @@ -84,13 +84,13 @@ index 0f6e41e3a..c0d2bbccf 100644 name = "protoc_static", copts = COPTS, diff --git a/python/google/protobuf/__init__.py b/python/google/protobuf/__init__.py -index e7555ee10..a93beb1c5 100644 +index 022974824..418779fd2 100755 --- a/python/google/protobuf/__init__.py +++ b/python/google/protobuf/__init__.py @@ -8,3 +8,10 @@ # Copyright 2007 Google Inc. All Rights Reserved. 
- - __version__ = '5.26.1' + + __version__ = '5.28.2' + + +if __name__ != '__main__': @@ -98,10 +98,24 @@ index e7555ee10..a93beb1c5 100644 + __import__('pkg_resources').declare_namespace(__name__) + except ImportError: + __path__ = __import__('pkgutil').extend_path(__path__, __name__) +diff --git a/src/google/protobuf/compiler/BUILD.bazel b/src/google/protobuf/compiler/BUILD.bazel +index 3f5624d5f..e194b172f 100644 +--- a/src/google/protobuf/compiler/BUILD.bazel ++++ b/src/google/protobuf/compiler/BUILD.bazel +@@ -517,7 +517,7 @@ cc_library( + srcs = ["retention.cc"], + hdrs = ["retention.h"], + strip_include_prefix = "/src", +- visibility = ["//src/google/protobuf:__subpackages__"], ++ visibility = ["//visibility:public"], + deps = [ + "//src/google/protobuf", + "//src/google/protobuf:port", diff --git a/src/google/protobuf/io/BUILD.bazel b/src/google/protobuf/io/BUILD.bazel +index 66819966b..446d75420 100644 --- a/src/google/protobuf/io/BUILD.bazel +++ b/src/google/protobuf/io/BUILD.bazel -@@ -138,6 +138,6 @@ cc_library( +@@ -158,7 +158,7 @@ cc_library( "@com_google_absl//absl/log:absl_log", ] + select({ "//build_defs:config_msvc": [], @@ -109,11 +123,12 @@ diff --git a/src/google/protobuf/io/BUILD.bazel b/src/google/protobuf/io/BUILD.b + "//conditions:default": ["@envoy//bazel/foreign_cc:zlib"], }), ) - + diff --git a/src/google/protobuf/port_def.inc b/src/google/protobuf/port_def.inc ---- a/src/google/protobuf/port_def.inc 2023-06-27 01:17:34.917105764 +0000 -+++ b/src/google/protobuf/port_def.inc 2023-06-27 01:18:12.069060142 +0000 -@@ -1004,7 +1004,7 @@ +index 37d80e591..3dddd4ecf 100644 +--- a/src/google/protobuf/port_def.inc ++++ b/src/google/protobuf/port_def.inc +@@ -856,7 +856,7 @@ static_assert(PROTOBUF_ABSL_MIN(20230125, 3), #pragma clang diagnostic ignored "-Wshorten-64-to-32" // Turn on -Wdeprecated-enum-enum-conversion. This deprecation comes in C++20 // via http://wg21.link/p1120r0. 
@@ -122,27 +137,10 @@ diff --git a/src/google/protobuf/port_def.inc b/src/google/protobuf/port_def.inc // This error has been generally flaky, but we need to disable it specifically // to fix https://github.com/protocolbuffers/protobuf/issues/12313 #pragma clang diagnostic ignored "-Wunused-parameter" -diff --git a/src/google/protobuf/compiler/BUILD.bazel b/src/google/protobuf/compiler/BUILD.bazel ---- a/src/google/protobuf/compiler/BUILD.bazel -+++ b/src/google/protobuf/compiler/BUILD.bazel -@@ -306,7 +306,7 @@ - srcs = ["retention.cc"], - hdrs = ["retention.h"], - strip_include_prefix = "/src", -- visibility = ["//src/google/protobuf:__subpackages__"], -+ visibility = ["//visibility:public"], - deps = [ - "//src/google/protobuf", - "//src/google/protobuf:port", - -diff --git a/src/google/protobuf/port_def.inc b/src/google/protobuf/port_def.inc -index 1c6a24945..c27d0bf2a 100644 ---- a/src/google/protobuf/port_def.inc -+++ b/src/google/protobuf/port_def.inc -@@ -1062,6 +1062,9 @@ static_assert(PROTOBUF_ABSL_MIN(20230125, 3), +@@ -925,6 +925,9 @@ static_assert(PROTOBUF_ABSL_MIN(20230125, 3), #pragma warning(disable: 4125) #endif - + +#if defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wundef" +#endif diff --git a/bazel/repo.bzl b/bazel/repo.bzl index a4e421cf116a..cdc3ee7bd53b 100644 --- a/bazel/repo.bzl +++ b/bazel/repo.bzl @@ -66,9 +66,27 @@ envoy_entry_point( init_data = [":__init__.py"], ) +genrule( + name = "generate_release_hash_bin", + outs = ["generate_release_hash.sh"], + cmd = """ + echo " +#!/usr/bin/env bash + +set -e -o pipefail + +git ls-remote --tags https://github.com/envoyproxy/envoy \\\\ + | grep -E 'refs/tags/v[0-9]+\\\\.[0-9]+\\\\.[0-9]+$$' \\\\ + | sort -u \\\\ + | sha256sum \\\\ + | cut -d ' ' -f 1" > $@ + chmod +x $@ + """ +) + sh_binary( name = "generate_release_hash", - srcs = ["generate_release_hash.sh"], + srcs = [":generate_release_hash_bin"], visibility = ["//visibility:public"], ) @@ -80,11 +98,11 @@ genrule( name = 
"default_release_hash", outs = ["default_release_hash.txt"], cmd = """ - $(location @envoy//bazel:generate_release_hash.sh) %s > $@ - """ % PATH, + $(location :generate_release_hash) > $@ + """, stamp = True, tags = ["no-remote-exec"], - tools = ["@envoy//bazel:generate_release_hash.sh"], + tools = [":generate_release_hash"], ) label_flag( diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 8a65e300d1e4..9a6aae232500 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,6 +1,6 @@ # This should match the schema defined in external_deps.bzl. -PROTOBUF_VERSION = "26.1" +PROTOBUF_VERSION = "28.2" # These names of these deps *must* match the names used in `/bazel/protobuf.patch`, # and both must match the names from the protobuf releases (see @@ -8,11 +8,11 @@ PROTOBUF_VERSION = "26.1" # The names change in upcoming versions. # The shas are calculated from the downloads on the releases page. PROTOC_VERSIONS = dict( - linux_aarch_64 = "64a3b3b5f7dac0c8f9cf1cb85b2b1a237eb628644f6bcb0fb8f23db6e0d66181", - linux_x86_64 = "a7be2928c0454f132c599e25b79b7ad1b57663f2337d7f7e468a1d59b98ec1b0", - osx_aarch_64 = "26a29befa8891ecc48809958c909d284f2b9539a2eb47f22cadc631fe6abe8fd", - osx_x86_64 = "febd8821c3a2a23f72f4641471e0ab6486f4fb07b68111490a27a31681465b3c", - win64 = "9090d135a1159042b13b4e51b210e40cb820d85a5032a6eca5f9b3ca3bdfb539", + linux_aarch_64 = "91d8253cdc0f0f0fc51c2b69c80677996632f525ad84504bfa5b4ee38ad3e49c", + linux_x86_64 = "2febfd42b59ce93a28eb789019a470a3dd0449619bc04f84dad1333da261dec1", + osx_aarch_64 = "7bb048f52841789d9ec61983be0ce4c9e4fb3bd9a143462820ba9a3be0a03797", + osx_x86_64 = "232f07d12bf4806207a79ec2c7378301c52e6f2f7efdd21c0dd416f0bda103ec", + win64 = "4bde19271ed7cab9003570f28c6e4c4d71963eaf1211a86bf3bb25d9b895177a", ) REPOSITORY_LOCATIONS_SPEC = dict( @@ -33,11 +33,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel features", project_desc = "Support Bazel feature detection from 
starlark", project_url = "https://github.com/bazel-contrib/bazel_features", - version = "1.17.0", - sha256 = "bdc12fcbe6076180d835c9dd5b3685d509966191760a0eb10b276025fcb76158", + version = "1.19.0", + sha256 = "3646ffd447753490b77d2380fa63f4d55dd9722e565d84dfda01536b48e183da", urls = ["https://github.com/bazel-contrib/bazel_features/releases/download/v{version}/bazel_features-v{version}.tar.gz"], strip_prefix = "bazel_features-{version}", - release_date = "2024-09-13", + release_date = "2024-10-09", use_category = ["build"], license = "Apache-2.0", license_url = "https://github.com/bazel-contrib/bazel_features/blob/v{version}/LICENSE", @@ -73,10 +73,10 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Apple Rules for Bazel", project_desc = "Bazel rules for Apple platforms", project_url = "https://github.com/bazelbuild/rules_apple", - version = "3.9.0", - sha256 = "f8fa96115c33e128cb72e9b7118a5f9294731a7dda8e36d04ddb582671f48dc1", + version = "3.9.2", + sha256 = "86025f64d723a66438787d089bea4a7bc387877229f927dcb72ee26a8db96917", urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], - release_date = "2024-09-18", + release_date = "2024-09-24", use_category = ["build"], license = "Apache-2.0", license_url = "https://github.com/bazelbuild/rules_apple/blob/{version}/LICENSE", @@ -167,12 +167,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Aspect Bazel helpers", project_desc = "Base Starlark libraries and basic Bazel rules which are useful for constructing rulesets and BUILD files", project_url = "https://github.com/aspect-build/bazel-lib", - version = "2.9.0", - sha256 = "04299d5460ef8ed92f1251d468a3c1ce746f9f3003047c728383c42048950cb5", + version = "2.9.2", + sha256 = "c96a40d7d054950812df4ee4bcb96c424ddbb9d044bc6bcac11468f25e0193d4", strip_prefix = "bazel-lib-{version}", urls = ["https://github.com/aspect-build/bazel-lib/archive/v{version}.tar.gz"], use_category = ["build"], - release_date = 
"2024-09-19", + release_date = "2024-10-15", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/aspect-build/bazel-lib/blob/v{version}/LICENSE", @@ -240,12 +240,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Perfetto", project_desc = "Perfetto Tracing SDK", project_url = "https://perfetto.dev/", - version = "47.0", - sha256 = "9bbd38a0f074038bde6ccbcf5f2ff32587eb60faec254932268ecb6f17f18186", + version = "48.1", + sha256 = "8d1c6bf44f1bdb098ab70cd60da3ce6b6e731e4eb21dd52b2527cbdcf85d984d", strip_prefix = "perfetto-{version}/sdk", urls = ["https://github.com/google/perfetto/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2024-08-07", + release_date = "2024-10-15", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/google/perfetto/blob/v{version}/LICENSE", @@ -591,8 +591,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "OpenTelemetry", project_desc = "Observability framework and toolkit designed to create and manage telemetry data such as traces, metrics, and logs.", project_url = "https://opentelemetry.io", - version = "1.16.1", - sha256 = "b8a78bb2a3a78133dbb08bcd04342f4b1e03cb4a19079b8416d408d905fffc37", + version = "1.17.0", + sha256 = "13542725463f1ea106edaef078c2276065cf3da998cb1d3dcf92630daa3f64d4", strip_prefix = "opentelemetry-cpp-{version}", urls = ["https://github.com/open-telemetry/opentelemetry-cpp/archive/refs/tags/v{version}.tar.gz"], use_category = ["observability_ext"], @@ -601,7 +601,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.tracers.opentelemetry.samplers.always_on", "envoy.tracers.opentelemetry.samplers.dynatrace", ], - release_date = "2024-07-17", + release_date = "2024-10-07", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/open-telemetry/opentelemetry-cpp/blob/v{version}/LICENSE", @@ -952,11 +952,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( # test/common/json:gen_excluded_unicodes to recompute the ranges # excluded from 
differential fuzzing that are populated in # test/common/json/json_sanitizer_test_util.cc. - sha256 = "4fc5ff1b2c339fb86cd3a25f0b5311478ab081e65ad258c6789359cd84d421f8", + sha256 = "b2340aa47faf7ef10a0328190319d3f3bee1b24f426d4ce8f4253b6f27ce16db", strip_prefix = "protobuf-{version}", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2024-03-27", + release_date = "2024-09-18", cpe = "cpe:2.3:a:google:protobuf:*", license = "Protocol Buffers", license_url = "https://github.com/protocolbuffers/protobuf/blob/v{version}/LICENSE", @@ -1469,8 +1469,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel rust rules", project_desc = "Bazel rust rules (used by Wasm)", project_url = "https://github.com/bazelbuild/rules_rust", - version = "0.51.0", - sha256 = "042acfb73469b2d1848fe148d81c3422c61ea47a9e1900f1c9ec36f51e8e7193", + version = "0.52.2", + sha256 = "671ddb3fe5ebcf9dd34d051eca7352fbaf33fa53bf61eed0b75a4c34829e5480", # Note: rules_rust should point to the releases, not archive to avoid the hassle of bootstrapping in crate_universe. # This is described in https://bazelbuild.github.io/rules_rust/crate_universe.html#setup, otherwise bootstrap # is required which in turn requires a system CC toolchains, not the bazel controlled ones. 
@@ -1482,7 +1482,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( ], implied_untracked_deps = ["rules_cc"], extensions = ["envoy.wasm.runtime.wasmtime"], - release_date = "2024-09-19", + release_date = "2024-10-07", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/bazelbuild/rules_rust/blob/{version}/LICENSE.txt", @@ -1592,7 +1592,7 @@ def _compiled_protoc_deps(locations, versions): sha256 = sha, urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protoc-{version}-%s.zip" % platform.replace("_", "-", 1)], use_category = ["dataplane_core", "controlplane"], - release_date = "2024-03-27", + release_date = "2024-09-18", cpe = "N/A", license = "Protocol Buffers", license_url = "https://github.com/protocolbuffers/protobuf/blob/v{version}/LICENSE", diff --git a/changelogs/current.yaml b/changelogs/current.yaml index 5c32d59ef138..03172d327c5e 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -5,12 +5,28 @@ behavior_changes: minor_behavior_changes: # *Changes that may cause incompatibilities for some users, but should not for most* +- area: access_log + change: | + New implementation of the JSON formatter will be enabled by default. + The :ref:`sort_properties ` field will + be ignored in the new implementation because the new implementation always sorts properties. And the new implementation + will always keep the value type in the JSON output. For example, the duration field will always be rendered as a number + instead of a string. + This behavior change could be disabled temporarily by setting the runtime + ``envoy.reloadable_features.logging_with_fast_json_formatter`` to false. +- area: formatter + change: | + The NaN and Infinity values of float will be serialized to ``null`` and ``"inf"`` respectively in the + metadata (``DYNAMIC_METADATA``, ``CLUSTER_METADATA``, etc.) formatter. 
bug_fixes: # *Changes expected to improve the state of the world and are unlikely to have negative effects* removed_config_or_runtime: # *Normally occurs at the end of the* :ref:`deprecation period ` +- area: upstream + change: | + Removed runtime flag ``envoy.restart_features.allow_client_socket_creation_failure`` and legacy code paths. new_features: - area: wasm diff --git a/docs/root/_static/cache-filter-chain.svg b/docs/root/_static/cache-filter-chain.svg new file mode 100644 index 000000000000..fe54227819ff --- /dev/null +++ b/docs/root/_static/cache-filter-chain.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/configuration/http/http_filters/cache_filter.rst b/docs/root/configuration/http/http_filters/cache_filter.rst index 32aa6b24d16e..68deed12bb0a 100644 --- a/docs/root/configuration/http/http_filters/cache_filter.rst +++ b/docs/root/configuration/http/http_filters/cache_filter.rst @@ -7,12 +7,24 @@ Cache filter * :ref:`v3 API reference ` * :ref:`v3 SimpleHTTPCache API reference ` * This filter doesn't support virtual host-specific configurations. +* When the cache is enabled, cacheable requests are only sent through filters in the + :ref:`upstream_http_filters ` + chain and *not* through any filters in the regular filter chain that are further + upstream than the cache filter, while non-cacheable requests still go through the + listener filter chain. It is therefore recommended for consistency that only the + router filter should be further upstream in the listener filter chain than the + cache filter. + +.. image:: /_static/cache-filter-chain.svg + :width: 80% + :align: center The HTTP Cache filter implements most of the complexity of HTTP caching semantics. For HTTP Requests: -* HTTP Cache respects request's ``Cache-Control`` directive. For example, if request comes with ``Cache-Control: no-store`` the request won't be cached. +* HTTP Cache respects request's ``Cache-Control`` directive. 
For example, if request comes with ``Cache-Control: no-store`` the request won't be cached, unless + :ref:`ignore_request_cache_control_header ` is true. * HTTP Cache wont store HTTP HEAD Requests. For HTTP Responses: diff --git a/mobile/library/cc/engine_builder.cc b/mobile/library/cc/engine_builder.cc index 26fafc8188b7..572c8b6cfda8 100644 --- a/mobile/library/cc/engine_builder.cc +++ b/mobile/library/cc/engine_builder.cc @@ -646,6 +646,8 @@ std::unique_ptr EngineBuilder::generate h2_options->mutable_connection_keepalive()->mutable_timeout()->set_seconds( h2_connection_keepalive_timeout_seconds_); h2_options->mutable_max_concurrent_streams()->set_value(100); + h2_options->mutable_initial_stream_window_size()->set_value(initial_stream_window_size_); + h2_options->mutable_initial_connection_window_size()->set_value(initial_connection_window_size_); envoy::extensions::http::header_formatters::preserve_case::v3::PreserveCaseFormatterConfig preserve_case_config; @@ -718,6 +720,16 @@ std::unique_ptr EngineBuilder::generate ->mutable_http3_protocol_options() ->mutable_quic_protocol_options() ->set_client_connection_options(http3_client_connection_options_); + alpn_options.mutable_auto_config() + ->mutable_http3_protocol_options() + ->mutable_quic_protocol_options() + ->mutable_initial_stream_window_size() + ->set_value(initial_stream_window_size_); + alpn_options.mutable_auto_config() + ->mutable_http3_protocol_options() + ->mutable_quic_protocol_options() + ->mutable_initial_connection_window_size() + ->set_value(initial_connection_window_size_); alpn_options.mutable_auto_config()->mutable_alternate_protocols_cache_options()->set_name( "default_alternate_protocols_cache"); for (const auto& [host, port] : quic_hints_) { @@ -841,10 +853,6 @@ std::unique_ptr EngineBuilder::generate use_gro_if_available_); ProtobufWkt::Struct& restart_features = *(*runtime_values.mutable_fields())["restart_features"].mutable_struct_value(); - // TODO(abeyad): This runtime flag is set 
because https://github.com/envoyproxy/envoy/pull/32370 - // needed to be merged with the default off due to unresolved test issues. Once those are fixed, - // and the default for `allow_client_socket_creation_failure` is true, we can remove this. - (*restart_features.mutable_fields())["allow_client_socket_creation_failure"].set_bool_value(true); for (auto& guard_and_value : restart_runtime_guards_) { (*restart_features.mutable_fields())[guard_and_value.first].set_bool_value( guard_and_value.second); diff --git a/mobile/library/cc/engine_builder.h b/mobile/library/cc/engine_builder.h index 0decc13dd788..6d67acf504c7 100644 --- a/mobile/library/cc/engine_builder.h +++ b/mobile/library/cc/engine_builder.h @@ -205,7 +205,10 @@ class EngineBuilder { // https://source.chromium.org/chromium/chromium/src/+/main:net/quic/quic_session_pool.cc;l=790-793;drc=7f04a8e033c23dede6beae129cd212e6d4473d72 // https://source.chromium.org/chromium/chromium/src/+/main:net/third_party/quiche/src/quiche/quic/core/quic_constants.h;l=43-47;drc=34ad7f3844f882baf3d31a6bc6e300acaa0e3fc8 int32_t udp_socket_send_buffer_size_ = 1452 * 20; - + // These are the same values Cronet uses for QUIC: + // https://source.chromium.org/chromium/chromium/src/+/main:net/quic/quic_context.cc;l=21-22;drc=6849bf6b37e96bd1c38a5f77f7deaa28b53779c4;bpv=1;bpt=1 + const uint32_t initial_stream_window_size_ = 6 * 1024 * 1024; // 6MB + const uint32_t initial_connection_window_size_ = 15 * 1024 * 1024; // 15MB int quic_connection_idle_timeout_seconds_ = 30; }; diff --git a/mobile/test/cc/unit/envoy_config_test.cc b/mobile/test/cc/unit/envoy_config_test.cc index f0302a298d12..3d3f161bc8a8 100644 --- a/mobile/test/cc/unit/envoy_config_test.cc +++ b/mobile/test/cc/unit/envoy_config_test.cc @@ -111,12 +111,12 @@ TEST(TestConfig, ConfigIsApplied) { "key: \"always_use_v6\" value { bool_value: true }", "key: \"prefer_quic_client_udp_gro\" value { bool_value: true }", "key: \"test_feature_false\" value { bool_value: true }", - 
"key: \"allow_client_socket_creation_failure\" value { bool_value: true }", "key: \"device_os\" value { string_value: \"probably-ubuntu-on-CI\" } }", "key: \"app_version\" value { string_value: \"1.2.3\" } }", "key: \"app_id\" value { string_value: \"1234-1234-1234\" } }", "validation_context { trusted_ca {", - }; + "initial_stream_window_size { value: 6291456 }", + "initial_connection_window_size { value: 15728640 }"}; for (const auto& string : must_contain) { EXPECT_THAT(config_str, HasSubstr(string)) << "'" << string << "' not found in " << config_str; diff --git a/source/common/formatter/BUILD b/source/common/formatter/BUILD index 76a0fc474cfe..57d08475ef2a 100644 --- a/source/common/formatter/BUILD +++ b/source/common/formatter/BUILD @@ -27,6 +27,7 @@ envoy_cc_library( "//source/common/http:utility_lib", "//source/common/json:json_loader_lib", "//source/common/json:json_streamer_lib", + "//source/common/json:json_utility_lib", "@com_google_absl//absl/strings:str_format", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], @@ -101,6 +102,7 @@ envoy_cc_library( "//source/common/grpc:common_lib", "//source/common/http:utility_lib", "//source/common/json:json_loader_lib", + "//source/common/json:json_utility_lib", "//source/common/protobuf:message_validator_lib", "//source/common/stream_info:utility_lib", ], diff --git a/source/common/formatter/stream_info_formatter.cc b/source/common/formatter/stream_info_formatter.cc index 5ff679a731fd..5463da936071 100644 --- a/source/common/formatter/stream_info_formatter.cc +++ b/source/common/formatter/stream_info_formatter.cc @@ -3,6 +3,7 @@ #include "source/common/common/random_generator.h" #include "source/common/config/metadata.h" #include "source/common/http/utility.h" +#include "source/common/json/json_utility.h" #include "source/common/runtime/runtime_features.h" #include "source/common/stream_info/utility.h" @@ -61,20 +62,11 @@ MetadataFormatter::formatMetadata(const envoy::config::core::v3::Metadata& metad } 
std::string str; + str.reserve(256); if (value.kind_case() == ProtobufWkt::Value::kStringValue) { str = value.string_value(); } else { -#ifdef ENVOY_ENABLE_YAML - absl::StatusOr json_or_error = - MessageUtil::getJsonStringFromMessage(value, false, true); - if (json_or_error.ok()) { - str = json_or_error.value(); - } else { - str = json_or_error.status().message(); - } -#else - IS_ENVOY_BUG("Json support compiled out"); -#endif + Json::Utility::appendValueToString(value, str); } SubstitutionFormatUtils::truncate(str, max_length_); return str; diff --git a/source/common/formatter/substitution_formatter.h b/source/common/formatter/substitution_formatter.h index 2e04904423d5..e2480d1a2f1a 100644 --- a/source/common/formatter/substitution_formatter.h +++ b/source/common/formatter/substitution_formatter.h @@ -16,6 +16,7 @@ #include "source/common/formatter/http_formatter_context.h" #include "source/common/json/json_loader.h" #include "source/common/json/json_streamer.h" +#include "source/common/json/json_utility.h" #include "absl/types/optional.h" #include "re2/re2.h" @@ -439,7 +440,8 @@ class JsonFormatterImplBase : public FormatterBase { } else { // 3. Handle the formatter element with a single provider and value // type needs to be kept. - typedValueToLogLine(formatters, context, info, serializer); + auto value = formatters[0]->formatValueWithContext(context, info); + Json::Utility::appendValueToString(value, log_line); } } @@ -467,48 +469,6 @@ class JsonFormatterImplBase : public FormatterBase { serializer.addRawString(Json::Constants::DoubleQuote); // End the JSON string. 
} - void typedValueToLogLine(const Formatters& formatters, const FormatterContext& context, - const StreamInfo::StreamInfo& info, - JsonStringSerializer& serializer) const { - - const ProtobufWkt::Value value = formatters[0]->formatValueWithContext(context, info); - - switch (value.kind_case()) { - case ProtobufWkt::Value::KIND_NOT_SET: - case ProtobufWkt::Value::kNullValue: - serializer.addNull(); - break; - case ProtobufWkt::Value::kNumberValue: - serializer.addNumber(value.number_value()); - break; - case ProtobufWkt::Value::kStringValue: - serializer.addString(value.string_value()); - break; - case ProtobufWkt::Value::kBoolValue: - serializer.addBool(value.bool_value()); - break; - case ProtobufWkt::Value::kStructValue: - case ProtobufWkt::Value::kListValue: - // Convert the struct or list to string. This may result in a performance - // degradation. But We rarely hit this case. - // Basically only metadata or filter state formatter may hit this case. -#ifdef ENVOY_ENABLE_YAML - absl::StatusOr json_or = - MessageUtil::getJsonStringFromMessage(value, false, true); - if (json_or.ok()) { - // We assume the output of getJsonStringFromMessage is a valid JSON string piece. 
- serializer.addRawString(json_or.value()); - } else { - serializer.addString(json_or.status().ToString()); - } -#else - IS_ENVOY_BUG("Json support compiled out"); - serializer.addRawString(R"EOF("Json support compiled out")EOF"); -#endif - break; - } - } - const std::string empty_value_; using ParsedFormatElement = absl::variant; diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index aadd87857f55..e3e01ffaf38e 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -93,14 +93,8 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, protocol); if (!SOCKET_VALID(result.return_value_)) { - if (Runtime::runtimeFeatureEnabled( - "envoy.restart_features.allow_client_socket_creation_failure")) { - IS_ENVOY_BUG(fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); - return nullptr; - } else { - RELEASE_ASSERT(!SOCKET_VALID(result.return_value_), - fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); - } + IS_ENVOY_BUG(fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); + return nullptr; } IoHandlePtr io_handle = makeSocket(result.return_value_, socket_v6only, domain, options); @@ -108,14 +102,8 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type // Cannot set SOCK_NONBLOCK as a ::socket flag. 
const int rc = io_handle->setBlocking(false).return_value_; if (SOCKET_FAILURE(result.return_value_)) { - if (Runtime::runtimeFeatureEnabled( - "envoy.restart_features.allow_client_socket_creation_failure")) { - IS_ENVOY_BUG(fmt::format("Unable to set socket non-blocking: got error: {}", rc)); - return nullptr; - } else { - RELEASE_ASSERT(SOCKET_FAILURE(rc), - fmt::format("Unable to set socket non-blocking: got error: {}", rc)); - } + IS_ENVOY_BUG(fmt::format("Unable to set socket non-blocking: got error: {}", rc)); + return nullptr; } #endif diff --git a/source/common/protobuf/deterministic_hash.cc b/source/common/protobuf/deterministic_hash.cc index a1c500609e06..62fa7a574515 100644 --- a/source/common/protobuf/deterministic_hash.cc +++ b/source/common/protobuf/deterministic_hash.cc @@ -200,7 +200,7 @@ uint64_t reflectionHashMessage(const Protobuf::Message& message, uint64_t seed) const Protobuf::Descriptor* descriptor = message.GetDescriptor(); seed = HashUtil::xxHash64(descriptor->full_name(), seed); if (descriptor->well_known_type() == Protobuf::Descriptor::WELLKNOWNTYPE_ANY) { - const ProtobufWkt::Any* any = Protobuf::DynamicCastToGenerated(&message); + const ProtobufWkt::Any* any = Protobuf::DynamicCastMessage(&message); ASSERT(any != nullptr, "casting to any should always work for WELLKNOWNTYPE_ANY"); std::unique_ptr submsg = unpackAnyForReflection(*any); if (submsg == nullptr) { diff --git a/source/common/protobuf/protobuf.h b/source/common/protobuf/protobuf.h index 1658cda618d1..7fdc1c2440b9 100644 --- a/source/common/protobuf/protobuf.h +++ b/source/common/protobuf/protobuf.h @@ -69,7 +69,7 @@ using ::google::protobuf::BytesValue; // NOLINT(misc-unused-us using ::google::protobuf::Descriptor; // NOLINT(misc-unused-using-decls) using ::google::protobuf::DescriptorPool; // NOLINT(misc-unused-using-decls) using ::google::protobuf::DescriptorPoolDatabase; // NOLINT(misc-unused-using-decls) -using ::google::protobuf::DynamicCastToGenerated; // 
NOLINT(misc-unused-using-decls) +using ::google::protobuf::DynamicCastMessage; // NOLINT(misc-unused-using-decls) using ::google::protobuf::DynamicMessageFactory; // NOLINT(misc-unused-using-decls) using ::google::protobuf::EnumValueDescriptor; // NOLINT(misc-unused-using-decls) using ::google::protobuf::FieldDescriptor; // NOLINT(misc-unused-using-decls) @@ -93,15 +93,6 @@ using ::google::protobuf::UInt32Value; // NOLINT(misc-unused-us using Message = ::google::protobuf::MessageLite; -template const T* DynamicCastToGenerated(const Message* from) { - return static_cast(from); -} - -template T* DynamicCastToGenerated(Message* from) { - const Message* message_const = from; - return const_cast(DynamicCastToGenerated(message_const)); -} - using ReflectableMessage = std::unique_ptr<::google::protobuf::Message>; using uint32 = uint32_t; using int32 = int32_t; diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index 2bf646a6b907..3a664bde12ac 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -214,18 +214,7 @@ void ProtoExceptionUtil::throwProtoValidationException(const std::string& valida size_t MessageUtil::hash(const Protobuf::Message& message) { #if defined(ENVOY_ENABLE_FULL_PROTOS) - if (Runtime::runtimeFeatureEnabled("envoy.restart_features.use_fast_protobuf_hash")) { - return DeterministicProtoHash::hash(message); - } else { - std::string text_format; - Protobuf::TextFormat::Printer printer; - printer.SetExpandAny(true); - printer.SetUseFieldNumber(true); - printer.SetSingleLineMode(true); - printer.SetHideUnknownFields(true); - printer.PrintToString(message, &text_format); - return HashUtil::xxHash64(text_format); - } + return DeterministicProtoHash::hash(message); #else return HashUtil::xxHash64(message.SerializeAsString()); #endif diff --git a/source/common/protobuf/visitor.cc b/source/common/protobuf/visitor.cc index f03a84e7f536..d1c9b5d9f9f8 100644 --- a/source/common/protobuf/visitor.cc 
+++ b/source/common/protobuf/visitor.cc @@ -23,7 +23,7 @@ absl::Status traverseMessageWorker(ConstProtoVisitor& visitor, const Protobuf::M absl::string_view target_type_url; if (message.GetTypeName() == "google.protobuf.Any") { - auto* any_message = Protobuf::DynamicCastToGenerated(&message); + auto* any_message = Protobuf::DynamicCastMessage(&message); inner_message = Helper::typeUrlToMessage(any_message->type_url()); target_type_url = any_message->type_url(); // inner_message must be valid as parsing would have already failed to load if there was an diff --git a/source/common/protobuf/visitor_helper.h b/source/common/protobuf/visitor_helper.h index 138551b8f532..4c6ca0f42668 100644 --- a/source/common/protobuf/visitor_helper.h +++ b/source/common/protobuf/visitor_helper.h @@ -15,7 +15,7 @@ std::unique_ptr typeUrlToMessage(absl::string_view type_url); template absl::StatusOr, absl::string_view>> convertTypedStruct(const Protobuf::Message& message) { - auto* typed_struct = Protobuf::DynamicCastToGenerated(&message); + auto* typed_struct = Protobuf::DynamicCastMessage(&message); auto inner_message = typeUrlToMessage(typed_struct->type_url()); absl::string_view target_type_url = typed_struct->type_url(); // inner_message might be invalid as we did not previously check type_url during loading. 
diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index babfb11c2a0a..772f6dd62b0b 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -68,6 +68,7 @@ RUNTIME_GUARD(envoy_reloadable_features_http_route_connect_proxy_by_default); RUNTIME_GUARD(envoy_reloadable_features_internal_authority_header_validator); RUNTIME_GUARD(envoy_reloadable_features_jwt_authn_remove_jwt_from_query_params); RUNTIME_GUARD(envoy_reloadable_features_jwt_authn_validate_uri); +RUNTIME_GUARD(envoy_reloadable_features_logging_with_fast_json_formatter); RUNTIME_GUARD(envoy_reloadable_features_lua_flow_control_while_http_call); RUNTIME_GUARD(envoy_reloadable_features_mmdb_files_reload_enabled); RUNTIME_GUARD(envoy_reloadable_features_no_extension_lookup_by_name); @@ -109,12 +110,10 @@ RUNTIME_GUARD(envoy_reloadable_features_validate_grpc_header_before_log_grpc_sta RUNTIME_GUARD(envoy_reloadable_features_validate_upstream_headers); RUNTIME_GUARD(envoy_reloadable_features_xds_failover_to_primary_enabled); RUNTIME_GUARD(envoy_reloadable_features_xdstp_path_avoid_colon_encoding); -RUNTIME_GUARD(envoy_restart_features_allow_client_socket_creation_failure); RUNTIME_GUARD(envoy_restart_features_allow_slot_destroy_on_worker_threads); RUNTIME_GUARD(envoy_restart_features_fix_dispatcher_approximate_now); RUNTIME_GUARD(envoy_restart_features_quic_handle_certs_with_shared_tls_code); RUNTIME_GUARD(envoy_restart_features_use_eds_cache_for_ads); -RUNTIME_GUARD(envoy_restart_features_use_fast_protobuf_hash); // Begin false flags. Most of them should come with a TODO to flip true. @@ -162,9 +161,6 @@ FALSE_RUNTIME_GUARD(envoy_reloadable_features_explicit_internal_address_config); // compliance restrictions. FALSE_RUNTIME_GUARD(envoy_reloadable_features_google_grpc_disable_tls_13); -// A flag to enable the usage of the latest JSON formatter for logging. 
-// TODO(wbpcode): flip to true after Envoy v1.32.0 is released. -FALSE_RUNTIME_GUARD(envoy_reloadable_features_logging_with_fast_json_formatter); // TODO(yanavlasov): Flip to true after prod testing. // Controls whether a stream stays open when HTTP/2 or HTTP/3 upstream half closes // before downstream. diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index c23aed876c37..5fc5177412be 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -128,9 +128,6 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { friend class RedisClusterFactory; friend class RedisClusterTest; - friend class RedisClusterTest; - friend class RedisClsuterFactory; - void startPreInit() override; void updateAllHosts(const Upstream::HostVector& hosts_added, diff --git a/source/extensions/filters/http/cache/BUILD b/source/extensions/filters/http/cache/BUILD index 9107e06fc57a..c8bb391d51cc 100644 --- a/source/extensions/filters/http/cache/BUILD +++ b/source/extensions/filters/http/cache/BUILD @@ -14,8 +14,15 @@ envoy_extension_package() envoy_cc_library( name = "cache_filter_lib", - srcs = ["cache_filter.cc"], - hdrs = ["cache_filter.h"], + srcs = [ + "cache_filter.cc", + "upstream_request.cc", + ], + hdrs = [ + "cache_filter.h", + "filter_state.h", + "upstream_request.h", + ], deps = [ ":cache_custom_headers", ":cache_entry_utils_lib", diff --git a/source/extensions/filters/http/cache/cache_filter.cc b/source/extensions/filters/http/cache/cache_filter.cc index 88aed69f9f08..0ca04ed2131d 100644 --- a/source/extensions/filters/http/cache/cache_filter.cc +++ b/source/extensions/filters/http/cache/cache_filter.cc @@ -9,6 +9,7 @@ #include "source/extensions/filters/http/cache/cache_entry_utils.h" #include "source/extensions/filters/http/cache/cache_filter_logging_info.h" #include "source/extensions/filters/http/cache/cacheability_utils.h" +#include 
"source/extensions/filters/http/cache/upstream_request.h" #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" @@ -20,10 +21,6 @@ namespace HttpFilters { namespace Cache { namespace { -inline bool isResponseNotModified(const Http::ResponseHeaderMap& response_headers) { - return Http::Utility::getResponseStatus(response_headers) == enumToInt(Http::Code::NotModified); -} - // This value is only used if there is no encoderBufferLimit on the stream; // without *some* constraint here, a very large chunk can be requested and // attempt to load into a memory buffer. @@ -45,7 +42,8 @@ CacheFilterConfig::CacheFilterConfig( const envoy::extensions::filters::http::cache::v3::CacheConfig& config, Server::Configuration::CommonFactoryContext& context) : vary_allow_list_(config.allowed_vary_headers(), context), time_source_(context.timeSource()), - ignore_request_cache_control_header_(config.ignore_request_cache_control_header()) {} + ignore_request_cache_control_header_(config.ignore_request_cache_control_header()), + cluster_manager_(context.clusterManager()) {} CacheFilter::CacheFilter(std::shared_ptr config, std::shared_ptr http_cache) @@ -56,19 +54,41 @@ void CacheFilter::onDestroy() { if (lookup_ != nullptr) { lookup_->onDestroy(); } - if (insert_queue_ != nullptr) { - // The filter can complete and be destroyed while there is still data being - // written to the cache. In this case the filter hands ownership of the - // queue to itself, which cancels all the callbacks to the filter, but allows - // the queue to complete any write operations before deleting itself. - // - // In the case that the queue is already empty, or in a state which cannot - // complete, setSelfOwned will provoke the queue to abort the write operation. 
- insert_queue_->setSelfOwned(std::move(insert_queue_)); - insert_queue_.reset(); + if (upstream_request_ != nullptr) { + upstream_request_->disconnectFilter(); + upstream_request_ = nullptr; } } +void CacheFilter::sendUpstreamRequest(Http::RequestHeaderMap& request_headers) { + Router::RouteConstSharedPtr route = decoder_callbacks_->route(); + const Router::RouteEntry* route_entry = (route == nullptr) ? nullptr : route->routeEntry(); + if (route_entry == nullptr) { + return sendNoRouteResponse(); + } + Upstream::ThreadLocalCluster* thread_local_cluster = + config_->clusterManager().getThreadLocalCluster(route_entry->clusterName()); + if (thread_local_cluster == nullptr) { + return sendNoClusterResponse(route_entry->clusterName()); + } + upstream_request_ = + UpstreamRequest::create(this, std::move(lookup_), std::move(lookup_result_), cache_, + thread_local_cluster->httpAsyncClient(), config_->upstreamOptions()); + upstream_request_->sendHeaders(request_headers); +} + +void CacheFilter::sendNoRouteResponse() { + decoder_callbacks_->sendLocalReply(Http::Code::NotFound, "", nullptr, absl::nullopt, + "cache_no_route"); +} + +void CacheFilter::sendNoClusterResponse(absl::string_view cluster_name) { + ENVOY_STREAM_LOG(debug, "upstream cluster '{}' was not available to cache", *decoder_callbacks_, + cluster_name); + decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, + "cache_no_cluster"); +} + void CacheFilter::onStreamComplete() { LookupStatus lookup_status = lookupStatus(); InsertStatus insert_status = insertStatus(); @@ -117,9 +137,16 @@ Http::FilterHeadersStatus CacheFilter::decodeHeaders(Http::RequestHeaderMap& hea return Http::FilterHeadersStatus::StopAllIterationAndWatermark; } -Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, - bool end_stream) { - if (filter_state_ == FilterState::DecodeServingFromCache) { +void CacheFilter::onUpstreamRequestComplete() { upstream_request_ = 
nullptr; } + +void CacheFilter::onUpstreamRequestReset() { + upstream_request_ = nullptr; + decoder_callbacks_->sendLocalReply(Http::Code::ServiceUnavailable, "", nullptr, absl::nullopt, + "cache_upstream_reset"); +} + +Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool) { + if (filter_state_ == FilterState::ServingFromCache) { // This call was invoked during decoding by decoder_callbacks_->encodeHeaders because a fresh // cached response was found and is being added to the encoding stream -- ignore it. return Http::FilterHeadersStatus::Continue; @@ -138,96 +165,16 @@ Http::FilterHeadersStatus CacheFilter::encodeHeaders(Http::ResponseHeaderMap& he ENVOY_BUG(Http::Utility::getResponseStatus(headers) != Envoy::enumToInt(Http::Code::RequestTimeout), "Request timed out while cache lookup was outstanding."); - filter_state_ = FilterState::NotServingFromCache; + // Cancel the lookup since it's now not useful. + lookup_->onDestroy(); + lookup_ = nullptr; return Http::FilterHeadersStatus::Continue; } - if (filter_state_ == FilterState::ValidatingCachedResponse && isResponseNotModified(headers)) { - processSuccessfulValidation(headers); - // Stop the encoding stream until the cached response is fetched & added to the encoding stream. - if (is_head_request_) { - // Return since HEAD requests are not cached - return Http::FilterHeadersStatus::Continue; - } else { - return Http::FilterHeadersStatus::StopIteration; - } - } - - // Either a cache miss or a cache entry that is no longer valid. - // Check if the new response can be cached. 
- if (request_allows_inserts_ && !is_head_request_ && - CacheabilityUtils::isCacheableResponse(headers, config_->varyAllowList())) { - ENVOY_STREAM_LOG(debug, "CacheFilter::encodeHeaders inserting headers", *encoder_callbacks_); - auto insert_context = cache_->makeInsertContext(std::move(lookup_), *encoder_callbacks_); - if (insert_context != nullptr) { - // The callbacks passed to CacheInsertQueue are all called through the dispatcher, - // so they're thread-safe. During CacheFilter::onDestroy the queue is given ownership - // of itself and all the callbacks are cancelled, so they are also filter-destruction-safe. - insert_queue_ = - std::make_unique(cache_, *encoder_callbacks_, std::move(insert_context), - // Cache aborted callback. - [this]() { - insert_queue_ = nullptr; - insert_status_ = InsertStatus::InsertAbortedByCache; - }); - // Add metadata associated with the cached response. Right now this is only response_time; - const ResponseMetadata metadata = {config_->timeSource().systemTime()}; - insert_queue_->insertHeaders(headers, metadata, end_stream); - } - if (end_stream) { - insert_status_ = InsertStatus::InsertSucceeded; - } - // insert_status_ remains absl::nullopt if end_stream == false, as we have not completed the - // insertion yet. - } else { - insert_status_ = InsertStatus::NoInsertResponseNotCacheable; - } - filter_state_ = FilterState::NotServingFromCache; + IS_ENVOY_BUG("encodeHeaders should not be called except under the conditions handled above"); return Http::FilterHeadersStatus::Continue; } -Http::FilterDataStatus CacheFilter::encodeData(Buffer::Instance& data, bool end_stream) { - if (filter_state_ == FilterState::DecodeServingFromCache) { - // This call was invoked during decoding by decoder_callbacks_->encodeData because a fresh - // cached response was found and is being added to the encoding stream -- ignore it. 
- return Http::FilterDataStatus::Continue; - } - if (filter_state_ == FilterState::EncodeServingFromCache) { - // Stop the encoding stream until the cached response is fetched & added to the encoding stream. - return Http::FilterDataStatus::StopIterationAndBuffer; - } - if (insert_queue_ != nullptr) { - ENVOY_STREAM_LOG(debug, "CacheFilter::encodeData inserting body", *encoder_callbacks_); - insert_queue_->insertBody(data, end_stream); - if (end_stream) { - // We don't actually know if the insert succeeded, but as far as the - // filter is concerned it has been fully handed off to the cache - // implementation. - insert_status_ = InsertStatus::InsertSucceeded; - } - } - return Http::FilterDataStatus::Continue; -} - -Http::FilterTrailersStatus CacheFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { - if (filter_state_ == FilterState::DecodeServingFromCache) { - // This call was invoked during decoding by decoder_callbacks_->encodeTrailers because a fresh - // cached response was found and is being added to the encoding stream -- ignore it. - return Http::FilterTrailersStatus::Continue; - } - if (filter_state_ == FilterState::EncodeServingFromCache) { - // Stop the encoding stream until the cached response is fetched & added to the encoding stream. 
- return Http::FilterTrailersStatus::StopIteration; - } - if (insert_queue_ != nullptr) { - ENVOY_STREAM_LOG(debug, "CacheFilter::encodeTrailers inserting trailers", *encoder_callbacks_); - insert_queue_->insertTrailers(trailers); - } - insert_status_ = InsertStatus::InsertSucceeded; - - return Http::FilterTrailersStatus::Continue; -} - /*static*/ LookupStatus CacheFilter::resolveLookupStatus(absl::optional cache_entry_status, FilterState filter_state) { @@ -246,7 +193,7 @@ CacheFilter::resolveLookupStatus(absl::optional cache_entry_st switch (filter_state) { case FilterState::ValidatingCachedResponse: return LookupStatus::RequestIncomplete; - case FilterState::EncodeServingFromCache: + case FilterState::ServingFromCache: ABSL_FALLTHROUGH_INTENDED; case FilterState::ResponseServedFromCache: // Functionally a cache hit, this is differentiated for metrics reporting. @@ -255,8 +202,6 @@ CacheFilter::resolveLookupStatus(absl::optional cache_entry_st return LookupStatus::StaleHitWithFailedValidation; case FilterState::Initial: ABSL_FALLTHROUGH_INTENDED; - case FilterState::DecodeServingFromCache: - ABSL_FALLTHROUGH_INTENDED; case FilterState::Destroyed: IS_ENVOY_BUG(absl::StrCat("Unexpected filter state in requestCacheStatus: cache lookup " "response required validation, but filter state is ", @@ -287,9 +232,7 @@ CacheFilter::resolveLookupStatus(absl::optional cache_entry_st // GCOV_EXCL_START case FilterState::ValidatingCachedResponse: ABSL_FALLTHROUGH_INTENDED; - case FilterState::DecodeServingFromCache: - ABSL_FALLTHROUGH_INTENDED; - case FilterState::EncodeServingFromCache: + case FilterState::ServingFromCache: ABSL_FALLTHROUGH_INTENDED; case FilterState::ResponseServedFromCache: ABSL_FALLTHROUGH_INTENDED; @@ -365,7 +308,8 @@ void CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& reque // TODO(yosrym93): Handle request only-if-cached directive lookup_result_ = std::make_unique(std::move(result)); - switch (lookup_result_->cache_entry_status_) { + 
cache_entry_status_ = lookup_result_->cache_entry_status_; + switch (cache_entry_status_.value()) { case CacheEntryStatus::FoundNotModified: PANIC("unsupported code"); case CacheEntryStatus::RequiresValidation: @@ -383,7 +327,7 @@ void CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& reque handleCacheHit(/* end_stream_after_headers = */ end_stream); return; case CacheEntryStatus::Unusable: - decoder_callbacks_->continueDecoding(); + sendUpstreamRequest(request_headers); return; case CacheEntryStatus::LookupError: filter_state_ = FilterState::NotServingFromCache; @@ -392,9 +336,9 @@ void CacheFilter::onHeaders(LookupResult&& result, Http::RequestHeaderMap& reque return; } ENVOY_LOG(error, "Unhandled CacheEntryStatus in CacheFilter::onHeaders: {}", - cacheEntryStatusString(lookup_result_->cache_entry_status_)); + cacheEntryStatusString(cache_entry_status_.value())); // Treat unhandled status as a cache miss. - decoder_callbacks_->continueDecoding(); + sendUpstreamRequest(request_headers); } // TODO(toddmgreer): Handle downstream backpressure. @@ -422,14 +366,11 @@ void CacheFilter::onBody(Buffer::InstancePtr&& body, bool end_stream) { remaining_ranges_.erase(remaining_ranges_.begin()); } else { ASSERT(false, "Received oversized body from cache."); - filter_state_ == FilterState::DecodeServingFromCache ? decoder_callbacks_->resetStream() - : encoder_callbacks_->resetStream(); + decoder_callbacks_->resetStream(); return; } - filter_state_ == FilterState::DecodeServingFromCache - ? decoder_callbacks_->encodeData(*body, end_stream) - : encoder_callbacks_->addEncodedData(*body, true); + decoder_callbacks_->encodeData(*body, end_stream); if (end_stream) { finalizeEncodingCachedResponse(); @@ -454,25 +395,16 @@ void CacheFilter::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { // The filter is being destroyed, any callbacks should be ignored. 
return; } - if (filter_state_ == FilterState::DecodeServingFromCache) { - decoder_callbacks_->encodeTrailers(std::move(trailers)); - // Filter can potentially be destroyed during encodeTrailers. - if (filter_state_ == FilterState::Destroyed) { - return; - } - } else { - Http::ResponseTrailerMap& response_trailers = encoder_callbacks_->addEncodedTrailers(); - // Filter can potentially be destroyed during addEncodedTrailers. - if (filter_state_ == FilterState::Destroyed) { - return; - } - response_trailers = std::move(*trailers); + decoder_callbacks_->encodeTrailers(std::move(trailers)); + // Filter can potentially be destroyed during encodeTrailers. + if (filter_state_ == FilterState::Destroyed) { + return; } finalizeEncodingCachedResponse(); } void CacheFilter::handleCacheHit(bool end_stream_after_headers) { - filter_state_ = FilterState::DecodeServingFromCache; + filter_state_ = FilterState::ServingFromCache; insert_status_ = InsertStatus::NoInsertCacheHit; encodeCachedResponse(end_stream_after_headers); } @@ -484,7 +416,7 @@ void CacheFilter::handleCacheHitWithRangeRequest() { return; } if (!lookup_result_->range_details_->satisfiable_) { - filter_state_ = FilterState::DecodeServingFromCache; + filter_state_ = FilterState::ServingFromCache; insert_status_ = InsertStatus::NoInsertCacheHit; lookup_result_->headers_->setStatus( static_cast(Envoy::Http::Code::RangeNotSatisfiable)); @@ -503,7 +435,6 @@ void CacheFilter::handleCacheHitWithRangeRequest() { // is 0. 
lookup_result_->setContentLength(0); encodeCachedResponse(/* end_stream_after_headers = */ true); - decoder_callbacks_->continueDecoding(); return; } @@ -520,7 +451,7 @@ void CacheFilter::handleCacheHitWithRangeRequest() { return; } - filter_state_ = FilterState::DecodeServingFromCache; + filter_state_ = FilterState::ServingFromCache; insert_status_ = InsertStatus::NoInsertCacheHit; lookup_result_->headers_->setStatus(static_cast(Envoy::Http::Code::PartialContent)); @@ -535,84 +466,12 @@ void CacheFilter::handleCacheHitWithRangeRequest() { lookup_result_->setContentLength(ranges[0].length()); remaining_ranges_ = std::move(ranges); encodeCachedResponse(/* end_stream_after_headers = */ false); - decoder_callbacks_->continueDecoding(); } void CacheFilter::handleCacheHitWithValidation(Envoy::Http::RequestHeaderMap& request_headers) { filter_state_ = FilterState::ValidatingCachedResponse; injectValidationHeaders(request_headers); - decoder_callbacks_->continueDecoding(); -} - -void CacheFilter::processSuccessfulValidation(Http::ResponseHeaderMap& response_headers) { - ASSERT(lookup_result_, "CacheFilter trying to validate a non-existent lookup result"); - ASSERT( - filter_state_ == FilterState::ValidatingCachedResponse, - "processSuccessfulValidation must only be called when a cached response is being validated"); - ASSERT(isResponseNotModified(response_headers), - "processSuccessfulValidation must only be called with 304 responses"); - - // Check whether the cached entry should be updated before modifying the 304 response. - const bool should_update_cached_entry = shouldUpdateCachedEntry(response_headers); - - filter_state_ = FilterState::EncodeServingFromCache; - - // Replace the 304 response status code with the cached status code. - response_headers.setStatus(lookup_result_->headers_->getStatusValue()); - - // Remove content length header if the 304 had one; if the cache entry had a - // content length header it will be added by the header adding block below. 
- response_headers.removeContentLength(); - - // A response that has been validated should not contain an Age header as it is equivalent to a - // freshly served response from the origin, unless the 304 response has an Age header, which - // means it was served by an upstream cache. - // Remove any existing Age header in the cached response. - lookup_result_->headers_->removeInline(CacheCustomHeaders::age()); - - // Add any missing headers from the cached response to the 304 response. - lookup_result_->headers_->iterate([&response_headers](const Http::HeaderEntry& cached_header) { - // TODO(yosrym93): Try to avoid copying the header key twice. - Http::LowerCaseString key(cached_header.key().getStringView()); - absl::string_view value = cached_header.value().getStringView(); - if (response_headers.get(key).empty()) { - response_headers.setCopy(key, value); - } - return Http::HeaderMap::Iterate::Continue; - }); - - if (should_update_cached_entry) { - // TODO(yosrym93): else the cached entry should be deleted. - // Update metadata associated with the cached response. Right now this is only response_time; - const ResponseMetadata metadata = {config_->timeSource().systemTime()}; - cache_->updateHeaders(*lookup_, response_headers, metadata, - [](bool updated ABSL_ATTRIBUTE_UNUSED) {}); - insert_status_ = InsertStatus::HeaderUpdate; - } - - // A cache entry was successfully validated -> encode cached body and trailers. 
- encodeCachedResponse(/* end_stream_after_headers = */ false); -} - -// TODO(yosrym93): Write a test that exercises this when SimpleHttpCache implements updateHeaders -bool CacheFilter::shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const { - ASSERT(isResponseNotModified(response_headers), - "shouldUpdateCachedEntry must only be called with 304 responses"); - ASSERT(lookup_result_, "shouldUpdateCachedEntry precondition unsatisfied: lookup_result_ " - "does not point to a cache lookup result"); - ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, - "shouldUpdateCachedEntry precondition unsatisfied: the " - "CacheFilter is not validating a cache lookup result"); - - // According to: https://httpwg.org/specs/rfc7234.html#freshening.responses, - // and assuming a single cached response per key: - // If the 304 response contains a strong validator (etag) that does not match the cached response, - // the cached response should not be updated. - const Http::HeaderEntry* response_etag = response_headers.getInline(CacheCustomHeaders::etag()); - const Http::HeaderEntry* cached_etag = - lookup_result_->headers_->getInline(CacheCustomHeaders::etag()); - return !response_etag || (cached_etag && cached_etag->value().getStringView() == - response_etag->value().getStringView()); + sendUpstreamRequest(request_headers); } void CacheFilter::injectValidationHeaders(Http::RequestHeaderMap& request_headers) { @@ -649,31 +508,20 @@ void CacheFilter::encodeCachedResponse(bool end_stream_after_headers) { "does not point to a cache lookup result"); // Set appropriate response flags and codes. - Http::StreamFilterCallbacks* callbacks = - filter_state_ == FilterState::DecodeServingFromCache - ? 
static_cast(decoder_callbacks_) - : static_cast(encoder_callbacks_); - - callbacks->streamInfo().setResponseFlag(StreamInfo::CoreResponseFlag::ResponseFromCacheFilter); - callbacks->streamInfo().setResponseCodeDetails( + decoder_callbacks_->streamInfo().setResponseFlag( + StreamInfo::CoreResponseFlag::ResponseFromCacheFilter); + decoder_callbacks_->streamInfo().setResponseCodeDetails( CacheResponseCodeDetails::get().ResponseFromCacheFilter); - // If the filter is encoding, 304 response headers and cached headers are merged in encodeHeaders. - // If the filter is decoding, we need to serve response headers from cache directly. - if (filter_state_ == FilterState::DecodeServingFromCache) { - decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), - is_head_request_ || end_stream_after_headers, - CacheResponseCodeDetails::get().ResponseFromCacheFilter); - // Filter can potentially be destroyed during encodeHeaders. - if (filter_state_ == FilterState::Destroyed) { - return; - } - } - if (filter_state_ == FilterState::EncodeServingFromCache && is_head_request_) { - filter_state_ = FilterState::ResponseServedFromCache; + decoder_callbacks_->encodeHeaders(std::move(lookup_result_->headers_), + is_head_request_ || end_stream_after_headers, + CacheResponseCodeDetails::get().ResponseFromCacheFilter); + // Filter can potentially be destroyed during encodeHeaders. 
+ if (filter_state_ == FilterState::Destroyed) { return; } - if (end_stream_after_headers || is_head_request_) { + if (is_head_request_ || end_stream_after_headers) { + filter_state_ = FilterState::ResponseServedFromCache; return; } if (remaining_ranges_.empty() && lookup_result_->content_length_.value_or(1) > 0) { @@ -689,15 +537,6 @@ void CacheFilter::encodeCachedResponse(bool end_stream_after_headers) { } void CacheFilter::finalizeEncodingCachedResponse() { - if (filter_state_ == FilterState::EncodeServingFromCache) { - // encodeHeaders returned StopIteration waiting for finishing encoding the cached response -- - // continue encoding. - encoder_callbacks_->continueEncoding(); - // Filter can potentially be destroyed during continueEncoding. - if (filter_state_ == FilterState::Destroyed) { - return; - } - } filter_state_ = FilterState::ResponseServedFromCache; } @@ -706,17 +545,13 @@ LookupStatus CacheFilter::lookupStatus() const { return LookupStatus::RequestIncomplete; } - if (lookup_result_ != nullptr) { - return resolveLookupStatus(lookup_result_->cache_entry_status_, filter_state_); - } else { - return resolveLookupStatus(absl::nullopt, filter_state_); - } + return resolveLookupStatus(cache_entry_status_, filter_state_); } InsertStatus CacheFilter::insertStatus() const { - return insert_status_.value_or((insert_queue_ == nullptr) + return insert_status_.value_or((upstream_request_ == nullptr) ? 
InsertStatus::NoInsertRequestIncomplete - : InsertStatus::InsertAbortedResponseIncomplete); + : InsertStatus::FilterAbortedBeforeInsertComplete); } } // namespace Cache diff --git a/source/extensions/filters/http/cache/cache_filter.h b/source/extensions/filters/http/cache/cache_filter.h index 923a1cf1975e..3669bbf824c3 100644 --- a/source/extensions/filters/http/cache/cache_filter.h +++ b/source/extensions/filters/http/cache/cache_filter.h @@ -1,8 +1,6 @@ #pragma once -#include #include -#include #include #include "envoy/extensions/filters/http/cache/v3/cache.pb.h" @@ -10,7 +8,7 @@ #include "source/common/common/logger.h" #include "source/extensions/filters/http/cache/cache_filter_logging_info.h" #include "source/extensions/filters/http/cache/cache_headers_utils.h" -#include "source/extensions/filters/http/cache/cache_insert_queue.h" +#include "source/extensions/filters/http/cache/filter_state.h" #include "source/extensions/filters/http/cache/http_cache.h" #include "source/extensions/filters/http/common/pass_through_filter.h" @@ -19,32 +17,7 @@ namespace Extensions { namespace HttpFilters { namespace Cache { -enum class FilterState { - Initial, - - // Cache lookup found a cached response that requires validation. - ValidatingCachedResponse, - - // Cache lookup found a fresh cached response and it is being added to the encoding stream. - DecodeServingFromCache, - - // A cached response was successfully validated and it is being added to the encoding stream - EncodeServingFromCache, - - // The cached response was successfully added to the encoding stream (either during decoding or - // encoding). - ResponseServedFromCache, - - // The filter won't serve a response from the cache, whether because the request wasn't cacheable, - // there was no response in cache, the response in cache couldn't be served, or the request was - // terminated before the cached response could be written. This may be set during decoding or - // encoding. 
- NotServingFromCache, - - // CacheFilter::onDestroy has been called, the filter will be destroyed soon. Any triggered - // callbacks should be ignored. - Destroyed -}; +class UpstreamRequest; class CacheFilterConfig { public: @@ -54,12 +27,16 @@ class CacheFilterConfig { // The allow list rules that decide if a header can be varied upon. const VaryAllowList& varyAllowList() const { return vary_allow_list_; } TimeSource& timeSource() const { return time_source_; } + const Http::AsyncClient::StreamOptions& upstreamOptions() const { return upstream_options_; } + Upstream::ClusterManager& clusterManager() const { return cluster_manager_; } bool ignoreRequestCacheControlHeader() const { return ignore_request_cache_control_header_; } private: const VaryAllowList vary_allow_list_; TimeSource& time_source_; const bool ignore_request_cache_control_header_; + Upstream::ClusterManager& cluster_manager_; + Http::AsyncClient::StreamOptions upstream_options_; }; /** @@ -80,13 +57,31 @@ class CacheFilter : public Http::PassThroughFilter, // Http::StreamEncoderFilter Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) override; - Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override; - Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; static LookupStatus resolveLookupStatus(absl::optional cache_entry_status, FilterState filter_state); private: + // For a cache miss that may be cacheable, the upstream request is sent outside of the usual + // filter chain so that the request can continue even if the downstream client disconnects. + void sendUpstreamRequest(Http::RequestHeaderMap& request_headers); + + // In the event that there is no matching route when attempting to sendUpstreamRequest, + // send a 404 locally. + void sendNoRouteResponse(); + + // In the event that there is no available cluster when attempting to sendUpstreamRequest, + // send a 503 locally. 
+ void sendNoClusterResponse(absl::string_view cluster_name); + + // Called by UpstreamRequest if it is reset before CacheFilter is destroyed. + // CacheFilter must make no more calls to upstream_request_ once this has been called. + void onUpstreamRequestReset(); + + // Called by UpstreamRequest if it finishes without reset before CacheFilter is destroyed. + // CacheFilter must make no more calls to upstream_request_ once this has been called. + void onUpstreamRequestComplete(); + // Utility functions; make any necessary checks and call the corresponding lookup_ functions void getHeaders(Http::RequestHeaderMap& request_headers); void getBody(); @@ -94,7 +89,7 @@ class CacheFilter : public Http::PassThroughFilter, // Callbacks for HttpCache to call when headers/body/trailers are ready. void onHeaders(LookupResult&& result, Http::RequestHeaderMap& request_headers, bool end_stream); - void onBody(Buffer::InstancePtr&& bod, bool end_stream); + void onBody(Buffer::InstancePtr&& body, bool end_stream); void onTrailers(Http::ResponseTrailerMapPtr&& trailers); // Set required state in the CacheFilter for handling a cache hit. @@ -108,16 +103,6 @@ class CacheFilter : public Http::PassThroughFilter, // validation is required. void handleCacheHitWithValidation(Envoy::Http::RequestHeaderMap& request_headers); - // Precondition: lookup_result_ points to a cache lookup result that requires validation. - // filter_state_ is ValidatingCachedResponse. - // Serves a validated cached response after updating it with a 304 response. - void processSuccessfulValidation(Http::ResponseHeaderMap& response_headers); - - // Precondition: lookup_result_ points to a cache lookup result that requires validation. - // filter_state_ is ValidatingCachedResponse. - // Checks if a cached entry should be updated with a 304 response. 
- bool shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const; - // Precondition: lookup_result_ points to a cache lookup result that requires validation. // Should only be called during onHeaders as it modifies RequestHeaderMap. // Adds required conditional headers for cache validation to the request headers @@ -128,6 +113,9 @@ class CacheFilter : public Http::PassThroughFilter, // Adds a cache lookup result to the response encoding stream. // Can be called during decoding if a valid cache hit is found, // or during encoding if a cache entry was validated successfully. + // + // When validating, headers should be set to the merged values from the validation + // response and the lookup_result_; if unset, the headers from the lookup_result_ are used. void encodeCachedResponse(bool end_stream_after_headers); // Precondition: finished adding a response from cache to the response encoding stream. @@ -142,13 +130,18 @@ class CacheFilter : public Http::PassThroughFilter, // being cancelled. InsertStatus insertStatus() const; - // insert_queue_ ownership may be passed to the queue itself during - // CacheFilter::onDestroy, allowing the insert queue to outlive the filter - // while the necessary cache write operations complete. - std::unique_ptr insert_queue_; + // upstream_request_ belongs to the object itself, so that it can be disconnected + // from the filter and still complete the cache-write in the event that the + // downstream disconnects. The filter and the UpstreamRequest must communicate to + // each other their separate destruction-triggers. + // When CacheFilter is destroyed first it should call + // upstream_request_->disconnectFilter() + // and if upstream_request_ is destroyed first, it will call onUpstreamRequestReset. 
+ UpstreamRequest* upstream_request_ = nullptr; std::shared_ptr cache_; LookupContextPtr lookup_; LookupResultPtr lookup_result_; + absl::optional cache_entry_status_; // Tracks what body bytes still need to be read from the cache. This is // currently only one Range, but will expand when full range support is added. Initialized by @@ -169,6 +162,8 @@ class CacheFilter : public Http::PassThroughFilter, // The status of the insert operation or header update, or decision not to insert or update. // If it's too early to determine the final status, this is empty. absl::optional insert_status_; + + friend class UpstreamRequest; }; using CacheFilterSharedPtr = std::shared_ptr; diff --git a/source/extensions/filters/http/cache/cache_filter_logging_info.cc b/source/extensions/filters/http/cache/cache_filter_logging_info.cc index b529358181ef..0c352a2000b3 100644 --- a/source/extensions/filters/http/cache/cache_filter_logging_info.cc +++ b/source/extensions/filters/http/cache/cache_filter_logging_info.cc @@ -44,8 +44,8 @@ absl::string_view insertStatusToString(InsertStatus status) { return "InsertAbortedByCache"; case InsertStatus::InsertAbortedCacheCongested: return "InsertAbortedCacheCongested"; - case InsertStatus::InsertAbortedResponseIncomplete: - return "InsertAbortedResponseIncomplete"; + case InsertStatus::FilterAbortedBeforeInsertComplete: + return "FilterAbortedBeforeInsertComplete"; case InsertStatus::HeaderUpdate: return "HeaderUpdate"; case InsertStatus::NoInsertCacheHit: diff --git a/source/extensions/filters/http/cache/cache_filter_logging_info.h b/source/extensions/filters/http/cache/cache_filter_logging_info.h index e12d0dfdbfcf..5825ae3c7503 100644 --- a/source/extensions/filters/http/cache/cache_filter_logging_info.h +++ b/source/extensions/filters/http/cache/cache_filter_logging_info.h @@ -54,11 +54,9 @@ enum class InsertStatus { // The CacheFilter started an insert, but aborted it because the cache wasn't // ready as a body chunk came in. 
InsertAbortedCacheCongested, - // The CacheFilter started an insert, but couldn't finish it because the - // stream was closed before the response finished. Until the CacheFilter - // supports caching response trailers, this will also be reported if it tries - // to cache a response with trailers. - InsertAbortedResponseIncomplete, + // The CacheFilter started an insert, but the filter was reset before the insert + // completed. The insert may or may not have gone on to completion independently. + FilterAbortedBeforeInsertComplete, // The CacheFilter attempted to update the headers of an existing cache entry. // This doesn't indicate whether or not the update succeeded. HeaderUpdate, diff --git a/source/extensions/filters/http/cache/cache_insert_queue.cc b/source/extensions/filters/http/cache/cache_insert_queue.cc index 80de4b61280c..47ed1b1ab92e 100644 --- a/source/extensions/filters/http/cache/cache_insert_queue.cc +++ b/source/extensions/filters/http/cache/cache_insert_queue.cc @@ -69,22 +69,33 @@ class CacheInsertFragmentTrailers : public CacheInsertFragment { CacheInsertQueue::CacheInsertQueue(std::shared_ptr cache, Http::StreamEncoderFilterCallbacks& encoder_callbacks, - InsertContextPtr insert_context, AbortInsertCallback abort) + InsertContextPtr insert_context, InsertQueueCallbacks& callbacks) : dispatcher_(encoder_callbacks.dispatcher()), insert_context_(std::move(insert_context)), low_watermark_bytes_(encoder_callbacks.encoderBufferLimit() / 2), - high_watermark_bytes_(encoder_callbacks.encoderBufferLimit()), - encoder_callbacks_(encoder_callbacks), abort_callback_(std::move(abort)), cache_(cache) {} + high_watermark_bytes_(encoder_callbacks.encoderBufferLimit()), callbacks_(callbacks), + cache_(cache) {} void CacheInsertQueue::insertHeaders(const Http::ResponseHeaderMap& response_headers, const ResponseMetadata& metadata, bool end_stream) { end_stream_queued_ = end_stream; // While zero isn't technically true for the size of headers, headers are // 
typically excluded from the stream buffer limit. + fragment_in_flight_ = true; insert_context_->insertHeaders( response_headers, metadata, [this, end_stream](bool cache_success) { onFragmentComplete(cache_success, end_stream, 0); }, end_stream); - fragment_in_flight_ = true; + // This requirement simplifies the cache implementation; most caches will have to + // do asynchronous operations, and so will post anyway. It is an error to call continueDecoding + // during decodeHeaders, and calling a callback inline *may* do that, therefore we + // require the cache to post. A previous version performed a post here to guarantee + // correct behavior, but that meant for async caches it would double-post - it makes + // more sense to single-post when it may not be necessary (in the rarer case of a cache + // not needing async action) than to double-post in the common async case. + // This requirement may become unnecessary after some more iterations result in + // continueDecoding no longer being a thing in this filter. 
+ ASSERT(fragment_in_flight_, + "insertHeaders must post the callback to dispatcher, not just call it"); } void CacheInsertQueue::insertBody(const Buffer::Instance& fragment, bool end_stream) { @@ -96,19 +107,21 @@ void CacheInsertQueue::insertBody(const Buffer::Instance& fragment, bool end_str queue_size_bytes_ += sz; fragments_.push_back(std::make_unique(fragment, end_stream)); if (!watermarked_ && queue_size_bytes_ > high_watermark_bytes_) { - if (encoder_callbacks_.has_value()) { - encoder_callbacks_.value().get().onEncoderFilterAboveWriteBufferHighWatermark(); + if (callbacks_.has_value()) { + callbacks_->insertQueueOverHighWatermark(); } watermarked_ = true; } } else { + fragment_in_flight_ = true; insert_context_->insertBody( Buffer::OwnedImpl(fragment), [this, end_stream](bool cache_success) { onFragmentComplete(cache_success, end_stream, 0); }, end_stream); - fragment_in_flight_ = true; + ASSERT(fragment_in_flight_, + "insertBody must post the callback to dispatcher, not just call it"); } } @@ -117,9 +130,11 @@ void CacheInsertQueue::insertTrailers(const Http::ResponseTrailerMap& trailers) if (fragment_in_flight_) { fragments_.push_back(std::make_unique(trailers)); } else { + fragment_in_flight_ = true; insert_context_->insertTrailers( trailers, [this](bool cache_success) { onFragmentComplete(cache_success, true, 0); }); - fragment_in_flight_ = true; + ASSERT(fragment_in_flight_, + "insertTrailers must post the callback to dispatcher, not just call it"); } } @@ -135,8 +150,8 @@ void CacheInsertQueue::onFragmentComplete(bool cache_success, bool end_stream, s ASSERT(queue_size_bytes_ >= sz, "queue can't be emptied by more than its size"); queue_size_bytes_ -= sz; if (watermarked_ && queue_size_bytes_ <= low_watermark_bytes_) { - if (encoder_callbacks_.has_value()) { - encoder_callbacks_.value().get().onEncoderFilterBelowWriteBufferLowWatermark(); + if (callbacks_.has_value()) { + callbacks_->insertQueueUnderLowWatermark(); } watermarked_ = false; } @@ 
-144,17 +159,28 @@ void CacheInsertQueue::onFragmentComplete(bool cache_success, bool end_stream, s // canceled by cache; unwatermark if necessary, inform the filter if // it's still around, and delete the queue. if (watermarked_) { - if (encoder_callbacks_.has_value()) { - encoder_callbacks_.value().get().onEncoderFilterBelowWriteBufferLowWatermark(); + if (callbacks_.has_value()) { + callbacks_->insertQueueUnderLowWatermark(); } watermarked_ = false; } fragments_.clear(); // Clearing self-ownership might provoke the destructor, so take a copy of the // abort callback to avoid reading from 'this' after it may be deleted. - auto abort_callback = std::move(abort_callback_); + // + // This complexity is necessary because if the queue *is not* currently + // self-owned, it will be deleted during insertQueueAborted, so + // clearing self_ownership_ second would be a write-after-destroy error. + // If it *is* currently self-owned, then we must still call the callback if + // any, but clearing self_ownership_ *first* would mean we got destroyed + // so we would no longer have access to the callback. + // Since destroying first *or* second can be an error, rearrange things + // so that destroying first *is not* an error. :) + auto callbacks = std::move(callbacks_); self_ownership_.reset(); - std::move(abort_callback)(); + if (callbacks.has_value()) { + callbacks->insertQueueAborted(); + } return; } if (end_stream) { @@ -178,12 +204,13 @@ void CacheInsertQueue::setSelfOwned(std::unique_ptr self) { // If we sent a high watermark event, this is our last chance to unset it on the // stream, so we'd better do so. if (watermarked_) { - encoder_callbacks_->onEncoderFilterBelowWriteBufferLowWatermark(); + if (callbacks_.has_value()) { + callbacks_->insertQueueUnderLowWatermark(); + } watermarked_ = false; } // Disable all the callbacks, they're going to have nowhere to go. 
- abort_callback_ = []() {}; - encoder_callbacks_.reset(); + callbacks_.reset(); if (fragments_.empty() && !fragment_in_flight_) { // If the queue is already empty we can just let it be destroyed immediately. return; diff --git a/source/extensions/filters/http/cache/cache_insert_queue.h b/source/extensions/filters/http/cache/cache_insert_queue.h index 22297a70a528..52537ef82f00 100644 --- a/source/extensions/filters/http/cache/cache_insert_queue.h +++ b/source/extensions/filters/http/cache/cache_insert_queue.h @@ -10,9 +10,13 @@ namespace Extensions { namespace HttpFilters { namespace Cache { -using OverHighWatermarkCallback = std::function; -using UnderLowWatermarkCallback = std::function; -using AbortInsertCallback = absl::AnyInvocable; +class InsertQueueCallbacks { +public: + virtual void insertQueueOverHighWatermark() PURE; + virtual void insertQueueUnderLowWatermark() PURE; + virtual void insertQueueAborted() PURE; + virtual ~InsertQueueCallbacks() = default; +}; class CacheInsertFragment; // This queue acts as an intermediary between CacheFilter and the cache @@ -36,7 +40,7 @@ class CacheInsertQueue { public: CacheInsertQueue(std::shared_ptr cache, Http::StreamEncoderFilterCallbacks& encoder_callbacks, - InsertContextPtr insert_context, AbortInsertCallback abort); + InsertContextPtr insert_context, InsertQueueCallbacks& callbacks); void insertHeaders(const Http::ResponseHeaderMap& response_headers, const ResponseMetadata& metadata, bool end_stream); void insertBody(const Buffer::Instance& fragment, bool end_stream); @@ -50,8 +54,7 @@ class CacheInsertQueue { Event::Dispatcher& dispatcher_; const InsertContextPtr insert_context_; const size_t low_watermark_bytes_, high_watermark_bytes_; - OptRef encoder_callbacks_; - AbortInsertCallback abort_callback_; + OptRef callbacks_; std::deque> fragments_; // Size of the data currently in the queue (including any fragment in flight). 
size_t queue_size_bytes_ = 0; diff --git a/source/extensions/filters/http/cache/filter_state.h b/source/extensions/filters/http/cache/filter_state.h new file mode 100644 index 000000000000..a161aecde53d --- /dev/null +++ b/source/extensions/filters/http/cache/filter_state.h @@ -0,0 +1,36 @@ +#pragma once + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +enum class FilterState { + Initial, + + // Cache lookup found a cached response that requires validation. + ValidatingCachedResponse, + + // Cache lookup found a fresh or validated cached response and it is being added to the encoding + // stream. + ServingFromCache, + + // The cached response was successfully added to the encoding stream (either during decoding or + // encoding). + ResponseServedFromCache, + + // The filter won't serve a response from the cache, whether because the request wasn't cacheable, + // there was no response in cache, the response in cache couldn't be served, or the request was + // terminated before the cached response could be written. This may be set during decoding or + // encoding. + NotServingFromCache, + + // CacheFilter::onDestroy has been called, the filter will be destroyed soon. Any triggered + // callbacks should be ignored. + Destroyed +}; + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 6796b9778590..1b1862bebd3b 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -57,13 +57,7 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst // Unless this API is still alpha, calls to stableHashKey() must always return // the same result, or a way must be provided to deal with a complete cache // flush. 
-size_t stableHashKey(const Key& key) { - if (Runtime::runtimeFeatureEnabled("envoy.restart_features.use_fast_protobuf_hash")) { - return DeterministicProtoHash::hash(key); - } else { - return MessageUtil::hash(key); - } -} +size_t stableHashKey(const Key& key) { return DeterministicProtoHash::hash(key); } void LookupRequest::initializeRequestCacheControl(const Http::RequestHeaderMap& request_headers) { const absl::string_view cache_control = diff --git a/source/extensions/filters/http/cache/http_cache.h b/source/extensions/filters/http/cache/http_cache.h index e01fbf7d162a..65a6c0673b51 100644 --- a/source/extensions/filters/http/cache/http_cache.h +++ b/source/extensions/filters/http/cache/http_cache.h @@ -277,13 +277,13 @@ class HttpCache { // read access to a cache entry before its write is complete. In this case the // content-length value may be unset. virtual LookupContextPtr makeLookupContext(LookupRequest&& request, - Http::StreamDecoderFilterCallbacks& callbacks) PURE; + Http::StreamFilterCallbacks& callbacks) PURE; // Returns an InsertContextPtr to manage the state of a cache insertion. // Responses with a chunked transfer-encoding must be dechunked before // insertion. virtual InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context, - Http::StreamEncoderFilterCallbacks& callbacks) PURE; + Http::StreamFilterCallbacks& callbacks) PURE; // Precondition: lookup_context represents a prior cache lookup that required // validation. 
diff --git a/source/extensions/filters/http/cache/upstream_request.cc b/source/extensions/filters/http/cache/upstream_request.cc new file mode 100644 index 000000000000..550b81c59a7f --- /dev/null +++ b/source/extensions/filters/http/cache/upstream_request.cc @@ -0,0 +1,272 @@ +#include "source/extensions/filters/http/cache/upstream_request.h" + +#include "source/common/common/enum_to_int.h" +#include "source/common/http/utility.h" +#include "source/extensions/filters/http/cache/cache_custom_headers.h" +#include "source/extensions/filters/http/cache/cache_filter.h" +#include "source/extensions/filters/http/cache/cacheability_utils.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +namespace { +inline bool isResponseNotModified(const Http::ResponseHeaderMap& response_headers) { + return Http::Utility::getResponseStatus(response_headers) == enumToInt(Http::Code::NotModified); +} +} // namespace + +void UpstreamRequest::setFilterState(FilterState fs) { + filter_state_ = fs; + if (filter_ != nullptr && filter_->filter_state_ != FilterState::Destroyed) { + filter_->filter_state_ = fs; + } +} + +void UpstreamRequest::setInsertStatus(InsertStatus is) { + if (filter_ != nullptr && filter_->filter_state_ != FilterState::Destroyed) { + filter_->insert_status_ = is; + } +} + +void UpstreamRequest::processSuccessfulValidation(Http::ResponseHeaderMapPtr response_headers) { + ASSERT(lookup_result_, "CacheFilter trying to validate a non-existent lookup result"); + ASSERT( + filter_state_ == FilterState::ValidatingCachedResponse, + "processSuccessfulValidation must only be called when a cached response is being validated"); + ASSERT(isResponseNotModified(*response_headers), + "processSuccessfulValidation must only be called with 304 responses"); + + // Check whether the cached entry should be updated before modifying the 304 response. 
+ const bool should_update_cached_entry = shouldUpdateCachedEntry(*response_headers); + + setFilterState(FilterState::ServingFromCache); + + // Replace the 304 response status code with the cached status code. + response_headers->setStatus(lookup_result_->headers_->getStatusValue()); + + // Remove content length header if the 304 had one; if the cache entry had a + // content length header it will be added by the header adding block below. + response_headers->removeContentLength(); + + // A response that has been validated should not contain an Age header as it is equivalent to a + // freshly served response from the origin, unless the 304 response has an Age header, which + // means it was served by an upstream cache. + // Remove any existing Age header in the cached response. + lookup_result_->headers_->removeInline(CacheCustomHeaders::age()); + + // Add any missing headers from the cached response to the 304 response. + lookup_result_->headers_->iterate([&response_headers](const Http::HeaderEntry& cached_header) { + // TODO(yosrym93): Try to avoid copying the header key twice. + Http::LowerCaseString key(cached_header.key().getStringView()); + absl::string_view value = cached_header.value().getStringView(); + if (response_headers->get(key).empty()) { + response_headers->setCopy(key, value); + } + return Http::HeaderMap::Iterate::Continue; + }); + + if (should_update_cached_entry) { + // TODO(yosrym93): else the cached entry should be deleted. + // Update metadata associated with the cached response. Right now this is only response_time; + const ResponseMetadata metadata = {config_->timeSource().systemTime()}; + cache_->updateHeaders(*lookup_, *response_headers, metadata, + [](bool updated ABSL_ATTRIBUTE_UNUSED) {}); + setInsertStatus(InsertStatus::HeaderUpdate); + } + + // A cache entry was successfully validated, so abort the upstream request, send + // encode the merged-modified headers, and encode cached body and trailers. 
+ if (filter_ != nullptr) { + lookup_result_->headers_ = std::move(response_headers); + filter_->lookup_result_ = std::move(lookup_result_); + filter_->lookup_ = std::move(lookup_); + filter_->upstream_request_ = nullptr; + lookup_result_ = nullptr; + filter_->encodeCachedResponse(/* end_stream_after_headers = */ false); + filter_ = nullptr; + abort(); + } +} + +// TODO(yosrym93): Write a test that exercises this when SimpleHttpCache implements updateHeaders +bool UpstreamRequest::shouldUpdateCachedEntry( + const Http::ResponseHeaderMap& response_headers) const { + ASSERT(isResponseNotModified(response_headers), + "shouldUpdateCachedEntry must only be called with 304 responses"); + ASSERT(lookup_result_, "shouldUpdateCachedEntry precondition unsatisfied: lookup_result_ " + "does not point to a cache lookup result"); + ASSERT(filter_state_ == FilterState::ValidatingCachedResponse, + "shouldUpdateCachedEntry precondition unsatisfied: the " + "CacheFilter is not validating a cache lookup result"); + + // According to: https://httpwg.org/specs/rfc7234.html#freshening.responses, + // and assuming a single cached response per key: + // If the 304 response contains a strong validator (etag) that does not match the cached response, + // the cached response should not be updated. 
+ const Http::HeaderEntry* response_etag = response_headers.getInline(CacheCustomHeaders::etag()); + const Http::HeaderEntry* cached_etag = + lookup_result_->headers_->getInline(CacheCustomHeaders::etag()); + return !response_etag || (cached_etag && cached_etag->value().getStringView() == + response_etag->value().getStringView()); +} + +UpstreamRequest* UpstreamRequest::create(CacheFilter* filter, LookupContextPtr lookup, + LookupResultPtr lookup_result, + std::shared_ptr cache, + Http::AsyncClient& async_client, + const Http::AsyncClient::StreamOptions& options) { + return new UpstreamRequest(filter, std::move(lookup), std::move(lookup_result), std::move(cache), + async_client, options); +} + +UpstreamRequest::UpstreamRequest(CacheFilter* filter, LookupContextPtr lookup, + LookupResultPtr lookup_result, std::shared_ptr cache, + Http::AsyncClient& async_client, + const Http::AsyncClient::StreamOptions& options) + : filter_(filter), lookup_(std::move(lookup)), lookup_result_(std::move(lookup_result)), + is_head_request_(filter->is_head_request_), + request_allows_inserts_(filter->request_allows_inserts_), config_(filter->config_), + filter_state_(filter->filter_state_), cache_(std::move(cache)), + stream_(async_client.start(*this, options)) { + ASSERT(stream_ != nullptr); +} + +void UpstreamRequest::insertQueueOverHighWatermark() { + // TODO(ravenblack): currently AsyncRequest::Stream does not support pausing. +} + +void UpstreamRequest::insertQueueUnderLowWatermark() { + // TODO(ravenblack): currently AsyncRequest::Stream does not support pausing. 
+} + +void UpstreamRequest::insertQueueAborted() { + insert_queue_ = nullptr; + ENVOY_LOG(debug, "cache aborted insert operation"); + setInsertStatus(InsertStatus::InsertAbortedByCache); + if (filter_ == nullptr) { + abort(); + } +} + +void UpstreamRequest::sendHeaders(Http::RequestHeaderMap& request_headers) { + // If this request had a body or trailers, CacheFilter::decodeHeaders + // would have bypassed cache lookup and insertion, so this class wouldn't + // be instantiated. So end_stream will always be true. + stream_->sendHeaders(request_headers, true); +} + +void UpstreamRequest::abort() { + stream_->reset(); // Calls onReset, resulting in deletion. +} + +UpstreamRequest::~UpstreamRequest() { + if (filter_ != nullptr) { + filter_->onUpstreamRequestReset(); + } + if (lookup_) { + lookup_->onDestroy(); + lookup_ = nullptr; + } + if (insert_queue_) { + // The insert queue may still have actions in flight, so it needs to be allowed + // to drain itself before destruction. + insert_queue_->setSelfOwned(std::move(insert_queue_)); + } +} + +void UpstreamRequest::onReset() { delete this; } +void UpstreamRequest::onComplete() { + if (filter_) { + ENVOY_STREAM_LOG(debug, "UpstreamRequest complete", *filter_->decoder_callbacks_); + filter_->onUpstreamRequestComplete(); + filter_ = nullptr; + } else { + ENVOY_LOG(debug, "UpstreamRequest complete after stream finished"); + } + delete this; +} +void UpstreamRequest::disconnectFilter() { + filter_ = nullptr; + if (insert_queue_ == nullptr) { + abort(); + } +} + +void UpstreamRequest::onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) { + if (filter_state_ == FilterState::ValidatingCachedResponse && isResponseNotModified(*headers)) { + return processSuccessfulValidation(std::move(headers)); + } + // Either a cache miss or a cache entry that is no longer valid. + // Check if the new response can be cached. 
+ if (request_allows_inserts_ && !is_head_request_ && + CacheabilityUtils::isCacheableResponse(*headers, config_->varyAllowList())) { + if (filter_) { + ENVOY_STREAM_LOG(debug, "UpstreamRequest::onHeaders inserting headers", + *filter_->decoder_callbacks_); + } + auto insert_context = + cache_->makeInsertContext(std::move(lookup_), *filter_->encoder_callbacks_); + lookup_ = nullptr; + if (insert_context != nullptr) { + // The callbacks passed to CacheInsertQueue are all called through the dispatcher, + // so they're thread-safe. During CacheFilter::onDestroy the queue is given ownership + // of itself and all the callbacks are cancelled, so they are also filter-destruction-safe. + insert_queue_ = std::make_unique(cache_, *filter_->encoder_callbacks_, + std::move(insert_context), *this); + // Add metadata associated with the cached response. Right now this is only response_time; + const ResponseMetadata metadata = {config_->timeSource().systemTime()}; + insert_queue_->insertHeaders(*headers, metadata, end_stream); + // insert_status_ remains absl::nullopt if end_stream == false, as we have not completed the + // insertion yet. 
+ if (end_stream) { + setInsertStatus(InsertStatus::InsertSucceeded); + } + } + } else { + setInsertStatus(InsertStatus::NoInsertResponseNotCacheable); + } + setFilterState(FilterState::NotServingFromCache); + if (filter_) { + filter_->decoder_callbacks_->encodeHeaders(std::move(headers), is_head_request_ || end_stream, + StreamInfo::ResponseCodeDetails::get().ViaUpstream); + } +} + +void UpstreamRequest::onData(Buffer::Instance& body, bool end_stream) { + if (insert_queue_ != nullptr) { + insert_queue_->insertBody(body, end_stream); + } + if (filter_) { + ENVOY_STREAM_LOG(debug, "UpstreamRequest::onData inserted body", *filter_->decoder_callbacks_); + filter_->decoder_callbacks_->encodeData(body, end_stream); + if (end_stream) { + // We don't actually know at this point if the insert succeeded, but as far as the + // filter is concerned it has been fully handed off to the cache + // implementation. + setInsertStatus(InsertStatus::InsertSucceeded); + } + } else { + ENVOY_LOG(debug, "UpstreamRequest::onData inserted body"); + } +} + +void UpstreamRequest::onTrailers(Http::ResponseTrailerMapPtr&& trailers) { + if (insert_queue_ != nullptr) { + insert_queue_->insertTrailers(*trailers); + } + if (filter_ != nullptr) { + ENVOY_STREAM_LOG(debug, "UpstreamRequest::onTrailers inserting trailers", + *filter_->decoder_callbacks_); + filter_->decoder_callbacks_->encodeTrailers(std::move(trailers)); + setInsertStatus(InsertStatus::InsertSucceeded); + } else { + ENVOY_LOG(debug, "UpstreamRequest::onTrailers inserting trailers"); + } +} + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/cache/upstream_request.h b/source/extensions/filters/http/cache/upstream_request.h new file mode 100644 index 000000000000..6aa6259ca26c --- /dev/null +++ b/source/extensions/filters/http/cache/upstream_request.h @@ -0,0 +1,84 @@ +#pragma once + +#include "source/common/common/logger.h" +#include 
"source/extensions/filters/http/cache/cache_filter_logging_info.h" +#include "source/extensions/filters/http/cache/cache_insert_queue.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace Cache { + +class CacheFilter; +class CacheFilterConfig; +enum class FilterState; + +class UpstreamRequest : public Logger::Loggable, + public Http::AsyncClient::StreamCallbacks, + public InsertQueueCallbacks { +public: + void sendHeaders(Http::RequestHeaderMap& request_headers); + // Called by filter_ when filter_ is destroyed first. + // UpstreamRequest will make no more calls to filter_ once disconnectFilter + // has been called. + void disconnectFilter(); + + // StreamCallbacks + void onHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) override; + void onData(Buffer::Instance& data, bool end_stream) override; + void onTrailers(Http::ResponseTrailerMapPtr&& trailers) override; + void onComplete() override; + void onReset() override; + + // InsertQueueCallbacks + void insertQueueOverHighWatermark() override; + void insertQueueUnderLowWatermark() override; + void insertQueueAborted() override; + + static UpstreamRequest* create(CacheFilter* filter, LookupContextPtr lookup, + LookupResultPtr lookup_result, std::shared_ptr cache, + Http::AsyncClient& async_client, + const Http::AsyncClient::StreamOptions& options); + UpstreamRequest(CacheFilter* filter, LookupContextPtr lookup, LookupResultPtr lookup_result, + std::shared_ptr cache, Http::AsyncClient& async_client, + const Http::AsyncClient::StreamOptions& options); + ~UpstreamRequest() override; + +private: + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Serves a validated cached response after updating it with a 304 response. 
+ void processSuccessfulValidation(Http::ResponseHeaderMapPtr response_headers); + + // Updates the filter state belonging to the UpstreamRequest, and the one belonging to + // the filter if it has not been destroyed. + void setFilterState(FilterState fs); + + // Updates the insert status belonging to the filter, if it has not been destroyed. + void setInsertStatus(InsertStatus is); + + // If an error occurs while the stream is active, abort will reset the stream, which + // in turn provokes the rest of the destruction process. + void abort(); + + // Precondition: lookup_result_ points to a cache lookup result that requires validation. + // filter_state_ is ValidatingCachedResponse. + // Checks if a cached entry should be updated with a 304 response. + bool shouldUpdateCachedEntry(const Http::ResponseHeaderMap& response_headers) const; + + CacheFilter* filter_ = nullptr; + LookupContextPtr lookup_; + LookupResultPtr lookup_result_; + bool is_head_request_; + bool request_allows_inserts_; + std::shared_ptr config_; + FilterState filter_state_; + std::shared_ptr cache_; + Http::AsyncClient::Stream* stream_ = nullptr; + std::unique_ptr insert_queue_; +}; + +} // namespace Cache +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/listener/http_inspector/BUILD b/source/extensions/filters/listener/http_inspector/BUILD index fc5dec01f9d4..036867c01678 100644 --- a/source/extensions/filters/listener/http_inspector/BUILD +++ b/source/extensions/filters/listener/http_inspector/BUILD @@ -16,7 +16,6 @@ envoy_cc_library( srcs = ["http_inspector.cc"], hdrs = ["http_inspector.h"], deps = [ - "//bazel/external/http_parser", "//envoy/event:dispatcher_interface", "//envoy/event:timer_interface", "//envoy/network:filter_interface", @@ -25,6 +24,8 @@ envoy_cc_library( "//source/common/common:minimal_logger_lib", "//source/common/http:headers_lib", "//source/common/http:utility_lib", + 
"//source/common/http/http1:legacy_parser_lib", + "//source/common/http/http1:parser_interface", ], ) diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index 2d25b0c43607..a3935537ddbc 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -23,14 +23,12 @@ Config::Config(Stats::Scope& scope) const absl::string_view Filter::HTTP2_CONNECTION_PREFACE = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; -Filter::Filter(const ConfigSharedPtr config) : config_(config) { - http_parser_init(&parser_, HTTP_REQUEST); +Filter::Filter(const ConfigSharedPtr config) : config_(config), no_op_callbacks_() { + // Filter for only Request Message types with NoOp Parser callbacks. + parser_ = std::make_unique(Http::Http1::MessageType::Request, + &no_op_callbacks_); } -http_parser_settings Filter::settings_{ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, -}; - Network::FilterStatus Filter::onData(Network::ListenerFilterBuffer& buffer) { auto raw_slice = buffer.rawSlice(); const char* buf = static_cast(raw_slice.mem_); @@ -81,35 +79,40 @@ ParseState Filter::parseHttpHeader(absl::string_view data) { return ParseState::Error; } - absl::string_view new_data = data.substr(parser_.nread); + absl::string_view new_data = data.substr(nread_); const size_t pos = new_data.find_first_of("\r\n"); if (pos != absl::string_view::npos) { // Include \r or \n new_data = new_data.substr(0, pos + 1); - ssize_t rc = http_parser_execute(&parser_, &settings_, new_data.data(), new_data.length()); + ssize_t rc = parser_->execute(new_data.data(), new_data.length()); + nread_ += rc; ENVOY_LOG(trace, "http inspector: http_parser parsed {} chars, error code: {}", rc, - static_cast(HTTP_PARSER_ERRNO(&parser_))); + parser_->errorMessage()); // Errors in parsing HTTP. 
- if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + if (parser_->getStatus() != Http::Http1::ParserStatus::Ok && + parser_->getStatus() != Http::Http1::ParserStatus::Paused) { return ParseState::Error; } - if (parser_.http_major == 1 && parser_.http_minor == 1) { + if (parser_->isHttp11()) { protocol_ = Http::Headers::get().ProtocolStrings.Http11String; } else { // Set other HTTP protocols to HTTP/1.0 protocol_ = Http::Headers::get().ProtocolStrings.Http10String; } + return ParseState::Done; } else { - ssize_t rc = http_parser_execute(&parser_, &settings_, new_data.data(), new_data.length()); + ssize_t rc = parser_->execute(new_data.data(), new_data.length()); + nread_ += rc; ENVOY_LOG(trace, "http inspector: http_parser parsed {} chars, error code: {}", rc, - static_cast(HTTP_PARSER_ERRNO(&parser_))); + parser_->errorMessage()); // Errors in parsing HTTP. - if (HTTP_PARSER_ERRNO(&parser_) != HPE_OK && HTTP_PARSER_ERRNO(&parser_) != HPE_PAUSED) { + if (parser_->getStatus() != Http::Http1::ParserStatus::Ok && + parser_->getStatus() != Http::Http1::ParserStatus::Paused) { return ParseState::Error; } else { return ParseState::Continue; diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.h b/source/extensions/filters/listener/http_inspector/http_inspector.h index 476c4e3b5891..c8f5e908a0fc 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.h +++ b/source/extensions/filters/listener/http_inspector/http_inspector.h @@ -1,7 +1,5 @@ #pragma once -#include - #include "envoy/event/file_event.h" #include "envoy/event/timer.h" #include "envoy/network/filter.h" @@ -9,6 +7,8 @@ #include "envoy/stats/stats_macros.h" #include "source/common/common/logger.h" +#include "source/common/http/http1/legacy_parser_impl.h" +#include "source/common/http/http1/parser.h" #include "absl/container/flat_hash_set.h" @@ -58,6 +58,41 @@ class Config { HttpInspectorStats stats_; }; +class 
NoOpParserCallbacks : public Http::Http1::ParserCallbacks { +public: + Http::Http1::CallbackResult onMessageBegin() override { + return Http::Http1::CallbackResult::Success; + } + + Http::Http1::CallbackResult onUrl(const char* /*data*/, size_t /*length*/) override { + return Http::Http1::CallbackResult::Success; + } + + Http::Http1::CallbackResult onStatus(const char* /*data*/, size_t /*length*/) override { + return Http::Http1::CallbackResult::Success; + } + + Http::Http1::CallbackResult onHeaderField(const char* /*data*/, size_t /*length*/) override { + return Http::Http1::CallbackResult::Success; + } + + Http::Http1::CallbackResult onHeaderValue(const char* /*data*/, size_t /*length*/) override { + return Http::Http1::CallbackResult::Success; + } + + Http::Http1::CallbackResult onHeadersComplete() override { + return Http::Http1::CallbackResult::Success; + } + + void bufferBody(const char* /*data*/, size_t /*length*/) override {} + + Http::Http1::CallbackResult onMessageComplete() override { + return Http::Http1::CallbackResult::Success; + } + + void onChunkHeader(bool /*is_final_chunk*/) override {} +}; + using ConfigSharedPtr = std::shared_ptr; /** @@ -85,8 +120,10 @@ class Filter : public Network::ListenerFilter, Logger::Loggable parser_; + NoOpParserCallbacks no_op_callbacks_; + ssize_t nread_ = 0; }; } // namespace HttpInspector diff --git a/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.cc b/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.cc index 5e220e2b4598..6497a941f8fa 100644 --- a/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.cc +++ b/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.cc @@ -115,9 +115,8 @@ FileSystemHttpCache::makeVaryKey(const Key& base, const VaryAllowList& vary_allo return vary_key; } -LookupContextPtr -FileSystemHttpCache::makeLookupContext(LookupRequest&& lookup, - Http::StreamDecoderFilterCallbacks& callbacks) { 
+LookupContextPtr FileSystemHttpCache::makeLookupContext(LookupRequest&& lookup, + Http::StreamFilterCallbacks& callbacks) { return std::make_unique(callbacks.dispatcher(), *this, std::move(lookup)); } @@ -352,12 +351,14 @@ std::string FileSystemHttpCache::generateFilename(const Key& key) const { } InsertContextPtr FileSystemHttpCache::makeInsertContext(LookupContextPtr&& lookup_context, - Http::StreamEncoderFilterCallbacks&) { + Http::StreamFilterCallbacks&) { auto file_lookup_context = std::unique_ptr( dynamic_cast(lookup_context.release())); ASSERT(file_lookup_context); if (file_lookup_context->workInProgress()) { - return std::make_unique(); + auto ret = std::make_unique(*file_lookup_context->dispatcher()); + file_lookup_context->onDestroy(); + return ret; } return std::make_unique(shared_from_this(), std::move(file_lookup_context)); } diff --git a/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.h b/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.h index be4c59402444..97cd77a40e28 100644 --- a/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.h +++ b/source/extensions/http/cache/file_system_http_cache/file_system_http_cache.h @@ -47,9 +47,9 @@ class FileSystemHttpCache : public HttpCache, // Overrides for HttpCache LookupContextPtr makeLookupContext(LookupRequest&& lookup, - Http::StreamDecoderFilterCallbacks& callbacks) override; + Http::StreamFilterCallbacks& callbacks) override; InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context, - Http::StreamEncoderFilterCallbacks& callbacks) override; + Http::StreamFilterCallbacks& callbacks) override; CacheInfo cacheInfo() const override; const CacheStats& stats() const; diff --git a/source/extensions/http/cache/file_system_http_cache/insert_context.cc b/source/extensions/http/cache/file_system_http_cache/insert_context.cc index 55503ef6dd25..a2fc30b2f699 100644 --- 
a/source/extensions/http/cache/file_system_http_cache/insert_context.cc +++ b/source/extensions/http/cache/file_system_http_cache/insert_context.cc @@ -193,7 +193,10 @@ void FileInsertContext::insertTrailers(const Http::ResponseTrailerMap& trailers, cancel_action_in_flight_ = std::move(queued.value()); } -void FileInsertContext::onDestroy() { cancelInsert("InsertContext destroyed prematurely"); } +void FileInsertContext::onDestroy() { + lookup_context_->onDestroy(); + cancelInsert("InsertContext destroyed prematurely"); +} void FileInsertContext::commit() { ASSERT(dispatcher()->isThreadSafe()); diff --git a/source/extensions/http/cache/file_system_http_cache/insert_context.h b/source/extensions/http/cache/file_system_http_cache/insert_context.h index a70fbbae1d06..d154e862e750 100644 --- a/source/extensions/http/cache/file_system_http_cache/insert_context.h +++ b/source/extensions/http/cache/file_system_http_cache/insert_context.h @@ -21,17 +21,21 @@ class FileSystemHttpCache; class DontInsertContext : public InsertContext { public: + explicit DontInsertContext(Event::Dispatcher& dispatcher) : dispatcher_(dispatcher) {} void insertHeaders(const Http::ResponseHeaderMap&, const ResponseMetadata&, InsertCallback insert_complete, bool) override { - insert_complete(false); + dispatcher_.post([cb = std::move(insert_complete)]() mutable { cb(false); }); } void insertBody(const Buffer::Instance&, InsertCallback ready_for_next_chunk, bool) override { - ready_for_next_chunk(false); + dispatcher_.post([cb = std::move(ready_for_next_chunk)]() mutable { cb(false); }); } void insertTrailers(const Http::ResponseTrailerMap&, InsertCallback insert_complete) override { - insert_complete(false); + dispatcher_.post([cb = std::move(insert_complete)]() mutable { cb(false); }); } void onDestroy() override{}; + +private: + Event::Dispatcher& dispatcher_; }; class FileInsertContext : public InsertContext, public Logger::Loggable { diff --git 
a/source/extensions/http/cache/simple_http_cache/simple_http_cache.cc b/source/extensions/http/cache/simple_http_cache/simple_http_cache.cc index 04c70bba9d39..15070529c623 100644 --- a/source/extensions/http/cache/simple_http_cache/simple_http_cache.cc +++ b/source/extensions/http/cache/simple_http_cache/simple_http_cache.cc @@ -168,7 +168,7 @@ class SimpleInsertContext : public InsertContext { } // namespace LookupContextPtr SimpleHttpCache::makeLookupContext(LookupRequest&& request, - Http::StreamDecoderFilterCallbacks& callbacks) { + Http::StreamFilterCallbacks& callbacks) { return std::make_unique(callbacks.dispatcher(), *this, std::move(request)); } @@ -311,10 +311,12 @@ bool SimpleHttpCache::varyInsert(const Key& request_key, } InsertContextPtr SimpleHttpCache::makeInsertContext(LookupContextPtr&& lookup_context, - Http::StreamEncoderFilterCallbacks&) { + Http::StreamFilterCallbacks&) { ASSERT(lookup_context != nullptr); - return std::make_unique(dynamic_cast(*lookup_context), - *this); + auto ret = std::make_unique( + dynamic_cast(*lookup_context), *this); + lookup_context->onDestroy(); + return ret; } constexpr absl::string_view Name = "envoy.extensions.http.cache.simple"; diff --git a/source/extensions/http/cache/simple_http_cache/simple_http_cache.h b/source/extensions/http/cache/simple_http_cache/simple_http_cache.h index 515ccc5f0183..3acb936ac687 100644 --- a/source/extensions/http/cache/simple_http_cache/simple_http_cache.h +++ b/source/extensions/http/cache/simple_http_cache/simple_http_cache.h @@ -35,9 +35,9 @@ class SimpleHttpCache : public HttpCache, public Singleton::Instance { public: // HttpCache LookupContextPtr makeLookupContext(LookupRequest&& request, - Http::StreamDecoderFilterCallbacks& callbacks) override; + Http::StreamFilterCallbacks& callbacks) override; InsertContextPtr makeInsertContext(LookupContextPtr&& lookup_context, - Http::StreamEncoderFilterCallbacks& callbacks) override; + Http::StreamFilterCallbacks& callbacks) override; 
void updateHeaders(const LookupContext& lookup_context, const Http::ResponseHeaderMap& response_headers, const ResponseMetadata& metadata, UpdateHeadersCallback on_complete) override; diff --git a/source/extensions/http/early_header_mutation/header_mutation/config.cc b/source/extensions/http/early_header_mutation/header_mutation/config.cc index d00f1027a8ec..7dddb6547eb2 100644 --- a/source/extensions/http/early_header_mutation/header_mutation/config.cc +++ b/source/extensions/http/early_header_mutation/header_mutation/config.cc @@ -12,7 +12,7 @@ Envoy::Http::EarlyHeaderMutationPtr Factory::createExtension(const Protobuf::Message& message, Server::Configuration::FactoryContext& context) { auto mptr = Envoy::Config::Utility::translateAnyToFactoryConfig( - *Envoy::Protobuf::DynamicCastToGenerated(&message), + *Envoy::Protobuf::DynamicCastMessage(&message), context.messageValidationVisitor(), *this); const auto& proto_config = MessageUtil::downcastAndValidate< const envoy::extensions::http::early_header_mutation::header_mutation::v3::HeaderMutation&>( diff --git a/source/extensions/http/original_ip_detection/custom_header/config.cc b/source/extensions/http/original_ip_detection/custom_header/config.cc index f5395ba679e2..e89bcff44b8c 100644 --- a/source/extensions/http/original_ip_detection/custom_header/config.cc +++ b/source/extensions/http/original_ip_detection/custom_header/config.cc @@ -17,7 +17,7 @@ absl::StatusOr CustomHeaderIPDetectionFactory::createExtension(const Protobuf::Message& message, Server::Configuration::FactoryContext& context) { auto mptr = Envoy::Config::Utility::translateAnyToFactoryConfig( - *Envoy::Protobuf::DynamicCastToGenerated(&message), + *Envoy::Protobuf::DynamicCastMessage(&message), context.messageValidationVisitor(), *this); const auto& proto_config = MessageUtil::downcastAndValidate< const envoy::extensions::http::original_ip_detection::custom_header::v3::CustomHeaderConfig&>( diff --git 
a/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc index 112bcbb0ae22..23f8affa0c27 100644 --- a/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc +++ b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc @@ -379,7 +379,7 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( isAddressFamilyProcessed(kDNSServiceProtocol_IPv6)) { ENVOY_LOG(debug, "DNS Resolver flushing queries pending callback"); pending_response_.status_ = ResolutionStatus::Completed; - pending_response_.details_ = "apple_dns_success"; + pending_response_.details_ = absl::StrCat("apple_dns_completed_", error_code); finishResolve(); // Note: Nothing can follow this call to finishResolve due to deletion of this // object upon resolution. diff --git a/source/extensions/upstreams/http/udp/upstream_request.h b/source/extensions/upstreams/http/udp/upstream_request.h index 168d1754a85b..4311936f0d8a 100644 --- a/source/extensions/upstreams/http/udp/upstream_request.h +++ b/source/extensions/upstreams/http/udp/upstream_request.h @@ -47,10 +47,7 @@ class UdpConnPool : public Router::GenericConnPool { auto ret = std::make_unique( Network::Socket::Type::Datagram, host->address(), /*remote_address=*/nullptr, Network::SocketCreationOptions{}); - if (Runtime::runtimeFeatureEnabled( - "envoy.restart_features.allow_client_socket_creation_failure")) { - RELEASE_ASSERT(ret->isOpen(), "Socket creation fail"); - } + RELEASE_ASSERT(ret->isOpen(), "Socket creation fail"); return ret; } diff --git a/test/common/formatter/BUILD b/test/common/formatter/BUILD index d9850856a402..43afe4b70f6d 100644 --- a/test/common/formatter/BUILD +++ b/test/common/formatter/BUILD @@ -87,6 +87,7 @@ envoy_cc_test( "//test/mocks/server:factory_context_mocks", "//test/mocks/stream_info:stream_info_mocks", "//test/test_common:registry_lib", + "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", 
"@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/test/common/formatter/command_extension.cc b/test/common/formatter/command_extension.cc index b196a924b4c5..a29f6b8c9feb 100644 --- a/test/common/formatter/command_extension.cc +++ b/test/common/formatter/command_extension.cc @@ -30,7 +30,7 @@ TestCommandFactory::createCommandParserFromProto(const Protobuf::Message& messag Server::Configuration::GenericFactoryContext&) { // Cast the config message to the actual type to test that it was constructed properly. [[maybe_unused]] const auto& config = - *Envoy::Protobuf::DynamicCastToGenerated(&message); + *Envoy::Protobuf::DynamicCastMessage(&message); return std::make_unique(); } @@ -67,7 +67,7 @@ CommandParserPtr AdditionalCommandFactory::createCommandParserFromProto( const Protobuf::Message& message, Server::Configuration::GenericFactoryContext&) { // Cast the config message to the actual type to test that it was constructed properly. [[maybe_unused]] const auto& config = - *Envoy::Protobuf::DynamicCastToGenerated(&message); + *Envoy::Protobuf::DynamicCastMessage(&message); return std::make_unique(); } @@ -86,7 +86,7 @@ FailCommandFactory::createCommandParserFromProto(const Protobuf::Message& messag Server::Configuration::GenericFactoryContext&) { // Cast the config message to the actual type to test that it was constructed properly. 
[[maybe_unused]] const auto& config = - *Envoy::Protobuf::DynamicCastToGenerated(&message); + *Envoy::Protobuf::DynamicCastMessage(&message); return nullptr; } diff --git a/test/common/formatter/substitution_format_string_test.cc b/test/common/formatter/substitution_format_string_test.cc index e2d01dd13336..f651b12ffb7b 100644 --- a/test/common/formatter/substitution_format_string_test.cc +++ b/test/common/formatter/substitution_format_string_test.cc @@ -7,6 +7,7 @@ #include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/test_common/registry.h" +#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -78,6 +79,9 @@ TEST_F(SubstitutionFormatStringUtilsTest, TestFromProtoConfigJson) { } TEST_F(SubstitutionFormatStringUtilsTest, TestInvalidConfigs) { + TestScopedRuntime runtime; + runtime.mergeValues({{"envoy.reloadable_features.logging_with_fast_json_formatter", "false"}}); + const std::vector invalid_configs = { R"( json_format: diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index 9c058bdf46ef..853e0eab1f91 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -2699,9 +2699,7 @@ TEST(SubstitutionFormatterTest, DynamicMetadataFieldExtractor) { DynamicMetadataFormatter formatter("com.test", {"nan_val"}, absl::optional()); absl::optional value = formatter.format(stream_info); - EXPECT_EQ("google.protobuf.Value cannot encode double values for nan, because it would be " - "parsed as a string", - value.value()); + EXPECT_EQ("null", value.value()); } { @@ -2713,9 +2711,7 @@ TEST(SubstitutionFormatterTest, DynamicMetadataFieldExtractor) { DynamicMetadataFormatter formatter("com.test", {"inf_val"}, absl::optional()); absl::optional value = formatter.format(stream_info); - EXPECT_EQ("google.protobuf.Value cannot encode double 
values for infinity, because it would be " - "parsed as a string", - value.value()); + EXPECT_EQ("inf", value.value()); } } @@ -4475,7 +4471,6 @@ TEST(SubstitutionFormatterTest, JsonFormatterTest) { JsonFormatterImpl formatter(key_mapping, false); const std::string out_json = formatter.formatWithContext(formatter_context, stream_info); - std::cout << out_json << std::endl; EXPECT_TRUE(TestUtility::jsonStringEqual(out_json, expected)); } diff --git a/test/common/protobuf/utility_speed_test.cc b/test/common/protobuf/utility_speed_test.cc index 491d2f71ed5b..7a296a369a41 100644 --- a/test/common/protobuf/utility_speed_test.cc +++ b/test/common/protobuf/utility_speed_test.cc @@ -4,7 +4,6 @@ #include "source/common/protobuf/utility.h" #include "test/common/protobuf/deterministic_hash_test.pb.h" -#include "test/test_common/test_runtime.h" #include "benchmark/benchmark.h" @@ -53,21 +52,8 @@ static std::unique_ptr testProtoWithRepeatedFields() { return msg; } -static void bmHashByTextFormat(benchmark::State& state, std::unique_ptr msg) { - TestScopedRuntime runtime; - runtime.mergeValues({{"envoy.restart_features.use_fast_protobuf_hash", "false"}}); - uint64_t hash = 0; - for (auto _ : state) { - UNREFERENCED_PARAMETER(_); - hash += MessageUtil::hash(*msg); - } - benchmark::DoNotOptimize(hash); -} - static void bmHashByDeterministicHash(benchmark::State& state, std::unique_ptr msg) { - TestScopedRuntime runtime; - runtime.mergeValues({{"envoy.restart_features.use_fast_protobuf_hash", "true"}}); uint64_t hash = 0; for (auto _ : state) { UNREFERENCED_PARAMETER(_); @@ -76,10 +62,7 @@ static void bmHashByDeterministicHash(benchmark::State& state, benchmark::DoNotOptimize(hash); } BENCHMARK_CAPTURE(bmHashByDeterministicHash, map, testProtoWithMaps()); -BENCHMARK_CAPTURE(bmHashByTextFormat, map, testProtoWithMaps()); BENCHMARK_CAPTURE(bmHashByDeterministicHash, recursion, testProtoWithRecursion()); -BENCHMARK_CAPTURE(bmHashByTextFormat, recursion, testProtoWithRecursion()); 
BENCHMARK_CAPTURE(bmHashByDeterministicHash, repeatedFields, testProtoWithRepeatedFields()); -BENCHMARK_CAPTURE(bmHashByTextFormat, repeatedFields, testProtoWithRepeatedFields()); } // namespace Envoy diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index b2f0c90eb038..3a43ba8b5b80 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -194,22 +194,16 @@ TEST_F(ProtobufUtilityTest, MessageUtilHash) { a4.PackFrom(s2); a5.PackFrom(s3); - TestScopedRuntime runtime_; - for (std::string runtime_value : {"true", "false"}) { - // TODO(ravenblack): when the runtime flag is removed, keep the expects - // but remove the loop around them and the extra output. - runtime_.mergeValues({{"envoy.restart_features.use_fast_protobuf_hash", runtime_value}}); - EXPECT_EQ(MessageUtil::hash(a1), MessageUtil::hash(a2)) << runtime_value; - EXPECT_EQ(MessageUtil::hash(a2), MessageUtil::hash(a3)) << runtime_value; - EXPECT_NE(0, MessageUtil::hash(a1)) << runtime_value; - // Same keys and values but with the values in a different order should not have - // the same hash. - EXPECT_NE(MessageUtil::hash(a1), MessageUtil::hash(a4)) << runtime_value; - // Different keys with the values in the same order should not have the same hash. - EXPECT_NE(MessageUtil::hash(a1), MessageUtil::hash(a5)) << runtime_value; - // Struct without 'any' around it should not hash the same as struct inside 'any'. - EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)) << runtime_value; - } + EXPECT_EQ(MessageUtil::hash(a1), MessageUtil::hash(a2)); + EXPECT_EQ(MessageUtil::hash(a2), MessageUtil::hash(a3)); + EXPECT_NE(0, MessageUtil::hash(a1)); + // Same keys and values but with the values in a different order should not have + // the same hash. + EXPECT_NE(MessageUtil::hash(a1), MessageUtil::hash(a4)); + // Different keys with the values in the same order should not have the same hash. 
+ EXPECT_NE(MessageUtil::hash(a1), MessageUtil::hash(a5)); + // Struct without 'any' around it should not hash the same as struct inside 'any'. + EXPECT_NE(MessageUtil::hash(s), MessageUtil::hash(a1)); } TEST_F(ProtobufUtilityTest, RepeatedPtrUtilDebugString) { diff --git a/test/extensions/access_loggers/file/config_test.cc b/test/extensions/access_loggers/file/config_test.cc index 7b75312f2b3d..6a1c08d62afc 100644 --- a/test/extensions/access_loggers/file/config_test.cc +++ b/test/extensions/access_loggers/file/config_test.cc @@ -116,7 +116,7 @@ TEST_F(FileAccessLogTest, DEPRECATED_FEATURE_TEST(LegacyJsonFormat)) { R"({ "text": "plain text", "path": "/bar/foo", - "code": "200" + "code": 200 })", true); } diff --git a/test/extensions/filters/http/cache/cache_filter_logging_info_test.cc b/test/extensions/filters/http/cache/cache_filter_logging_info_test.cc index 79ba6e421097..6aa76376fed6 100644 --- a/test/extensions/filters/http/cache/cache_filter_logging_info_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_logging_info_test.cc @@ -30,8 +30,8 @@ TEST(Coverage, InsertStatusToString) { EXPECT_EQ(insertStatusToString(InsertStatus::InsertAbortedByCache), "InsertAbortedByCache"); EXPECT_EQ(insertStatusToString(InsertStatus::InsertAbortedCacheCongested), "InsertAbortedCacheCongested"); - EXPECT_EQ(insertStatusToString(InsertStatus::InsertAbortedResponseIncomplete), - "InsertAbortedResponseIncomplete"); + EXPECT_EQ(insertStatusToString(InsertStatus::FilterAbortedBeforeInsertComplete), + "FilterAbortedBeforeInsertComplete"); EXPECT_EQ(insertStatusToString(InsertStatus::HeaderUpdate), "HeaderUpdate"); EXPECT_EQ(insertStatusToString(InsertStatus::NoInsertCacheHit), "NoInsertCacheHit"); EXPECT_EQ(insertStatusToString(InsertStatus::NoInsertRequestNotCacheable), diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index 14edc705c36c..fad814b0f575 100644 --- 
a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -1,3 +1,5 @@ +#include + #include "envoy/event/dispatcher.h" #include "source/common/http/headers.h" @@ -25,6 +27,7 @@ using ::Envoy::StatusHelpers::IsOkAndHolds; using ::testing::Gt; using ::testing::IsNull; using ::testing::NotNull; +using ::testing::Return; class CacheFilterTest : public ::testing::Test { protected: @@ -47,6 +50,31 @@ class CacheFilterTest : public ::testing::Test { } void SetUp() override { + context_.server_factory_context_.cluster_manager_.initializeThreadLocalClusters( + {"fake_cluster"}); + ON_CALL(context_.server_factory_context_.cluster_manager_.thread_local_cluster_.async_client_, + start) + .WillByDefault([this](Http::AsyncClient::StreamCallbacks& callbacks, + const Http::AsyncClient::StreamOptions&) { + int i = mock_upstreams_.size(); + mock_upstreams_.push_back(std::make_unique>()); + mock_upstreams_callbacks_.emplace_back(std::ref(callbacks)); + auto ret = mock_upstreams_.back().get(); + mock_upstreams_headers_sent_.emplace_back(); + ON_CALL(*ret, sendHeaders) + .WillByDefault([this, i](Http::RequestHeaderMap& headers, bool end_stream) { + EXPECT_EQ(mock_upstreams_headers_sent_[i], absl::nullopt) + << "headers should only be sent once"; + EXPECT_TRUE(end_stream) << "post requests should be bypassing the filter"; + mock_upstreams_headers_sent_[i] = Http::TestRequestHeaderMapImpl(); + mock_upstreams_headers_sent_[i]->copyFrom(headers); + }); + ON_CALL(*ret, reset).WillByDefault([this, i]() { + mock_upstreams_callbacks_[i].get().onReset(); + }); + return ret; + }); + ON_CALL(encoder_callbacks_, dispatcher()).WillByDefault(::testing::ReturnRef(*dispatcher_)); ON_CALL(decoder_callbacks_, dispatcher()).WillByDefault(::testing::ReturnRef(*dispatcher_)); ON_CALL(decoder_callbacks_.stream_info_, filterState()) @@ -81,24 +109,116 @@ class CacheFilterTest : public ::testing::Test { return info_or.status(); } - void 
testDecodeRequestMiss(CacheFilterSharedPtr filter) { - // The filter should not encode any headers or data as no cached response exists. - EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0); - EXPECT_CALL(decoder_callbacks_, encodeData).Times(0); + void pumpDispatcher() { dispatcher_->run(Event::Dispatcher::RunType::Block); } + + void receiveUpstreamComplete(size_t upstream_index) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + mock_upstreams_callbacks_[upstream_index].get().onComplete(); + } + + void + receiveUpstreamHeaders(size_t upstream_index, Http::ResponseHeaderMap& headers, bool end_stream, + testing::Matcher expected_response_headers = _) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(expected_response_headers, _)); + mock_upstreams_callbacks_[upstream_index].get().onHeaders( + std::make_unique(headers), end_stream); + ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); + if (end_stream) { + receiveUpstreamComplete(upstream_index); + } + } + + // On successful verification, the upstream request gets reset rather than + // onComplete. 
+ void receiveUpstreamHeadersWithReset( + size_t upstream_index, Http::ResponseHeaderMap& headers, bool end_stream, + testing::Matcher expected_response_headers = _) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + ASSERT(mock_upstreams_.size() > upstream_index); + EXPECT_CALL(*mock_upstreams_[upstream_index], reset()); + EXPECT_CALL(decoder_callbacks_, encodeHeaders_(expected_response_headers, _)); + mock_upstreams_callbacks_[upstream_index].get().onHeaders( + std::make_unique(headers), end_stream); + ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); + testing::Mock::VerifyAndClearExpectations(mock_upstreams_[1].get()); + } + + void receiveUpstreamBody(size_t upstream_index, absl::string_view body, bool end_stream) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + EXPECT_CALL(decoder_callbacks_, encodeData); + Buffer::OwnedImpl buf{body}; + mock_upstreams_callbacks_[upstream_index].get().onData(buf, end_stream); + ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); + if (end_stream) { + receiveUpstreamComplete(upstream_index); + } + } + + void receiveUpstreamBodyAfterFilterDestroyed(size_t upstream_index, absl::string_view body, + bool end_stream) { + // Same as receiveUpstreamBody but without expecting a call to encodeData. 
+ ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + Buffer::OwnedImpl buf{body}; + mock_upstreams_callbacks_[upstream_index].get().onData(buf, end_stream); + if (end_stream) { + receiveUpstreamComplete(upstream_index); + } + } + + void receiveUpstreamTrailers(size_t upstream_index, Http::ResponseTrailerMap& trailers) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + EXPECT_CALL(decoder_callbacks_, encodeTrailers_); + mock_upstreams_callbacks_[upstream_index].get().onTrailers( + std::make_unique(trailers)); + ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); + receiveUpstreamComplete(upstream_index); + } + + void receiveUpstreamTrailersAfterFilterDestroyed(size_t upstream_index, + Http::ResponseTrailerMap& trailers) { + ASSERT(mock_upstreams_callbacks_.size() > upstream_index); + mock_upstreams_callbacks_[upstream_index].get().onTrailers( + std::make_unique(trailers)); + receiveUpstreamComplete(upstream_index); + } + + void populateCommonCacheEntry(size_t upstream_index, CacheFilterSharedPtr filter, + absl::string_view body = "", + OptRef trailers = absl::nullopt) { + testDecodeRequestMiss(upstream_index, filter); + + receiveUpstreamHeaders(upstream_index, response_headers_, + body.empty() && trailers == absl::nullopt); + + if (!body.empty()) { + receiveUpstreamBody(upstream_index, body, trailers == absl::nullopt); + } + if (trailers) { + receiveUpstreamTrailers(upstream_index, *trailers); + } + + filter->onStreamComplete(); + EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); + pumpDispatcher(); + } + + void testDecodeRequestMiss(size_t upstream_index, CacheFilterSharedPtr filter) { // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is // in progress. EXPECT_EQ(filter->decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); - // The filter should continue decoding when the cache lookup result (miss) is ready. 
- EXPECT_CALL(decoder_callbacks_, continueDecoding); - // The cache lookup callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); - ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); + // An upstream request should be sent. + ASSERT_THAT(mock_upstreams_.size(), Gt(upstream_index)); + ASSERT_THAT(mock_upstreams_headers_sent_.size(), Gt(upstream_index)); + EXPECT_THAT(mock_upstreams_headers_sent_[upstream_index], testing::Optional(request_headers_)); } void testDecodeRequestHitNoBody(CacheFilterSharedPtr filter) { @@ -123,7 +243,7 @@ class CacheFilterTest : public ::testing::Test { // The cache lookup callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); } @@ -154,7 +274,7 @@ class CacheFilterTest : public ::testing::Test { // Run events on the dispatcher so that the callback is invoked. // The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); } @@ -174,6 +294,9 @@ class CacheFilterTest : public ::testing::Test { {"cache-control", "public,max-age=3600"}}; NiceMock decoder_callbacks_; NiceMock encoder_callbacks_; + std::vector> mock_upstreams_; + std::vector> mock_upstreams_callbacks_; + std::vector> mock_upstreams_headers_sent_; Api::ApiPtr api_ = Api::createApiForTest(); Event::DispatcherPtr dispatcher_ = api_->allocateDispatcher("test_thread"); const Seconds delay_ = Seconds(10); @@ -193,7 +316,8 @@ TEST_F(CacheFilterTest, UncacheableRequest) { // POST requests are uncacheable request_headers_.setMethod(Http::Headers::get().MethodValues.Post); - for (int request = 1; request <= 2; request++) { + for (int request = 0; request < 2; request++) { + std::cerr << " request " << request << std::endl; // Create filter for the request CacheFilterSharedPtr filter = makeFilter(simple_cache_); @@ -221,15 +345,16 @@ TEST_F(CacheFilterTest, UncacheableResponse) { // Responses with "Cache-Control: no-store" are uncacheable response_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-store"); - for (int request = 1; request <= 2; request++) { + for (int request = 0; request < 2; request++) { + std::cerr << " request " << request << std::endl; // Create filter for the request. CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(request, filter); - // Encode response headers. 
- EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); + receiveUpstreamHeaders(request, response_headers_, true); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::NoInsertResponseNotCacheable)); @@ -237,24 +362,23 @@ TEST_F(CacheFilterTest, UncacheableResponse) { } TEST_F(CacheFilterTest, CacheMiss) { - for (int request = 1; request <= 2; request++) { + for (int request = 0; request < 2; request++) { + std::cerr << " request " << request << std::endl; // Each iteration a request is sent to a different host, therefore the second one is a miss - request_headers_.setHost("CacheMiss" + std::to_string(request)); + request_headers_.setHost(absl::StrCat("CacheMiss", request)); // Create filter for request 1 CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(request, filter); - // Encode response header - EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); + receiveUpstreamHeaders(request, response_headers_, true); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::InsertSucceeded)); } - // Clear events off the dispatcher. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(CacheFilterTest, Disabled) { @@ -267,28 +391,26 @@ TEST_F(CacheFilterTest, CacheMissWithTrailers) { request_headers_.setHost("CacheMissWithTrailers"); const std::string body = "abc"; Buffer::OwnedImpl body_buffer(body); - Http::TestResponseTrailerMapImpl trailers; + Http::TestResponseTrailerMapImpl trailers{{"somekey", "somevalue"}}; - for (int request = 1; request <= 2; request++) { + for (int request = 0; request < 2; request++) { + std::cerr << " request " << request << std::endl; // Each iteration a request is sent to a different host, therefore the second one is a miss - request_headers_.setHost("CacheMissWithTrailers" + std::to_string(request)); + request_headers_.setHost(absl::StrCat("CacheMissWithTrailers", request)); - // Create filter for request 1 CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(request, filter); - // Encode response header - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(body_buffer, false), Http::FilterDataStatus::Continue); - EXPECT_EQ(filter->encodeTrailers(trailers), Http::FilterTrailersStatus::Continue); + receiveUpstreamHeaders(request, response_headers_, false); + receiveUpstreamBody(request, body, false); + receiveUpstreamTrailers(request, trailers); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::InsertSucceeded)); } - // Clear events off the dispatcher. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); } TEST_F(CacheFilterTest, CacheMissWithTrailersWhenCacheRespondsQuickerThanUpstream) { @@ -297,46 +419,34 @@ TEST_F(CacheFilterTest, CacheMissWithTrailersWhenCacheRespondsQuickerThanUpstrea Buffer::OwnedImpl body_buffer(body); Http::TestResponseTrailerMapImpl trailers; - for (int request = 1; request <= 2; request++) { + for (int request = 0; request < 2; request++) { + std::cerr << " request " << request << std::endl; // Each iteration a request is sent to a different host, therefore the second one is a miss request_headers_.setHost("CacheMissWithTrailers" + std::to_string(request)); // Create filter for request 1 CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Encode response header - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - // Resolve cache response - dispatcher_->run(Event::Dispatcher::RunType::Block); - EXPECT_EQ(filter->encodeData(body_buffer, false), Http::FilterDataStatus::Continue); - // Resolve cache response - dispatcher_->run(Event::Dispatcher::RunType::Block); - EXPECT_EQ(filter->encodeTrailers(trailers), Http::FilterTrailersStatus::Continue); - // Resolve cache response - dispatcher_->run(Event::Dispatcher::RunType::Block); + testDecodeRequestMiss(request, filter); + receiveUpstreamHeaders(request, response_headers_, false); + pumpDispatcher(); + receiveUpstreamBody(request, body, false); + pumpDispatcher(); + receiveUpstreamTrailers(request, trailers); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::InsertSucceeded)); } // Clear events off the dispatcher. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } TEST_F(CacheFilterTest, CacheHitNoBody) { request_headers_.setHost("CacheHitNoBody"); - { - // Create filter for request 1. 
- CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response headers. - EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); - } + populateCommonCacheEntry(0, makeFilter(simple_cache_)); waitBeforeSecondRequest(); { // Create filter for request 2. @@ -354,25 +464,7 @@ TEST_F(CacheFilterTest, CacheHitWithBody) { request_headers_.setHost("CacheHitWithBody"); const std::string body = "abc"; - { - // Create filter for request 1. - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response. - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache insertBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::InsertSucceeded)); - } + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Create filter for request 2 @@ -393,18 +485,8 @@ TEST_F(CacheFilterTest, WatermarkEventsAreSentIfCacheBlocksStreamAndLimitExceede // Set the buffer limit to 2 bytes to ensure we send watermark events. 
EXPECT_CALL(encoder_callbacks_, encoderBufferLimit()).WillRepeatedly(::testing::Return(2)); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - auto mock_insert_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); - EXPECT_CALL(*mock_http_cache, makeInsertContext(_, _)) - .WillOnce([&](LookupContextPtr&&, - Http::StreamEncoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_insert_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); + MockInsertContext* mock_insert_context = mock_http_cache->mockInsertContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb)]() mutable { std::move(cb)(LookupResult{}, false); }); }); @@ -426,35 +508,36 @@ TEST_F(CacheFilterTest, WatermarkEventsAreSentIfCacheBlocksStreamAndLimitExceede dispatcher_->post( [cb = std::move(ready_for_next_chunk)]() mutable { std::move(cb)(true); }); }); - EXPECT_CALL(*mock_insert_context, onDestroy()); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { CacheFilterSharedPtr filter = makeFilter(mock_http_cache); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(0, filter); // Encode response. response_headers_.setContentLength(body1.size() + body2.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); + receiveUpstreamHeaders(0, response_headers_, false); // The insertHeaders callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); + + // TODO(ravenblack): once watermarking is available in async upstreams + // revisit this test. 
+ // EXPECT_CALL(encoder_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark()); - EXPECT_CALL(encoder_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark()); // Write the body in two pieces - the first one should exceed the watermark and // send a high watermark event. - Buffer::OwnedImpl body1buf(body1); - Buffer::OwnedImpl body2buf(body2); - EXPECT_EQ(filter->encodeData(body1buf, false), Http::FilterDataStatus::Continue); - EXPECT_EQ(filter->encodeData(body2buf, true), Http::FilterDataStatus::Continue); + receiveUpstreamBody(0, body1, false); + receiveUpstreamBody(0, body2, true); ASSERT_THAT(captured_insert_body_callback, NotNull()); + + // TODO(ravenblack): once watermarking is available in async upstreams + // revisit this test. // When the cache releases, a low watermark event should be sent. - EXPECT_CALL(encoder_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); + // EXPECT_CALL(encoder_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); + captured_insert_body_callback(true); - // The cache insertBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); EXPECT_THAT(insertStatus(), IsOkAndHolds(InsertStatus::InsertSucceeded)); @@ -468,18 +551,8 @@ TEST_F(CacheFilterTest, FilterDestroyedWhileWatermarkedSendsLowWatermarkEvent) { // Set the buffer limit to 2 bytes to ensure we send watermark events. 
EXPECT_CALL(encoder_callbacks_, encoderBufferLimit()).WillRepeatedly(::testing::Return(2)); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - auto mock_insert_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); - EXPECT_CALL(*mock_http_cache, makeInsertContext(_, _)) - .WillOnce([&](LookupContextPtr&&, - Http::StreamEncoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_insert_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); + MockInsertContext* mock_insert_context = mock_http_cache->mockInsertContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb)]() mutable { std::move(cb)(LookupResult{}, false); }); }); @@ -496,36 +569,40 @@ TEST_F(CacheFilterTest, FilterDestroyedWhileWatermarkedSendsLowWatermarkEvent) { EXPECT_THAT(captured_insert_body_callback, IsNull()); captured_insert_body_callback = std::move(ready_for_next_chunk); }); - EXPECT_CALL(*mock_insert_context, onDestroy()); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { CacheFilterSharedPtr filter = makeFilter(mock_http_cache, false); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(0, filter); // Encode response. response_headers_.setContentLength(body1.size() + body2.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); + receiveUpstreamHeaders(0, response_headers_, false); // The insertHeaders callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); - - EXPECT_CALL(encoder_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark()); + pumpDispatcher(); + + // TODO(ravenblack): enable watermark testing again once the cache filter's + // watermark behavior is usable. Currently this is blocked in two ways - + // async http streams don't support watermarking so we can't slow it down anyway, + // and populating the cache and streaming to the individual client are still + // linked, which means slowing it down for the client could also ruin the cache + // behavior. I intend to make the request that triggers a cache insert turn into + // a cache streamed read operation once the cache insert begins. + // EXPECT_CALL(encoder_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark()); // Write the body in two pieces - the first one should exceed the watermark and // send a high watermark event. - Buffer::OwnedImpl body1buf(body1); - Buffer::OwnedImpl body2buf(body2); - EXPECT_EQ(filter->encodeData(body1buf, false), Http::FilterDataStatus::Continue); - dispatcher_->run(Event::Dispatcher::RunType::Block); - EXPECT_EQ(filter->encodeData(body2buf, true), Http::FilterDataStatus::Continue); - dispatcher_->run(Event::Dispatcher::RunType::Block); + receiveUpstreamBody(0, body1, false); + pumpDispatcher(); + receiveUpstreamBody(0, body2, true); + pumpDispatcher(); ASSERT_THAT(captured_insert_body_callback, NotNull()); // When the filter is destroyed, a low watermark event should be sent. - EXPECT_CALL(encoder_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); + // TODO(ravenblack): enable watermark testing once it works. 
+ // EXPECT_CALL(encoder_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()); filter->onDestroy(); filter.reset(); captured_insert_body_callback(false); - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } } @@ -538,12 +615,7 @@ TEST_F(CacheFilterTest, CacheEntryStreamedWithTrailersAndNoContentLengthCanDeliv request_headers_.setHost("CacheEntryStreamedWithTrailers"); const std::string body = "abcde"; auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); // response_headers_ intentionally has no content length, LookupResult also has no content length. EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb), this]() mutable { @@ -569,7 +641,6 @@ TEST_F(CacheFilterTest, CacheEntryStreamedWithTrailersAndNoContentLengthCanDeliv std::move(cb)(std::make_unique()); }); }); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { CacheFilterSharedPtr filter = makeFilter(mock_http_cache); EXPECT_CALL(decoder_callbacks_, encodeHeaders_(IsSupersetOfHeaders(response_headers_), false)); @@ -584,7 +655,7 @@ TEST_F(CacheFilterTest, CacheEntryStreamedWithTrailersAndNoContentLengthCanDeliv // Run events on the dispatcher so that the callback is invoked. // The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); @@ -597,12 +668,7 @@ TEST_F(CacheFilterTest, CacheEntryStreamedWithTrailersAndNoContentLengthCanDeliv TEST_F(CacheFilterTest, OnDestroyBeforeOnHeadersAbortsAction) { request_headers_.setHost("CacheHitWithBody"); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique>(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { std::unique_ptr response_headers = std::make_unique(response_headers_); @@ -618,18 +684,13 @@ TEST_F(CacheFilterTest, OnDestroyBeforeOnHeadersAbortsAction) { filter->onDestroy(); // Nothing extra should happen when the posted lookup completion resolves, because // the filter was destroyed. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } TEST_F(CacheFilterTest, OnDestroyBeforeOnBodyAbortsAction) { request_headers_.setHost("CacheHitWithBody"); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique>(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { std::unique_ptr response_headers = std::make_unique(response_headers_); @@ -644,13 +705,12 @@ TEST_F(CacheFilterTest, OnDestroyBeforeOnBodyAbortsAction) { .WillOnce([&](const AdjustedByteRange&, LookupBodyCallback&& cb) { body_callback = std::move(cb); }); - EXPECT_CALL(*mock_lookup_context, onDestroy()); auto filter = makeFilter(mock_http_cache, false); EXPECT_EQ(filter->decodeHeaders(request_headers_, true), Http::FilterHeadersStatus::StopAllIterationAndWatermark); dispatcher_->run(Event::Dispatcher::RunType::NonBlock); filter->onDestroy(); - ::testing::Mock::VerifyAndClearExpectations(mock_lookup_context.get()); + ::testing::Mock::VerifyAndClearExpectations(mock_lookup_context); EXPECT_THAT(body_callback, NotNull()); // body_callback should not be called because LookupContext::onDestroy, // correctly implemented, should have aborted it. 
@@ -659,12 +719,7 @@ TEST_F(CacheFilterTest, OnDestroyBeforeOnBodyAbortsAction) { TEST_F(CacheFilterTest, OnDestroyBeforeOnTrailersAbortsAction) { request_headers_.setHost("CacheHitWithTrailers"); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique>(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { std::unique_ptr response_headers = std::make_unique(response_headers_); @@ -700,12 +755,7 @@ TEST_F(CacheFilterTest, BodyReadFromCacheLimitedToBufferSizeChunks) { // 8 bytes. EXPECT_CALL(encoder_callbacks_, encoderBufferLimit()).WillRepeatedly(::testing::Return(5)); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { std::unique_ptr response_headers = std::make_unique(response_headers_); @@ -727,7 +777,6 @@ TEST_F(CacheFilterTest, BodyReadFromCacheLimitedToBufferSizeChunks) { std::move(cb)(std::make_unique("fgh"), true); }); }); - EXPECT_CALL(*mock_lookup_context, onDestroy()); CacheFilterSharedPtr filter = makeFilter(mock_http_cache, false); @@ -754,7 +803,7 @@ TEST_F(CacheFilterTest, BodyReadFromCacheLimitedToBufferSizeChunks) { // Run events on the dispatcher so that the callback is invoked. 
// The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); filter->onDestroy(); filter.reset(); @@ -764,18 +813,8 @@ TEST_F(CacheFilterTest, CacheInsertAbortedByCache) { request_headers_.setHost("CacheHitWithBody"); const std::string body = "abc"; auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - auto mock_insert_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); - EXPECT_CALL(*mock_http_cache, makeInsertContext(_, _)) - .WillOnce([&](LookupContextPtr&&, - Http::StreamEncoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_insert_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); + MockInsertContext* mock_insert_context = mock_http_cache->mockInsertContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb)]() mutable { std::move(cb)(LookupResult{}, false); }); }); @@ -784,27 +823,23 @@ TEST_F(CacheFilterTest, CacheInsertAbortedByCache) { InsertCallback insert_complete, bool) { dispatcher_->post([cb = std::move(insert_complete)]() mutable { std::move(cb)(true); }); }); - EXPECT_CALL(*mock_insert_context, insertBody(_, _, true)) + EXPECT_CALL(*mock_insert_context, insertBody(_, _, false)) .WillOnce([&](const Buffer::Instance&, InsertCallback ready_for_next_chunk, bool) { dispatcher_->post( [cb = std::move(ready_for_next_chunk)]() mutable { std::move(cb)(false); }); }); - EXPECT_CALL(*mock_insert_context, onDestroy()); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { - // Create filter for request 1. + // Create filter for request 0. 
CacheFilterSharedPtr filter = makeFilter(mock_http_cache); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(0, filter); // Encode response. - Buffer::OwnedImpl buffer(body); response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache insertBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + receiveUpstreamHeaders(0, response_headers_, false); + receiveUpstreamBody(0, body, false); + EXPECT_CALL(*mock_upstreams_[0], reset()); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); @@ -812,23 +847,13 @@ TEST_F(CacheFilterTest, CacheInsertAbortedByCache) { } } -TEST_F(CacheFilterTest, FilterDeletedWhileIncompleteCacheWriteInQueueShouldAbandonWrite) { +TEST_F(CacheFilterTest, FilterDestroyedWhileIncompleteCacheWriteInQueueShouldCompleteWrite) { request_headers_.setHost("CacheHitWithBody"); const std::string body = "abc"; auto mock_http_cache = std::make_shared(); std::weak_ptr weak_cache_pointer = mock_http_cache; - auto mock_lookup_context = std::make_unique(); - auto mock_insert_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); - EXPECT_CALL(*mock_http_cache, makeInsertContext(_, _)) - .WillOnce([&](LookupContextPtr&&, - Http::StreamEncoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_insert_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); + MockInsertContext* mock_insert_context = mock_http_cache->mockInsertContext(); EXPECT_CALL(*mock_lookup_context, 
getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb)]() mutable { std::move(cb)(LookupResult{}, false); }); }); @@ -837,44 +862,47 @@ TEST_F(CacheFilterTest, FilterDeletedWhileIncompleteCacheWriteInQueueShouldAband .WillOnce([&](const Http::ResponseHeaderMap&, const ResponseMetadata&, InsertCallback insert_complete, bool) { captured_insert_header_callback = std::move(insert_complete); }); - EXPECT_CALL(*mock_insert_context, onDestroy()); - EXPECT_CALL(*mock_lookup_context, onDestroy()); + EXPECT_CALL(*mock_insert_context, insertBody(_, _, false)) + .WillOnce([this](const Buffer::Instance&, InsertCallback insert_complete, bool) { + dispatcher_->post([cb = std::move(insert_complete)]() mutable { cb(true); }); + }); + EXPECT_CALL(*mock_insert_context, insertTrailers(_, _)) + .WillOnce([this](const Http::ResponseTrailerMap&, InsertCallback insert_complete) { + dispatcher_->post([cb = std::move(insert_complete)]() mutable { cb(true); }); + }); + { - // Create filter for request 1 and move the local shared_ptr, + // Create filter for request 0 and move the local shared_ptr, // transferring ownership to the filter. CacheFilterSharedPtr filter = makeFilter(std::move(mock_http_cache)); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(0, filter); // Encode header of response. response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); + receiveUpstreamHeaders(0, response_headers_, false); // Destroy the filter prematurely (it goes out of scope). } ASSERT_THAT(captured_insert_header_callback, NotNull()); EXPECT_THAT(weak_cache_pointer.lock(), NotNull()) << "cache instance was unexpectedly destroyed when filter was destroyed"; - // The callback should now do nothing visible, because the filter has been destroyed. - // Calling it allows the CacheInsertQueue to discard its self-ownership. 
+ // The callback should now continue to write the cache entry. Completing the + // write allows the UpstreamRequest and CacheInsertQueue to complete and self-destruct. captured_insert_header_callback(true); + pumpDispatcher(); + receiveUpstreamBodyAfterFilterDestroyed(0, body, false); + pumpDispatcher(); + Http::TestResponseTrailerMapImpl trailers{{"somekey", "somevalue"}}; + receiveUpstreamTrailersAfterFilterDestroyed(0, trailers); + pumpDispatcher(); } TEST_F(CacheFilterTest, FilterDeletedWhileCompleteCacheWriteInQueueShouldContinueWrite) { request_headers_.setHost("CacheHitWithBody"); const std::string body = "abc"; auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - auto mock_insert_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); - EXPECT_CALL(*mock_http_cache, makeInsertContext(_, _)) - .WillOnce([&](LookupContextPtr&&, - Http::StreamEncoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_insert_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); + MockInsertContext* mock_insert_context = mock_http_cache->mockInsertContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb)]() mutable { std::move(cb)(LookupResult{}, false); }); }); @@ -888,20 +916,7 @@ TEST_F(CacheFilterTest, FilterDeletedWhileCompleteCacheWriteInQueueShouldContinu .WillOnce([&](const Buffer::Instance&, InsertCallback ready_for_next_chunk, bool) { captured_insert_body_callback = std::move(ready_for_next_chunk); }); - EXPECT_CALL(*mock_insert_context, onDestroy()); - EXPECT_CALL(*mock_lookup_context, onDestroy()); - { - // Create filter for request 1. 
- CacheFilterSharedPtr filter = makeFilter(mock_http_cache); - - testDecodeRequestMiss(filter); - - // Encode response. - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - } + populateCommonCacheEntry(0, makeFilter(mock_http_cache), body); // Header callback should be captured, body callback should not yet since the // queue has not reached that chunk. ASSERT_THAT(captured_insert_header_callback, NotNull()); @@ -911,14 +926,14 @@ TEST_F(CacheFilterTest, FilterDeletedWhileCompleteCacheWriteInQueueShouldContinu // Run events on the dispatcher so that the callback is invoked, // where it should now proceed to write the body chunk, since the // write is still completable. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); // So the mock should now be writing the body. ASSERT_THAT(captured_insert_body_callback, NotNull()); captured_insert_body_callback(true); // The callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked, // where it should now do nothing due to the filter being destroyed. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } TEST_F(CacheFilterTest, SuccessfulValidation) { @@ -926,28 +941,13 @@ TEST_F(CacheFilterTest, SuccessfulValidation) { const std::string body = "abc"; const std::string etag = "abc123"; const std::string last_modified_date = formatter_.now(time_source_); - { - // Create filter for request 1 - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Encode response - // Add Etag & Last-Modified headers to the response for validation - response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); - - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + // Encode response + // Add Etag & Last-Modified headers to the response for validation + response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Create filter for request 2 @@ -959,12 +959,13 @@ TEST_F(CacheFilterTest, SuccessfulValidation) { // Decoding the request should find a cached response that requires validation. 
// As far as decoding the request is concerned, this is the same as a cache miss with the // exception of injecting validation precondition headers. - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added const Http::TestRequestHeaderMapImpl injected_headers = { {"if-none-match", etag}, {"if-modified-since", last_modified_date}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); // Encode 304 response // Advance time to make sure the cached date is updated with the 304 date @@ -972,33 +973,26 @@ TEST_F(CacheFilterTest, SuccessfulValidation) { Http::TestResponseHeaderMapImpl not_modified_response_headers = {{":status", "304"}, {"date", not_modified_date}}; - // The filter should stop encoding iteration when encodeHeaders is called as a cached response - // is being fetched and added to the encoding stream. StopIteration does not stop encodeData of - // the same filter from being called - EXPECT_EQ(filter->encodeHeaders(not_modified_response_headers, true), - Http::FilterHeadersStatus::StopIteration); + // Receiving the 304 response should result in sending the merged headers with + // updated date. 
+ Http::TestResponseHeaderMapImpl expected_response_headers = response_headers_; + expected_response_headers.setDate(not_modified_date); - // Check for the cached response headers with updated date - Http::TestResponseHeaderMapImpl updated_response_headers = response_headers_; - updated_response_headers.setDate(not_modified_date); - EXPECT_THAT(not_modified_response_headers, IsSupersetOfHeaders(updated_response_headers)); + // The upstream should be reset on not_modified + receiveUpstreamHeadersWithReset(1, not_modified_response_headers, true, + IsSupersetOfHeaders(expected_response_headers)); - // A 304 response should not have a body, so encodeData should not be called - // However, if a body is present by mistake, encodeData should stop iteration until - // encoding the cached response is done - Buffer::OwnedImpl not_modified_body; - EXPECT_EQ(filter->encodeData(not_modified_body, true), - Http::FilterDataStatus::StopIterationAndBuffer); + // It should be impossible for onData to be called on the upstream after reset + // has been called on it. // The filter should add the cached response body to encoded data. - Buffer::OwnedImpl buffer(body); EXPECT_CALL( - encoder_callbacks_, - addEncodedData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true)); + decoder_callbacks_, + encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true)); // The cache getBody callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&encoder_callbacks_); @@ -1013,32 +1007,16 @@ TEST_F(CacheFilterTest, SuccessfulValidationWithFilterDestroyedDuringContinueEnc const std::string body = "abc"; const std::string etag = "abc123"; const std::string last_modified_date = formatter_.now(time_source_); - { - // Create filter for request 1 - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response - // Add Etag & Last-Modified headers to the response for validation - response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); - - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + // Encode response + // Add Etag & Last-Modified headers to the response for validation + response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Create filter for request 2 - CacheFilterSharedPtr filter = makeFilter(simple_cache_, /*auto_destroy=*/false); + CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make request require validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); @@ -1046,12 +1024,13 @@ TEST_F(CacheFilterTest, SuccessfulValidationWithFilterDestroyedDuringContinueEnc // Decoding the request should find a cached response that requires validation. // As far as decoding the request is concerned, this is the same as a cache miss with the // exception of injecting validation precondition headers. 
- testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added const Http::TestRequestHeaderMapImpl injected_headers = { {"if-none-match", etag}, {"if-modified-since", last_modified_date}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); // Encode 304 response // Advance time to make sure the cached date is updated with the 304 date @@ -1059,34 +1038,24 @@ TEST_F(CacheFilterTest, SuccessfulValidationWithFilterDestroyedDuringContinueEnc Http::TestResponseHeaderMapImpl not_modified_response_headers = {{":status", "304"}, {"date", not_modified_date}}; - // The filter should stop encoding iteration when encodeHeaders is called as a cached response - // is being fetched and added to the encoding stream. StopIteration does not stop encodeData of - // the same filter from being called - EXPECT_EQ(filter->encodeHeaders(not_modified_response_headers, true), - Http::FilterHeadersStatus::StopIteration); - // Check for the cached response headers with updated date - Http::TestResponseHeaderMapImpl updated_response_headers = response_headers_; - updated_response_headers.setDate(not_modified_date); - EXPECT_THAT(not_modified_response_headers, IsSupersetOfHeaders(updated_response_headers)); + Http::TestResponseHeaderMapImpl expected_response_headers = response_headers_; + expected_response_headers.setDate(not_modified_date); - // A 304 response should not have a body, so encodeData should not be called - // However, if a body is present by mistake, encodeData should stop iteration until - // encoding the cached response is done - Buffer::OwnedImpl not_modified_body; - EXPECT_EQ(filter->encodeData(not_modified_body, true), - Http::FilterDataStatus::StopIterationAndBuffer); + // The upstream should be reset on not_modified + receiveUpstreamHeadersWithReset(1, not_modified_response_headers, 
true, + IsSupersetOfHeaders(expected_response_headers)); + + // It should be impossible for onBody to be called after reset was called. // The filter should add the cached response body to encoded data. - Buffer::OwnedImpl buffer(body); EXPECT_CALL( - encoder_callbacks_, - addEncodedData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true)); - EXPECT_CALL(encoder_callbacks_, continueEncoding()).WillOnce([&]() { filter->onDestroy(); }); + decoder_callbacks_, + encodeData(testing::Property(&Buffer::Instance::toString, testing::Eq(body)), true)); // The cache getBody callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&encoder_callbacks_); } @@ -1097,31 +1066,15 @@ TEST_F(CacheFilterTest, UnsuccessfulValidation) { const std::string body = "abc"; const std::string etag = "abc123"; const std::string last_modified_date = formatter_.now(time_source_); - { - // Create filter for request 1 - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response - // Add Etag & Last-Modified headers to the response for validation. - response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); - - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + // Encode response + // Add Etag & Last-Modified headers to the response for validation. + response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, last_modified_date); + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { - // Create filter for request 2. + // Create filter for request 1. CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make request require validation @@ -1130,21 +1083,22 @@ TEST_F(CacheFilterTest, UnsuccessfulValidation) { // Decoding the request should find a cached response that requires validation. // As far as decoding the request is concerned, this is the same as a cache miss with the // exception of injecting validation precondition headers. - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added. const Http::TestRequestHeaderMapImpl injected_headers = { {"if-none-match", etag}, {"if-modified-since", last_modified_date}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); // Encode new response. // Change the status code to make sure new headers are served, not the cached ones. response_headers_.setStatus(204); // The filter should not stop encoding iteration as this is a new response. 
- EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - Buffer::OwnedImpl new_body; - EXPECT_EQ(filter->encodeData(new_body, true), Http::FilterDataStatus::Continue); + receiveUpstreamHeaders(1, response_headers_, false); + std::string new_body = ""; + receiveUpstreamBody(1, new_body, true); // The response headers should have the new status. EXPECT_THAT(response_headers_, HeaderHasValueRef(Http::Headers::get().Status, "204")); @@ -1154,7 +1108,7 @@ TEST_F(CacheFilterTest, UnsuccessfulValidation) { // If a cache getBody callback is made, it should be posted to the dispatcher. // Run events on the dispatcher so that any available callbacks are invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&encoder_callbacks_); @@ -1167,25 +1121,8 @@ TEST_F(CacheFilterTest, UnsuccessfulValidation) { TEST_F(CacheFilterTest, SingleSatisfiableRange) { request_headers_.setHost("SingleSatisfiableRange"); const std::string body = "abc"; - - { - // Create filter for request 1. - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response. - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Add range info to headers. 
@@ -1215,7 +1152,7 @@ TEST_F(CacheFilterTest, SingleSatisfiableRange) { // Run events on the dispatcher so that the callback is invoked. // The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); @@ -1228,25 +1165,8 @@ TEST_F(CacheFilterTest, SingleSatisfiableRange) { TEST_F(CacheFilterTest, MultipleSatisfiableRanges) { request_headers_.setHost("MultipleSatisfiableRanges"); const std::string body = "abc"; - - { - // Create filter for request 1 - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response header - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Add range info to headers @@ -1273,7 +1193,7 @@ TEST_F(CacheFilterTest, MultipleSatisfiableRanges) { // Run events on the dispatcher so that the callback is invoked. // The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); @@ -1285,25 +1205,8 @@ TEST_F(CacheFilterTest, MultipleSatisfiableRanges) { TEST_F(CacheFilterTest, NotSatisfiableRange) { request_headers_.setHost("NotSatisfiableRange"); const std::string body = "abc"; - - { - // Create filter for request 1 - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response header - Buffer::OwnedImpl buffer(body); - response_headers_.setContentLength(body.size()); - EXPECT_EQ(filter->encodeHeaders(response_headers_, false), Http::FilterHeadersStatus::Continue); - EXPECT_EQ(filter->encodeData(buffer, true), Http::FilterDataStatus::Continue); - // The cache getBody callback should be posted to the dispatcher. - // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + response_headers_.setContentLength(body.size()); + populateCommonCacheEntry(0, makeFilter(simple_cache_), body); waitBeforeSecondRequest(); { // Add range info to headers @@ -1335,7 +1238,7 @@ TEST_F(CacheFilterTest, NotSatisfiableRange) { // Run events on the dispatcher so that the callback is invoked. // The posted lookup callback will cause another callback to be posted (when getBody() is // called) which should also be invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); // This counts as a cache hit: we served an HTTP error, but we // correctly got that info from the cache instead of upstream. @@ -1375,22 +1278,11 @@ TEST_F(CacheFilterTest, GetRequestWithBodyAndTrailers) { // reliably fail if the CacheFilter is accessed after being deleted. 
TEST_F(CacheFilterTest, FilterDeletedBeforePostedCallbackExecuted) { request_headers_.setHost("FilterDeletedBeforePostedCallbackExecuted"); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { // Create filter for request 1. CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Encode response headers. - EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } - { - // Create filter for request 2. - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - // Call decode headers to start the cache lookup, which should immediately post the callback to // the dispatcher. EXPECT_EQ(filter->decodeHeaders(request_headers_, true), @@ -1406,29 +1298,18 @@ TEST_F(CacheFilterTest, FilterDeletedBeforePostedCallbackExecuted) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0); // Run events on the dispatcher so that the callback is invoked after the filter deletion. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); ::testing::Mock::VerifyAndClearExpectations(&decoder_callbacks_); } TEST_F(CacheFilterTest, LocalReplyDuringLookup) { request_headers_.setHost("LocalReplyDuringLookup"); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { // Create filter for request 1. CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Encode response headers. - EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } - { - // Create filter for request 2. - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - // Call decode headers to start the cache lookup, which should immediately post the callback to // the dispatcher. 
EXPECT_EQ(filter->decodeHeaders(request_headers_, true), @@ -1445,7 +1326,7 @@ TEST_F(CacheFilterTest, LocalReplyDuringLookup) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_).Times(0); // Run events on the dispatcher so that the lookup callback is invoked after the local reply. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::RequestIncomplete)); @@ -1459,12 +1340,7 @@ using CacheFilterDeathTest = CacheFilterTest; TEST_F(CacheFilterDeathTest, BadRangeRequestLookup) { request_headers_.setHost("BadRangeRequestLookup"); auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { dispatcher_->post([cb = std::move(cb), this]() mutable { // LookupResult with unknown length and an unsatisfiable RangeDetails is invalid. @@ -1476,7 +1352,6 @@ TEST_F(CacheFilterDeathTest, BadRangeRequestLookup) { false); }); }); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { CacheFilterSharedPtr filter = makeFilter(mock_http_cache); // encodeHeaders can be called when ENVOY_BUG doesn't exit. @@ -1490,7 +1365,7 @@ TEST_F(CacheFilterDeathTest, BadRangeRequestLookup) { // The cache lookup callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. 
EXPECT_ENVOY_BUG( - dispatcher_->run(Event::Dispatcher::RunType::Block), + pumpDispatcher(), "handleCacheHitWithRangeRequest() should not be called with satisfiable_=false"); } } @@ -1499,12 +1374,7 @@ TEST_F(CacheFilterTest, RangeRequestSatisfiedBeforeLengthKnown) { request_headers_.setHost("RangeRequestSatisfiedBeforeLengthKnown"); std::string body = "abcde"; auto mock_http_cache = std::make_shared(); - auto mock_lookup_context = std::make_unique(); - EXPECT_CALL(*mock_http_cache, makeLookupContext(_, _)) - .WillOnce([&](LookupRequest&&, - Http::StreamDecoderFilterCallbacks&) -> std::unique_ptr { - return std::move(mock_lookup_context); - }); + MockLookupContext* mock_lookup_context = mock_http_cache->mockLookupContext(); EXPECT_CALL(*mock_lookup_context, getHeaders(_)).WillOnce([&](LookupHeadersCallback&& cb) { // LookupResult with unknown length and an unsatisfiable RangeDetails is invalid. dispatcher_->post([cb = std::move(cb), this]() mutable { @@ -1522,7 +1392,6 @@ TEST_F(CacheFilterTest, RangeRequestSatisfiedBeforeLengthKnown) { cb(std::make_unique(body), false); }); }); - EXPECT_CALL(*mock_lookup_context, onDestroy()); { CacheFilterSharedPtr filter = makeFilter(mock_http_cache); response_headers_ = {{":status", "206"}, {"content-range", "bytes 0-4/*"}}; @@ -1534,24 +1403,13 @@ TEST_F(CacheFilterTest, RangeRequestSatisfiedBeforeLengthKnown) { // The cache lookup callback should be posted to the dispatcher. // Run events on the dispatcher so that the callback is invoked. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } } TEST_F(CacheFilterDeathTest, StreamTimeoutDuringLookup) { request_headers_.setHost("StreamTimeoutDuringLookup"); - { - // Create filter for request 1. - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - - testDecodeRequestMiss(filter); - - // Encode response headers. 
- EXPECT_EQ(filter->encodeHeaders(response_headers_, true), Http::FilterHeadersStatus::Continue); - - filter->onStreamComplete(); - EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::CacheMiss)); - } + populateCommonCacheEntry(0, makeFilter(simple_cache_)); Envoy::Http::TestResponseHeaderMapImpl local_response_headers{{":status", "408"}}; EXPECT_ENVOY_BUG( { @@ -1574,7 +1432,7 @@ TEST_F(CacheFilterDeathTest, StreamTimeoutDuringLookup) { // As a death test when ENVOY_BUG crashes, as in debug builds, this will exit here, // so we must not perform any required cleanup operations below this point in the block. // When ENVOY_BUG does not crash, we can still validate additional things. - dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); filter->onStreamComplete(); EXPECT_THAT(lookupStatus(), IsOkAndHolds(LookupStatus::RequestIncomplete)); @@ -1582,7 +1440,7 @@ TEST_F(CacheFilterDeathTest, StreamTimeoutDuringLookup) { "Request timed out while cache lookup was outstanding."); // Clear out captured lookup lambdas from the dispatcher. 
- dispatcher_->run(Event::Dispatcher::RunType::Block); + pumpDispatcher(); } TEST(LookupStatusDeathTest, ResolveLookupStatusRequireValidationAndInitialIsBug) { @@ -1591,12 +1449,6 @@ TEST(LookupStatusDeathTest, ResolveLookupStatusRequireValidationAndInitialIsBug) "Unexpected filter state in requestCacheStatus"); } -TEST(LookupStatusDeathTest, ResolveLookupStatusRequireValidationAndDecodeServingFromCacheIsBug) { - EXPECT_ENVOY_BUG(CacheFilter::resolveLookupStatus(CacheEntryStatus::RequiresValidation, - FilterState::DecodeServingFromCache), - "Unexpected filter state in requestCacheStatus"); -} - TEST(LookupStatusDeathTest, ResolveLookupStatusRequireValidationAndDestroyedIsBug) { EXPECT_ENVOY_BUG(CacheFilter::resolveLookupStatus(CacheEntryStatus::RequiresValidation, FilterState::Destroyed), @@ -1612,9 +1464,7 @@ TEST(LookupStatusTest, ResolveLookupStatusReturnsCorrectStatuses) { LookupStatus::Unknown); EXPECT_EQ(CacheFilter::resolveLookupStatus(absl::nullopt, FilterState::ValidatingCachedResponse), LookupStatus::Unknown); - EXPECT_EQ(CacheFilter::resolveLookupStatus(absl::nullopt, FilterState::DecodeServingFromCache), - LookupStatus::Unknown); - EXPECT_EQ(CacheFilter::resolveLookupStatus(absl::nullopt, FilterState::EncodeServingFromCache), + EXPECT_EQ(CacheFilter::resolveLookupStatus(absl::nullopt, FilterState::ServingFromCache), LookupStatus::Unknown); EXPECT_EQ(CacheFilter::resolveLookupStatus(absl::nullopt, FilterState::Destroyed), LookupStatus::Unknown); @@ -1622,7 +1472,7 @@ TEST(LookupStatusTest, ResolveLookupStatusReturnsCorrectStatuses) { FilterState::ValidatingCachedResponse), LookupStatus::RequestIncomplete); EXPECT_EQ(CacheFilter::resolveLookupStatus(CacheEntryStatus::RequiresValidation, - FilterState::EncodeServingFromCache), + FilterState::ServingFromCache), LookupStatus::StaleHitWithSuccessfulValidation); EXPECT_EQ(CacheFilter::resolveLookupStatus(CacheEntryStatus::RequiresValidation, FilterState::ResponseServedFromCache), @@ -1643,143 +1493,156 @@ 
using ValidationHeadersTest = CacheFilterTest; TEST_F(ValidationHeadersTest, EtagAndLastModified) { request_headers_.setHost("EtagAndLastModified"); const std::string etag = "abc123"; - - // Make request 1 to insert the response into cache - { - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Add validation headers to the response - response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, - formatter_.now(time_source_)); - - filter->encodeHeaders(response_headers_, true); - } - // Make request 2 to test for added conditional headers + response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, + formatter_.now(time_source_)); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { + // Make request 1 to test for added conditional headers CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make sure the request requires validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added const Http::TestRequestHeaderMapImpl injected_headers = { {"if-none-match", "abc123"}, {"if-modified-since", formatter_.now(time_source_)}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); } } TEST_F(ValidationHeadersTest, EtagOnly) { request_headers_.setHost("EtagOnly"); const std::string etag = "abc123"; - - // Make request 1 to insert the response into cache - { - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Add validation headers to the response - 
response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); - - filter->encodeHeaders(response_headers_, true); - } - // Make request 2 to test for added conditional headers + response_headers_.setReferenceKey(Http::CustomHeaders::get().Etag, etag); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { + // Make request 1 to test for added conditional headers CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make sure the request requires validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added // If-Modified-Since falls back to date const Http::TestRequestHeaderMapImpl injected_headers = { {"if-none-match", "abc123"}, {"if-modified-since", formatter_.now(time_source_)}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); } } TEST_F(ValidationHeadersTest, LastModifiedOnly) { request_headers_.setHost("LastModifiedOnly"); - - // Make request 1 to insert the response into cache - { - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Add validation headers to the response - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, - formatter_.now(time_source_)); - - filter->encodeHeaders(response_headers_, true); - } - // Make request 2 to test for added conditional headers + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, + formatter_.now(time_source_)); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { + // Make request 2 to test for added conditional headers CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make sure the request requires validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - 
testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added const Http::TestRequestHeaderMapImpl injected_headers = { {"if-modified-since", formatter_.now(time_source_)}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); } } TEST_F(ValidationHeadersTest, NoEtagOrLastModified) { request_headers_.setHost("NoEtagOrLastModified"); - - // Make request 1 to insert the response into cache - { - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - filter->encodeHeaders(response_headers_, true); - } - // Make request 2 to test for added conditional headers + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { + // Make request 2 to test for added conditional headers CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make sure the request requires validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added // If-Modified-Since falls back to date const Http::TestRequestHeaderMapImpl injected_headers = { {"if-modified-since", formatter_.now(time_source_)}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); } } TEST_F(ValidationHeadersTest, InvalidLastModified) { request_headers_.setHost("InvalidLastModified"); - - // Make request 1 to insert the response into cache - { - CacheFilterSharedPtr filter = makeFilter(simple_cache_); - testDecodeRequestMiss(filter); - - // Add validation headers to the response - response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, "invalid-date"); - filter->encodeHeaders(response_headers_, true); - } 
- // Make request 2 to test for added conditional headers + response_headers_.setReferenceKey(Http::CustomHeaders::get().LastModified, "invalid-date"); + populateCommonCacheEntry(0, makeFilter(simple_cache_)); { + // Make request 1 to test for added conditional headers CacheFilterSharedPtr filter = makeFilter(simple_cache_); // Make sure the request requires validation request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, "no-cache"); - testDecodeRequestMiss(filter); + testDecodeRequestMiss(1, filter); // Make sure validation conditional headers are added // If-Modified-Since falls back to date const Http::TestRequestHeaderMapImpl injected_headers = { {"if-modified-since", formatter_.now(time_source_)}}; - EXPECT_THAT(request_headers_, IsSupersetOfHeaders(injected_headers)); + EXPECT_THAT(mock_upstreams_headers_sent_[1], + testing::Optional(IsSupersetOfHeaders(injected_headers))); + } +} + +TEST_F(CacheFilterTest, NoRouteShouldLocalReply) { + request_headers_.setHost("NoRoute"); + EXPECT_CALL(decoder_callbacks_, route()).WillOnce(Return(nullptr)); + { + CacheFilterSharedPtr filter = makeFilter(simple_cache_); + // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is + // in progress. + EXPECT_EQ(filter->decodeHeaders(request_headers_, true), + Http::FilterHeadersStatus::StopAllIterationAndWatermark); + EXPECT_CALL(decoder_callbacks_, + sendLocalReply(Http::Code::NotFound, _, _, _, "cache_no_route")); + // The cache lookup callback should be posted to the dispatcher. + // Run events on the dispatcher so that the callback is invoked. 
+ pumpDispatcher(); + } +} + +TEST_F(CacheFilterTest, NoClusterShouldLocalReply) { + request_headers_.setHost("NoCluster"); + EXPECT_CALL(context_.server_factory_context_.cluster_manager_, getThreadLocalCluster(_)) + .WillOnce(Return(nullptr)); + { + CacheFilterSharedPtr filter = makeFilter(simple_cache_); + // The filter should stop decoding iteration when decodeHeaders is called as a cache lookup is + // in progress. + EXPECT_EQ(filter->decodeHeaders(request_headers_, true), + Http::FilterHeadersStatus::StopAllIterationAndWatermark); + EXPECT_CALL(decoder_callbacks_, + sendLocalReply(Http::Code::ServiceUnavailable, _, _, _, "cache_no_cluster")); + // The cache lookup callback should be posted to the dispatcher. + // Run events on the dispatcher so that the callback is invoked. + pumpDispatcher(); + } +} + +TEST_F(CacheFilterTest, UpstreamResetMidResponseShouldLocalReply) { + request_headers_.setHost("UpstreamResetMidResponse"); + { + CacheFilterSharedPtr filter = makeFilter(simple_cache_); + testDecodeRequestMiss(0, filter); + receiveUpstreamHeaders(0, response_headers_, false); + pumpDispatcher(); + EXPECT_CALL(decoder_callbacks_, + sendLocalReply(Http::Code::ServiceUnavailable, _, _, _, "cache_upstream_reset")); + mock_upstreams_callbacks_[0].get().onReset(); + pumpDispatcher(); } } diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 3d63e2087ba4..13de60b4011c 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -7,7 +7,6 @@ #include "test/mocks/http/mocks.h" #include "test/mocks/server/server_factory_context.h" #include "test/test_common/simulated_time_system.h" -#include "test/test_common/test_runtime.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" @@ -336,22 +335,11 @@ TEST_F(LookupRequestTest, PragmaNoFallback) { } TEST(HttpCacheTest, StableHashKey) { - TestScopedRuntime runtime; - 
runtime.mergeValues({{"envoy.restart_features.use_fast_protobuf_hash", "true"}}); Key key; key.set_host("example.com"); ASSERT_EQ(stableHashKey(key), 6153940628716543519u); } -TEST(HttpCacheTest, StableHashKeyWithSlowHash) { - // TODO(ravenblack): This test should be removed when the runtime guard is removed. - TestScopedRuntime runtime; - runtime.mergeValues({{"envoy.restart_features.use_fast_protobuf_hash", "false"}}); - Key key; - key.set_host("example.com"); - ASSERT_EQ(stableHashKey(key), 9582653837550152292u); -} - TEST_P(LookupRequestTest, ResultWithBodyAndTrailersMatchesExpectation) { request_headers_.setReferenceKey(Http::CustomHeaders::get().CacheControl, GetParam().request_cache_control); diff --git a/test/extensions/filters/http/cache/mocks.h b/test/extensions/filters/http/cache/mocks.h index d1739f958448..57ee46266be8 100644 --- a/test/extensions/filters/http/cache/mocks.h +++ b/test/extensions/filters/http/cache/mocks.h @@ -9,18 +9,6 @@ namespace Extensions { namespace HttpFilters { namespace Cache { -class MockHttpCache : public HttpCache { -public: - MOCK_METHOD(LookupContextPtr, makeLookupContext, - (LookupRequest && request, Http::StreamDecoderFilterCallbacks& callbacks)); - MOCK_METHOD(InsertContextPtr, makeInsertContext, - (LookupContextPtr && lookup_context, Http::StreamEncoderFilterCallbacks& callbacks)); - MOCK_METHOD(void, updateHeaders, - (const LookupContext& lookup_context, const Http::ResponseHeaderMap& response_headers, - const ResponseMetadata& metadata, absl::AnyInvocable on_complete)); - MOCK_METHOD(CacheInfo, cacheInfo, (), (const)); -}; - class MockLookupContext : public LookupContext { public: MOCK_METHOD(void, getHeaders, (LookupHeadersCallback && cb)); @@ -42,6 +30,47 @@ class MockInsertContext : public InsertContext { MOCK_METHOD(void, onDestroy, ()); }; +class MockHttpCache : public HttpCache { +public: + MOCK_METHOD(LookupContextPtr, makeLookupContext, + (LookupRequest && request, Http::StreamFilterCallbacks& callbacks)); + 
MOCK_METHOD(InsertContextPtr, makeInsertContext, + (LookupContextPtr && lookup_context, Http::StreamFilterCallbacks& callbacks)); + MOCK_METHOD(void, updateHeaders, + (const LookupContext& lookup_context, const Http::ResponseHeaderMap& response_headers, + const ResponseMetadata& metadata, absl::AnyInvocable on_complete)); + MOCK_METHOD(CacheInfo, cacheInfo, (), (const)); + MockLookupContext* mockLookupContext() { + ASSERT(mock_lookup_context_ == nullptr); + mock_lookup_context_ = std::make_unique(); + EXPECT_CALL(*mock_lookup_context_, onDestroy()); + EXPECT_CALL(*this, makeLookupContext) + .WillOnce([this](LookupRequest&&, + Http::StreamFilterCallbacks&) -> std::unique_ptr { + auto ret = std::move(mock_lookup_context_); + mock_lookup_context_ = nullptr; + return ret; + }); + return mock_lookup_context_.get(); + } + MockInsertContext* mockInsertContext() { + ASSERT(mock_insert_context_ == nullptr); + mock_insert_context_ = std::make_unique(); + EXPECT_CALL(*mock_insert_context_, onDestroy()); + EXPECT_CALL(*this, makeInsertContext) + .WillOnce([this](LookupContextPtr&& lookup_context, + Http::StreamFilterCallbacks&) -> std::unique_ptr { + lookup_context->onDestroy(); + auto ret = std::move(mock_insert_context_); + mock_insert_context_ = nullptr; + return ret; + }); + return mock_insert_context_.get(); + } + std::unique_ptr mock_lookup_context_; + std::unique_ptr mock_insert_context_; +}; + } // namespace Cache } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc index 8c4d1992cab4..d7a95da98415 100644 --- a/test/extensions/filters/http/common/fuzz/uber_per_filter.cc +++ b/test/extensions/filters/http/common/fuzz/uber_per_filter.cc @@ -38,7 +38,7 @@ void addFileDescriptorsRecursively(const Protobuf::FileDescriptor& descriptor, void addBookstoreProtoDescriptor(Protobuf::Message* message) { 
envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder& config = - *Envoy::Protobuf::DynamicCastToGenerated< + *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::http::grpc_json_transcoder::v3::GrpcJsonTranscoder>(message); config.clear_services(); config.add_services("bookstore.Bookstore"); @@ -82,8 +82,7 @@ void UberFilterFuzzer::guideAnyProtoType(test::fuzz::HttpData* mutable_data, uin void cleanTapConfig(Protobuf::Message* message) { envoy::extensions::filters::http::tap::v3::Tap& config = - *Envoy::Protobuf::DynamicCastToGenerated( - message); + *Envoy::Protobuf::DynamicCastMessage(message); if (config.common_config().config_type_case() == envoy::extensions::common::tap::v3::CommonExtensionConfig::ConfigTypeCase::kStaticConfig) { auto const& output_config = config.common_config().static_config().output_config(); @@ -111,7 +110,7 @@ void cleanTapConfig(Protobuf::Message* message) { void cleanFileSystemBufferConfig(Protobuf::Message* message) { envoy::extensions::filters::http::file_system_buffer::v3::FileSystemBufferFilterConfig& config = - *Envoy::Protobuf::DynamicCastToGenerated< + *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::http::file_system_buffer::v3::FileSystemBufferFilterConfig>( message); if (config.manager_config().thread_pool().thread_count() > kMaxAsyncFileManagerThreadCount) { diff --git a/test/extensions/filters/http/ext_authz/ext_authz_fuzz_lib.cc b/test/extensions/filters/http/ext_authz/ext_authz_fuzz_lib.cc index 018c06df26a9..8eafd4cd6c69 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_fuzz_lib.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_fuzz_lib.cc @@ -37,6 +37,9 @@ ReusableFilterFactory::newFilter(FilterConfigSharedPtr config, const envoy::config::core::v3::Metadata& metadata) { decoding_buffer_ = std::make_unique(); metadata_ = metadata; + decoder_callbacks_.stream_info_.filter_state_ = + std::make_shared(StreamInfo::FilterState::LifeSpan::FilterChain); 
+ std::unique_ptr filter = std::make_unique(std::move(config), std::move(client)); filter->setDecoderFilterCallbacks(decoder_callbacks_); filter->setEncoderFilterCallbacks(encoder_callbacks_); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_grpc_corpus/emit_filter_state_stats b/test/extensions/filters/http/ext_authz/ext_authz_grpc_corpus/emit_filter_state_stats new file mode 100644 index 000000000000..a97e758d47c7 --- /dev/null +++ b/test/extensions/filters/http/ext_authz/ext_authz_grpc_corpus/emit_filter_state_stats @@ -0,0 +1,9 @@ +base { + config { + stat_prefix: "#" + failure_mode_allow_header_add: true + emit_filter_state_stats: true + } + request_data { + } +} diff --git a/test/extensions/filters/listener/http_inspector/BUILD b/test/extensions/filters/listener/http_inspector/BUILD index 09eace060f93..5ca1e27e9aaf 100644 --- a/test/extensions/filters/listener/http_inspector/BUILD +++ b/test/extensions/filters/listener/http_inspector/BUILD @@ -50,6 +50,19 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "http_inspector_parser_callbacks_test", + srcs = ["http_inspector_parser_callbacks_test.cc"], + extension_names = ["envoy.filters.listener.http_inspector"], + rbe_pool = "2core", + deps = [ + "//source/extensions/filters/listener/http_inspector:http_inspector_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/stats:stats_mocks", + "//test/test_common:threadsafe_singleton_injector_lib", + ], +) + envoy_cc_fuzz_test( name = "http_inspector_fuzz_test", srcs = ["http_inspector_fuzz_test.cc"], diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_parser_callbacks_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_parser_callbacks_test.cc new file mode 100644 index 000000000000..57e973f5f461 --- /dev/null +++ b/test/extensions/filters/listener/http_inspector/http_inspector_parser_callbacks_test.cc @@ -0,0 +1,72 @@ +#include 
"source/extensions/filters/listener/http_inspector/http_inspector.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace ListenerFilters { +namespace HttpInspector { +namespace { + +/** + * These tests are written specifically for the NoOpParserCallbacks class to verify + * that its callback methods work as expected. This is necessary because the + * HttpInspector filter only parses the request up to the first line break to determine the HTTP + * version. As a result, not all callback methods are triggered in the HttpInspector tests. + */ +class NoOpParserCallbacksTest : public ::testing::Test { +protected: + NoOpParserCallbacks callbacks_; +}; + +TEST_F(NoOpParserCallbacksTest, OnMessageBeginTest) { + EXPECT_EQ(callbacks_.onMessageBegin(), Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnUrlTest) { + const absl::string_view url = "/index.html"; + EXPECT_EQ(callbacks_.onUrl(url.data(), url.size()), Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnStatusTest) { + const absl::string_view status = "200 OK"; + EXPECT_EQ(callbacks_.onStatus(status.data(), status.size()), + Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnHeaderFieldTest) { + const absl::string_view header_field = "Content-Type"; + EXPECT_EQ(callbacks_.onHeaderField(header_field.data(), header_field.size()), + Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnHeaderValueTest) { + const absl::string_view header_value = "text/html"; + EXPECT_EQ(callbacks_.onHeaderValue(header_value.data(), header_value.size()), + Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnHeadersCompleteTest) { + EXPECT_EQ(callbacks_.onHeadersComplete(), Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, BufferBodyTest) { + const absl::string_view buffer_body = "buffer_body"; + 
callbacks_.bufferBody(buffer_body.data(), buffer_body.size()); +} + +TEST_F(NoOpParserCallbacksTest, OnMessageCompleteTest) { + EXPECT_EQ(callbacks_.onMessageComplete(), Http::Http1::CallbackResult::Success); +} + +TEST_F(NoOpParserCallbacksTest, OnChunkHeaderTest) { + callbacks_.onChunkHeader(true); + callbacks_.onChunkHeader(false); +} + +} // namespace +} // namespace HttpInspector +} // namespace ListenerFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc index 16b5e17faa1d..01a79e0a5150 100644 --- a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -103,7 +103,7 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name // HttpConnectionManager} on which we have constraints. if (filter_name == NetworkFilterNames::get().DirectResponse) { envoy::extensions::filters::network::direct_response::v3::Config& config = - *Envoy::Protobuf::DynamicCastToGenerated< + *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::network::direct_response::v3::Config>(config_message); if (config.response().specifier_case() == envoy::config::core::v3::DataSource::SpecifierCase::kFilename) { @@ -112,7 +112,7 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name } } else if (filter_name == NetworkFilterNames::get().LocalRateLimit) { envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit& config = - *Envoy::Protobuf::DynamicCastToGenerated< + *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::network::local_ratelimit::v3::LocalRateLimit>( config_message); if (config.token_bucket().fill_interval().seconds() > SecondsPerDay) { @@ -125,7 +125,7 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name } } else if 
(filter_name == NetworkFilterNames::get().HttpConnectionManager) { envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& - config = *Envoy::Protobuf::DynamicCastToGenerated< + config = *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::network::http_connection_manager::v3:: HttpConnectionManager>(config_message); if (config.codec_type() == envoy::extensions::filters::network::http_connection_manager::v3:: @@ -158,7 +158,7 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name } } else if (filter_name == NetworkFilterNames::get().EnvoyMobileHttpConnectionManager) { envoy::extensions::filters::network::http_connection_manager::v3:: - EnvoyMobileHttpConnectionManager& config = *Envoy::Protobuf::DynamicCastToGenerated< + EnvoyMobileHttpConnectionManager& config = *Envoy::Protobuf::DynamicCastMessage< envoy::extensions::filters::network::http_connection_manager::v3:: EnvoyMobileHttpConnectionManager>(config_message); if (config.config().codec_type() == diff --git a/test/extensions/http/cache/file_system_http_cache/file_system_http_cache_test.cc b/test/extensions/http/cache/file_system_http_cache/file_system_http_cache_test.cc index 29c87ea43a9b..190764361752 100644 --- a/test/extensions/http/cache/file_system_http_cache/file_system_http_cache_test.cc +++ b/test/extensions/http/cache/file_system_http_cache/file_system_http_cache_test.cc @@ -101,7 +101,6 @@ class FileSystemCacheTestContext { std::string cache_path_; NiceMock context_; std::shared_ptr cache_; - LogLevelSetter log_level_ = LogLevelSetter(spdlog::level::debug); HttpCacheFactory* http_cache_factory_; }; diff --git a/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc index e1f10f409f93..23700a2f8add 100644 --- a/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc +++ b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc @@ 
-337,7 +337,7 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionAllSupportsV6Only) { [=](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& results) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); // On v4 only networks, there will be no results. for (const auto& result : results) { const auto& addrinfo = result.addrInfo(); @@ -368,7 +368,7 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionV6OnlySupportsV4Only) { [=](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& results) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); for (const auto& result : results) { const auto& addrinfo = result.addrInfo(); EXPECT_THAT(addrinfo.address_->ip()->ipv6(), NotNull()); @@ -386,7 +386,7 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionV6OnlySupportsV6Only) { [=](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& results) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_FALSE(results.empty()); for (const auto& result : results) { const auto& addrinfo = result.addrInfo(); @@ -411,7 +411,7 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionAutoSupportsV6Only) { [=](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& results) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); // On v4 only networks, there will be no results. 
for (const auto& result : results) { const auto& addrinfo = result.addrInfo(); @@ -436,7 +436,7 @@ TEST_F(AppleDnsImplTest, DnsIpAddressVersionV4PreferredSupportsV6Only) { [=](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& results) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); // On v4 only networks, there will be no results. for (const auto& result : results) { const auto& addrinfo = result.addrInfo(); @@ -660,7 +660,7 @@ class AppleDnsImplFakeApiTest : public testing::Test { expected_address_size](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(expected_address_size, response.size()); if (dns_lookup_family == DnsLookupFamily::Auto) { @@ -918,7 +918,7 @@ TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletionUnroutableFamilies) { absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); EXPECT_EQ("1.2.3.4:0", response.front().addrInfo().address_->asString()); EXPECT_EQ(std::chrono::seconds(30), response.front().addrInfo().ttl_); @@ -958,7 +958,7 @@ TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletion) { absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); EXPECT_EQ("1.2.3.4:0", response.front().addrInfo().address_->asString()); EXPECT_EQ(std::chrono::seconds(30), 
response.front().addrInfo().ttl_); @@ -1015,7 +1015,7 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { [&dns_callback_executed](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(2, response.size()); dns_callback_executed.Notify(); }); @@ -1148,7 +1148,7 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueries) { [&dns_callback_executed](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); EXPECT_EQ("1.2.3.4:0", response.front().addrInfo().address_->asString()); EXPECT_EQ(std::chrono::seconds(30), response.front().addrInfo().ttl_); @@ -1170,7 +1170,7 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueries) { [&dns_callback_executed2](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); EXPECT_EQ("5.6.7.8:0", response.front().addrInfo().address_->asString()); EXPECT_EQ(std::chrono::seconds(30), response.front().addrInfo().ttl_); @@ -1225,7 +1225,7 @@ TEST_F(AppleDnsImplFakeApiTest, MultipleQueriesOneFails) { // Even though the second query will fail, this one will flush with the // state it had. 
EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); EXPECT_EQ("1.2.3.4:0", response.front().addrInfo().address_->asString()); EXPECT_EQ(std::chrono::seconds(30), response.front().addrInfo().ttl_); @@ -1289,7 +1289,7 @@ TEST_F(AppleDnsImplFakeApiTest, ResultWithOnlyNonAdditiveReplies) { [&dns_callback_executed](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_TRUE(response.empty()); dns_callback_executed.Notify(); }); @@ -1367,7 +1367,7 @@ TEST_F(AppleDnsImplFakeApiTest, DeallocateOnDestruction) { [&dns_callback_executed](DnsResolver::ResolutionStatus status, absl::string_view details, std::list&& response) -> void { EXPECT_EQ(DnsResolver::ResolutionStatus::Completed, status); - EXPECT_EQ(details, "apple_dns_success"); + EXPECT_THAT(details, StartsWith("apple_dns_completed")); EXPECT_EQ(1, response.size()); dns_callback_executed.Notify(); }); diff --git a/test/fuzz/mutable_visitor.cc b/test/fuzz/mutable_visitor.cc index 6668c1f1f4f6..b02fd51c5cc2 100644 --- a/test/fuzz/mutable_visitor.cc +++ b/test/fuzz/mutable_visitor.cc @@ -28,7 +28,7 @@ void traverseMessageWorkerExt(ProtoVisitor& visitor, Protobuf::Message& message, absl::string_view target_type_url; if (message.GetDescriptor()->full_name() == "google.protobuf.Any") { - auto* any_message = Protobuf::DynamicCastToGenerated(&message); + auto* any_message = Protobuf::DynamicCastMessage(&message); inner_message = Helper::typeUrlToMessage(any_message->type_url()); target_type_url = any_message->type_url(); if (inner_message) { diff --git a/test/fuzz/validated_input_generator.cc b/test/fuzz/validated_input_generator.cc index 6bd1873ee17d..2d53ac0f406a 100644 --- 
a/test/fuzz/validated_input_generator.cc +++ b/test/fuzz/validated_input_generator.cc @@ -164,7 +164,7 @@ void ValidatedInputGenerator::handleAnyRules( if (any_rules.has_required() && any_rules.required()) { // Stop creating any message when a certain depth is reached if (max_depth_ > 0 && current_depth_ > max_depth_) { - auto* any_message = Protobuf::DynamicCastToGenerated(msg); + auto* any_message = Protobuf::DynamicCastMessage(msg); any_message->PackFrom(ProtobufWkt::Struct()); return; } @@ -178,7 +178,7 @@ void ValidatedInputGenerator::handleAnyRules( const std::string field_name = std::string(message_path_.back()); FieldToTypeUrls::const_iterator field_to_typeurls_cand = field_to_typeurls.find(field_name); if (field_to_typeurls_cand != any_map_cand->second.end()) { - auto* any_message = Protobuf::DynamicCastToGenerated(msg); + auto* any_message = Protobuf::DynamicCastMessage(msg); inner_message = ProtobufMessage::Helper::typeUrlToMessage(any_message->type_url()); if (!inner_message || !any_message->UnpackTo(inner_message.get())) { const TypeUrlAndFactory& randomed_typeurl = field_to_typeurls_cand->second.at( @@ -415,7 +415,7 @@ void ValidatedInputGenerator::onEnterMessage(Protobuf::Message& msg, const Protobuf::Descriptor* descriptor = msg.GetDescriptor(); message_path_.push_back(field_name); if (descriptor->full_name() == kAny) { - auto* any_message = Protobuf::DynamicCastToGenerated(&msg); + auto* any_message = Protobuf::DynamicCastMessage(&msg); std::unique_ptr inner_message = ProtobufMessage::Helper::typeUrlToMessage(any_message->type_url()); if (!inner_message || !any_message->UnpackTo(inner_message.get())) { diff --git a/test/integration/access_log_integration_test.cc b/test/integration/access_log_integration_test.cc index c62d220b189f..13ec6f506a24 100644 --- a/test/integration/access_log_integration_test.cc +++ b/test/integration/access_log_integration_test.cc @@ -54,6 +54,6 @@ TEST_P(AccessLogIntegrationTest, ShouldReplaceInvalidUtf8) { }); 
testRouterDownstreamDisconnectBeforeRequestComplete(); const std::string log = waitForAccessLog(access_log_name_); - EXPECT_THAT(log, HasSubstr("x_forwarded_for\":\"\xEF\xBF\xBD")); + EXPECT_THAT(log, HasSubstr("x_forwarded_for\":\"\\u00ec")); } } // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 510cd6924d9c..8b4b84f84396 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -4770,8 +4770,6 @@ class AllowForceFail : public Api::OsSysCallsImpl { }; TEST_P(ProtocolIntegrationTest, HandleUpstreamSocketCreationFail) { - config_helper_.addRuntimeOverride("envoy.restart_features.allow_client_socket_creation_failure", - "true"); AllowForceFail fail_socket_n_; TestThreadsafeSingletonInjector os_calls{&fail_socket_n_}; diff --git a/tools/code/BUILD b/tools/code/BUILD index 9c5fd6c0e379..cbeae8f3c67b 100644 --- a/tools/code/BUILD +++ b/tools/code/BUILD @@ -61,6 +61,7 @@ genrule( -l warn \ -v warn \ -x mobile/dist/envoy-pom.xml \ + -x bazel/repo.bzl \ --codeowners=$(location //:CODEOWNERS) \ --owners=$(location //:OWNERS.md) \ --extensions_build_config=$(location :extensions_build_config) \