diff --git a/README.md b/README.md
index a6643f59..dbabae50 100644
--- a/README.md
+++ b/README.md
@@ -152,7 +152,7 @@ Please refer to `ddapm-test-agent-fmt --help` for more information.
 - `PORT` [`8126`]: Port to listen on.
-- `DISABLED_CHECKS` [`""`]: Comma-separated values of checks to disable.
+- `ENABLED_CHECKS` [`""`]: Comma-separated values of checks to enable. Valid values can be found in [trace invariant checks](#Trace-invariant-checks)
 - `LOG_LEVEL` [`"INFO"`]: Log level to use. DEBUG, INFO, WARNING, ERROR, CRITICAL.
@@ -325,13 +325,56 @@ The keys of the JSON body are `path` and `msg`
 curl -X POST 'http://0.0.0.0:8126/test/session/responses/config/path' -d '{"path": "datadog/2/ASM_DATA/blocked_users/config", "msg": {"rules_data": []}}'
 ```
+
 ## /test/trace_check/failures (GET)
-Get any Trace Check failures that occured. Returns a `` if no Trace Check failures occurred, and a `` with the Trace Check Failure messages included in the response body. To be used in combination with `DD_POOL_TRACE_CHECK_FAILURES`, or else failures will not be saved within Test-Agent memory and a `` will always be returned.
+Get Trace Check failures that occurred. If a token is included, trace failures for only that session token are returned unless used in conjunction with `return_all`, which can be used to return all failures regardless of the provided token. This method returns a `200` response if no Trace Check failures are being returned and a `400` response if Trace Check failures are being returned. Trace Check failures are returned with a text content type, with failure messages concatenated in the response body. Optionally, set the `use_json` query string parameter to `true` to return Trace Check failures as a JSON response in the following format:
+```json
+response = {
+    "<check_name>" : ["<failure_message>", "<failure_message>"]
+}
+```
+
+NOTE: To be used in combination with `DD_POOL_TRACE_CHECK_FAILURES`, or else failures will not be saved within Test-Agent memory and a `200` will always be returned.
+
+#### [optional] `?test_session_token=`
+#### [optional] `X-Datadog-Test-Session-Token`
+#### [optional] `?use_json=`
+#### [optional] `?return_all=`

 ```
 curl -X GET 'http://0.0.0.0:8126/test/trace_check/failures'
 ```

+## /test/trace_check/clear (GET)
+Clear Trace Check failures that occurred. If a token is included, trace failures for only that session token are cleared unless used in conjunction with `clear_all`. This argument can be used to clear all failures (regardless of the provided session token).
+
+#### [optional] `?test_session_token=`
+#### [optional] `X-Datadog-Test-Session-Token`
+#### [optional] `?clear_all=`
+
+```
+curl -X GET 'http://0.0.0.0:8126/test/trace_check/clear'
+```
+
+## /test/trace_check/summary (GET)
+Get Trace Check summary results. If a token is included, returns summary results only for Trace Checks run during the session. The `return_all` optional query string parameter can be used to return all trace check results (regardless of the provided session token). The method returns Trace Check results in the following JSON format:
+```json
+summary = {
+    "trace_content_length" : {
+        "Passed_Checks": 10,
+        "Failed_Checks": 0,
+        "Skipped_Checks": 4,
+    } ...
+}
+```
+
+#### [optional] `?test_session_token=`
+#### [optional] `X-Datadog-Test-Session-Token`
+#### [optional] `?return_all=`
+
+```
+curl -X GET 'http://0.0.0.0:8126/test/trace_check/summary'
+```
+
 ### /v0.1/pipeline_stats

 Mimics the pipeline_stats endpoint of the agent, but always returns OK, and logs a line everytime it's called.
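To make the three endpoints above easier to wire into a CI script, here is a minimal usage sketch (not part of this patch). It assumes a Test Agent listening on the default `127.0.0.1:8126` with `DD_POOL_TRACE_CHECK_FAILURES` enabled, and uses only the routes and query parameters documented above:

```python
import json
import urllib.error
import urllib.request

BASE = "http://127.0.0.1:8126"  # assumed default Test Agent address and port

# Summary of check results across all sessions: always a 200 with a JSON body.
with urllib.request.urlopen(f"{BASE}/test/trace_check/summary?return_all=true") as resp:
    print(json.loads(resp.read()))

# Failures as JSON: a 400 status signals that at least one Trace Check failed.
try:
    with urllib.request.urlopen(f"{BASE}/test/trace_check/failures?return_all=true&use_json=true"):
        print("no trace check failures recorded")
except urllib.error.HTTPError as err:
    # With use_json=true the 400 body maps check names to their failure messages.
    print(json.loads(err.read()))

# Reset stored failures and summaries between test runs.
with urllib.request.urlopen(f"{BASE}/test/trace_check/clear?clear_all=true"):
    pass
```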
diff --git a/ddapm_test_agent/agent.py b/ddapm_test_agent/agent.py index 7297ff5f..720d265e 100644 --- a/ddapm_test_agent/agent.py +++ b/ddapm_test_agent/agent.py @@ -16,6 +16,7 @@ from typing import Literal from typing import Optional from typing import Set +from typing import Tuple from typing import cast from urllib.parse import urlparse from urllib.parse import urlunparse @@ -152,6 +153,22 @@ def update_trace_agent_port(url, new_port): return new_url +def default_value_trace_check_results_by_check(): + return defaultdict(default_value_trace_results_summary) + + +def default_value_trace_failures(): + return [] + + +def default_value_trace_results_summary(): + return { + "Passed_Checks": 0, + "Failed_Checks": 0, + "Skipped_Checks": 0, + } + + class Agent: def __init__(self): """Only store the requests sent to the agent. There are many representations @@ -163,8 +180,10 @@ def __init__(self): # Token to be used if running test cases synchronously self._requests: List[Request] = [] self._rc_server = RemoteConfigServer() - self._trace_failures: Dict[str, List[str]] = defaultdict(list) - self._trace_check_results_by_check: Dict[str, Dict[str, int]] = {} + self._trace_failures: Dict[str, List[Tuple[CheckTrace, str]]] = defaultdict(default_value_trace_failures) + self._trace_check_results_by_check: Dict[str, Dict[str, Dict[str, int]]] = defaultdict( + default_value_trace_check_results_by_check + ) self._forward_endpoints: List[str] = [ "/v0.4/traces", "/v0.5/traces", @@ -193,36 +212,76 @@ async def traces(self) -> TraceMap: return _traces async def clear_trace_check_failures(self, request: Request) -> web.Response: - """Clear traces by session token or all traces if none is provided.""" - token = request.get("session_token", "default") - - trace_failures = self._trace_failures.get(token, []) - log.info(f"Clearing {len(trace_failures)} Trace Check Failures for Token {token}") + """Clear Trace Check failures for the provided session token, or for all tokens when `clear_all` is true.""" + token = request["session_token"] + clear_all = "clear_all" in request.query and request.query["clear_all"].lower() == "true" + if clear_all: + failures_by_token = self._trace_failures + trace_failures = [value for sublist in failures_by_token.values() for value in sublist] + self._trace_failures = defaultdict(default_value_trace_failures) + self._trace_check_results_by_check = defaultdict(default_value_trace_check_results_by_check) + else: + trace_failures = self._trace_failures[token] + del self._trace_failures[token] + del self._trace_check_results_by_check[token] + log.info(f"Clearing {len(trace_failures)} Trace Check Failures for Token {token}, clear_all={clear_all}") log.info(trace_failures) - del self._trace_failures[token] - self._trace_check_results_by_check = {} return web.HTTPOk() def get_trace_check_failures(self, request: Request) -> web.Response: - """Return the Trace Check failures that occurred, if pooling is enabled as a request.""" - token = request.get("session_token", "default") - - trace_check_failures = self._trace_failures.get(token, []) - n_failures = len(trace_check_failures) - log.info(f"{n_failures} Trace Failures Occurred for Token {token}.") - trace_check_full_results = self._trace_check_results_by_check + """Return the Trace Check failures that occurred, if pooling is enabled, + returned as either text (by default) or a JSON response.
+ """ + token = request["session_token"] + return_all = "return_all" in request.query and request.query["return_all"].lower() == "true" + + if return_all: + # check for whether to return all results + trace_check_failures = [] + for f in self._trace_failures.values(): + trace_check_failures.extend(f) + n_failures = len(trace_check_failures) + log.info(f"{n_failures} Trace Failures Occurred in Total") + else: + # or return results by token + trace_check_failures = self._trace_failures.get(token, []) + n_failures = len(trace_check_failures) + log.info(f"{n_failures} Trace Failures Occurred for Token {token}") if n_failures > 0: - failure_message = f"APM Test Agent Validation failed with {n_failures} Trace Check failures.\n" - for trace_check_message in trace_check_failures: - failure_message += trace_check_message - failure_message += f"\nAPM Test Agent Trace Check Results by Check -------------------------------" - failure_message += f"\n{json.dumps(trace_check_full_results, indent=4)}" - return web.HTTPBadRequest(text=failure_message) + if "use_json" in request.query and request.query["use_json"].lower() == "true": + # check what response type to use + results: Dict[str, List[str]] = {} + for check_trace, failure_message in trace_check_failures: + results = check_trace.get_failures_by_check(results) + json_summary = json.dumps(results) + return web.HTTPBadRequest(body=json_summary, content_type="application/json") + else: + # or use default response of text + msg = f"APM Test Agent Validation failed with {n_failures} Trace Check failures.\n" + for check_trace, failure_message in trace_check_failures: + msg += failure_message + return web.HTTPBadRequest(text=msg) else: message = f"APM Test Agent Trace Check Results by Check --------------------------------" message += f"\n{json.dumps(trace_check_full_results, indent=4)}" return web.HTTPOk(text=message) + def get_trace_check_summary(self, request: Request) -> web.Response: + token = request["session_token"] + summary: Dict[str, Dict[str, int]] = defaultdict(default_value_trace_results_summary) + return_all = "return_all" in request.query and request.query["return_all"].lower() == "true" + + if return_all: + for token, token_results in self._trace_check_results_by_check.items(): + for check_name, check_results in token_results.items(): + summary[check_name]["Passed_Checks"] += check_results["Passed_Checks"] + summary[check_name]["Failed_Checks"] += check_results["Failed_Checks"] + summary[check_name]["Skipped_Checks"] += check_results["Skipped_Checks"] + else: + summary = self._trace_check_results_by_check.get(token, {}) + json_summary = json.dumps(summary) + return web.HTTPOk(body=json_summary, content_type="application/json") + async def apmtelemetry(self) -> List[TelemetryEvent]: """Return the telemetry events stored by the agent""" _events: List[TelemetryEvent] = [] @@ -420,6 +479,8 @@ async def _handle_traces(self, request: Request, version: Literal["v0.4", "v0.5" checks: Checks = request.app["checks"] headers = request.headers + # TODO: This method requires all checks are hard coded + await checks.check("trace_stall", headers=headers, request=request) with CheckTrace.add_frame("headers") as f: @@ -602,6 +663,7 @@ async def handle_session_requests(self, request: Request) -> web.Response: self.handle_v01_pipelinestats, self.handle_v2_apmtelemetry, self.handle_v1_profiling, + self.handle_v07_remoteconfig, ): continue resp.append( @@ -753,26 +815,34 @@ async def check_failure_middleware(self, request: Request, handler: _Handler) -> try: 
response = await handler(request) except AssertionError as e: - token = request.get("session_token", "default") - self._trace_check_results_by_check = trace.get_results(self._trace_check_results_by_check) + token = request["session_token"] + + # update trace_check results + trace.update_results(self._trace_check_results_by_check[token]) + # only save trace failures to memory if necessary msg = str(trace) + str(e) if request.app["pool_trace_check_failures"]: log.info(f"Storing Trace Check Failure for Session Token: {token}.") - self._trace_failures[token].append(msg) + # append failure to trace failures + self._trace_failures[token].append((trace, msg)) log.error(msg) return web.HTTPBadRequest(body=msg) else: token = request["session_token"] - self._trace_check_results_by_check = trace.get_results(self._trace_check_results_by_check) + # update trace_check results + trace.update_results(self._trace_check_results_by_check[token]) if trace.has_fails(): # only save trace failures to memory if necessary pool_failures = request.app["pool_trace_check_failures"] - log.error(f"Trace had the following failures, using config: token={token}, DD_POOL_TRACE_CHECK_FAILURES={pool_failures}") + log.error( + f"Trace had the following failures, using config: token={token}, DD_POOL_TRACE_CHECK_FAILURES={pool_failures}" + ) msg = str(trace) if request.app["pool_trace_check_failures"]: log.info(f"Storing Trace Check Failure for Session Token: {token}.") - self._trace_failures[token].append(msg) + # append failure to trace failures + self._trace_failures[token].append((trace, msg)) log.error(msg) if request.app["disable_error_responses"]: return response @@ -782,7 +852,7 @@ async def check_failure_middleware(self, request: Request, handler: _Handler) -> def make_app( - disabled_checks: List[str], + enabled_checks: List[str], log_span_fmt: str, snapshot_dir: str, snapshot_ci_mode: bool, @@ -833,6 +903,7 @@ def make_app( web.get("/test/trace/analyze", agent.handle_trace_analyze), web.get("/test/trace_check/failures", agent.get_trace_check_failures), web.get("/test/trace_check/clear", agent.clear_trace_check_failures), + web.get("/test/trace_check/summary", agent.get_trace_check_summary), ] ) checks = Checks( @@ -844,7 +915,7 @@ def make_app( CheckTracePeerService, CheckTraceDDService ], - disabled=disabled_checks, + enabled=enabled_checks, ) app["checks"] = checks app["snapshot_dir"] = snapshot_dir @@ -908,11 +979,11 @@ def main(args: Optional[List[str]] = None) -> None: ), ) parser.add_argument( - "--disabled-checks", + "--enabled-checks", type=List[str], - default=_parse_csv(os.environ.get("DISABLED_CHECKS", "")), + default=_parse_csv(os.environ.get("ENABLED_CHECKS", "")), help=( - "Comma-separated values of checks to disable. None are disabled " + "Comma-separated values of checks to enable. None are enabled " " by default. 
For the list of values see " "https://github.com/datadog/dd-trace-test-agent" ), ) parser.add_argument( @@ -992,7 +1063,7 @@ def main(args: Optional[List[str]] = None) -> None: os.path.abspath(parsed_args.snapshot_dir), ) app = make_app( - disabled_checks=parsed_args.disabled_checks, + enabled_checks=parsed_args.enabled_checks, log_span_fmt=parsed_args.log_span_fmt, snapshot_dir=parsed_args.snapshot_dir, snapshot_ci_mode=parsed_args.snapshot_ci_mode, diff --git a/ddapm_test_agent/checks.py b/ddapm_test_agent/checks.py index 557660dc..f2dceb3b 100644 --- a/ddapm_test_agent/checks.py +++ b/ddapm_test_agent/checks.py @@ -40,34 +40,18 @@ def has_fails(self) -> bool: return True return False - def get_results(self, results: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]: - """ - results = { - check.name: { - "Passed_Checks": int - "Failed_Checks": int - "Skipped_Checks": int - } - ... - } + def update_results(self, results: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]: + """Update `results` with this frame's check outcomes and return it, in the format: + results = { check.name: { "Passed_Checks": int, "Failed_Checks": int, "Skipped_Checks": int}} """ for c in self._checks: if c.failed: - if c.name in results: - results[c.name]["Failed_Checks"] += 1 - else: - results[c.name] = {"Passed_Checks": 0, "Failed_Checks": 1, "Skipped_Checks": 0} + results[c.name]["Failed_Checks"] += 1 elif c.skipped: - if c.name in results: - results[c.name]["Skipped_Checks"] += 1 - else: - results[c.name] = {"Passed_Checks": 0, "Failed_Checks": 0, "Skipped_Checks": 1} + results[c.name]["Skipped_Checks"] += 1 else: - if c.name in results: - results[c.name]["Passed_Checks"] += 1 - else: - results[c.name] = {"Passed_Checks": 1, "Failed_Checks": 0, "Skipped_Checks": 0} + results[c.name]["Passed_Checks"] += 1 return results def __repr__(self) -> str: @@ -116,11 +100,31 @@ def frames_dfs(self) -> Generator[Tuple[CheckTraceFrame, int], None, None]: def has_fails(self) -> bool: return len([f for f in self.frames() if f.has_fails()]) > 0 - def get_results(self, results: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]: + def update_results(self, results: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]: for f in self.frames(): - results = f.get_results(results) + f.update_results(results) return results + def get_failures_by_check(self, failures_by_check: Dict[str, List[str]]) -> Dict[str, List[str]]: + # TODO?: refactor so this code isn't duplicated with __str__ + frame_s = "" + for frame, depth in self.frames_dfs(): + indent = " " * (depth + 2) if depth > 0 else "" + + frame_s += f"{indent}At {frame._name}:\n" + for item in frame._items: + frame_s += textwrap.indent(f"- {item}", prefix=f" {indent}") + frame_s += "\n" + + for c in frame._checks: + if c.failed: + check_s = f"{indent}❌ Check '{c.name}' failed: {c._msg}\n" + if c.name in failures_by_check: + failures_by_check[c.name].append(frame_s + check_s) + else: + failures_by_check[c.name] = [frame_s + check_s] + return failures_by_check + def __str__(self) -> str: s = "" # TODO?: only include frames that have fails @@ -146,9 +150,6 @@ class Check: with error messages returned to the test case.
""" - default_enabled: bool - """Whether the check is enabled by default or not.""" - def __init__(self): self._failed: bool = False self._skipped: bool = False @@ -181,7 +182,7 @@ def check(self, *args, **kwargs): @dataclasses.dataclass() class Checks: checks: List[Type[Check]] = dataclasses.field(init=True) - disabled: List[str] = dataclasses.field(init=True) + enabled: List[str] = dataclasses.field(init=True) def _get_check(self, name: str) -> Type[Check]: for c in self.checks: @@ -192,9 +193,9 @@ def _get_check(self, name: str) -> Type[Check]: def is_enabled(self, name: str) -> bool: check = self._get_check(name) - if check.name in self.disabled: - return False - return check.default_enabled + if check.name in self.enabled: + return True + return False async def check(self, name: str, *args: Any, **kwargs: Any) -> None: """Find and run the check with the given ``name`` if it is enabled.""" diff --git a/ddapm_test_agent/trace_checks.py b/ddapm_test_agent/trace_checks.py index b509cbbc..d6949928 100644 --- a/ddapm_test_agent/trace_checks.py +++ b/ddapm_test_agent/trace_checks.py @@ -20,7 +20,6 @@ class CheckTraceCountHeader(Check): X-Datadog-Trace-Count http header with each payload. The value of the header must match the number of traces included in the payload. """.strip() - default_enabled = True def check(self, headers: CIMultiDictProxy, num_traces: int) -> None: if "X-Datadog-Trace-Count" not in headers: @@ -41,7 +40,6 @@ def check(self, headers: CIMultiDictProxy, num_traces: int) -> None: class CheckMetaTracerVersionHeader(Check): name = "meta_tracer_version_header" description = """v0.4 payloads must include the Datadog-Meta-Tracer-Version header.""" - default_enabled = True def check(self, headers: CIMultiDictProxy) -> None: if "Datadog-Meta-Tracer-Version" not in headers: @@ -53,7 +51,6 @@ class CheckTraceContentLength(Check): description = """ The max content size of a trace payload is 50MB. """.strip() - default_enabled = True def check(self, headers: CIMultiDictProxy) -> None: if "Content-Length" not in headers: @@ -75,7 +72,6 @@ class CheckTraceStallAsync(Check): Note that only the request for this trace is stalled, subsequent requests will not be affected. """.strip() - default_enabled = True async def check(self, headers: CIMultiDictProxy, request: Request) -> None: if "X-Datadog-Test-Stall-Seconds" in headers: diff --git a/releasenotes/notes/Checks-changed-to-opt-in-8716cac4ecdbb1c2.yaml b/releasenotes/notes/Checks-changed-to-opt-in-8716cac4ecdbb1c2.yaml new file mode 100644 index 00000000..9dcea159 --- /dev/null +++ b/releasenotes/notes/Checks-changed-to-opt-in-8716cac4ecdbb1c2.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + Checks have been changed to opt-in. This allows adding new checks before + all languages have implemented the feature. +upgrade: + - | + Checks have been changed to opt-in. In order to maintain current + functionality users should update either their command or their + environment. + `ENABLED_CHECKS=trace_count_header,meta_tracer_version_header,trace_content_length` + or + `--enabled-checks=trace_count_header,meta_tracer_version_header,trace_content_length` +deprecations: + - | + `DISABLED_CHECKS` and `--disabled-checks` have been removed in favor of + making checks opt-in. 
diff --git a/releasenotes/notes/improve-trace-check-results-endpoints-6ab4132628520793.yaml b/releasenotes/notes/improve-trace-check-results-endpoints-6ab4132628520793.yaml new file mode 100644 index 00000000..d0b2a667 --- /dev/null +++ b/releasenotes/notes/improve-trace-check-results-endpoints-6ab4132628520793.yaml @@ -0,0 +1,4 @@ +features: + - | + Adds new endpoints to the Test-Agent API: `/test/trace_check/failures`, `/test/trace_check/clear`, and `/test/trace_check/summary`. + These endpoints allow users to retrieve trace check failures, clear trace check failures, and get trace check summary results respectively. diff --git a/releasenotes/notes/remoteconfig-requests-1af7abb99c61c069.yaml b/releasenotes/notes/remoteconfig-requests-1af7abb99c61c069.yaml new file mode 100644 index 00000000..41e4ceb9 --- /dev/null +++ b/releasenotes/notes/remoteconfig-requests-1af7abb99c61c069.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Remote config requests are now returned via ``/test/session/requests``. diff --git a/tests/conftest.py b/tests/conftest.py index b312e86f..468a2d35 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,8 +28,8 @@ @pytest.fixture -def agent_disabled_checks() -> Generator[List[str], None, None]: - yield [] +def agent_enabled_checks() -> Generator[List[str], None, None]: + yield ["trace_content_length", "meta_tracer_version_header", "trace_count_header", "trace_stall"] @pytest.fixture @@ -69,7 +69,7 @@ def suppress_trace_parse_errors() -> Generator[bool, None, None]: @pytest.fixture def pool_trace_check_failures() -> Generator[bool, None, None]: - yield False + yield True @pytest.fixture @@ -85,7 +85,7 @@ def snapshot_removed_attrs() -> Generator[Set[str], None, None]: @pytest.fixture async def agent_app( aiohttp_server, - agent_disabled_checks, + agent_enabled_checks, log_span_fmt, snapshot_dir, snapshot_ci_mode, @@ -99,7 +99,7 @@ async def agent_app( ): app = await aiohttp_server( make_app( - agent_disabled_checks, + agent_enabled_checks, log_span_fmt, str(snapshot_dir), snapshot_ci_mode, diff --git a/tests/test_agent.py b/tests/test_agent.py index e38c1732..8f932eee 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -102,3 +102,196 @@ async def test_apmtelemetry( ) assert resp.status == 200 assert await resp.text() == "[]" + + +async def test_get_trace_check_summary_full_results_and_clear( + agent, v04_reference_http_trace_payload_data, v04_reference_http_trace_payload_headers +): + expected_output = { + "trace_stall": {"Passed_Checks": 3, "Failed_Checks": 0, "Skipped_Checks": 0}, + "meta_tracer_version_header": {"Passed_Checks": 3, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_content_length": {"Passed_Checks": 3, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_count_header": {"Passed_Checks": 3, "Failed_Checks": 0, "Skipped_Checks": 0}, + } + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "other" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + + response = await agent.get("/test/trace_check/summary?return_all=true") + assert response.status == 200 + assert await response.json() == expected_output + + +async def 
test_get_trace_check_summary_partial_results( + agent, v04_reference_http_trace_payload_data, v04_reference_http_trace_payload_headers +): + expected_output = { + "trace_stall": {"Passed_Checks": 1, "Failed_Checks": 0, "Skipped_Checks": 0}, + "meta_tracer_version_header": {"Passed_Checks": 1, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_content_length": {"Passed_Checks": 1, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_count_header": {"Passed_Checks": 1, "Failed_Checks": 0, "Skipped_Checks": 0}, + } + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "token123" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + response = await agent.get("/test/trace_check/summary", headers={"X-Datadog-Test-Session-Token": "token123"}) + assert response.status == 200 + assert await response.json() == expected_output + + +async def test_get_trace_check_summary_no_results( + agent, v04_reference_http_trace_payload_data, v04_reference_http_trace_payload_headers +): + expected_output = {} + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + response = await agent.get( + "/test/trace_check/summary?return_all=false", headers={"X-Datadog-Test-Session-Token": "token123"} + ) + assert response.status == 200 + assert await response.json() == expected_output + + +async def test_get_trace_check_results_and_clear( + agent, v04_reference_http_trace_payload_data, v04_reference_http_trace_payload_headers +): + expected_output = { + "trace_stall": {"Passed_Checks": 4, "Failed_Checks": 0, "Skipped_Checks": 0}, + "meta_tracer_version_header": {"Passed_Checks": 4, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_content_length": {"Passed_Checks": 4, "Failed_Checks": 0, "Skipped_Checks": 0}, + "trace_count_header": {"Passed_Checks": 0, "Failed_Checks": 4, "Skipped_Checks": 0}, + } + v04_reference_http_trace_payload_headers["X-Datadog-Trace-Count"] = "2" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "other" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "other2" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + + response = await agent.get("/test/trace_check/summary?return_all=true") + assert response.status == 200 + assert await response.json() == expected_output + + response = await agent.get("/test/trace_check/summary?return_all=true") + assert response.status == 200 + assert await response.json() == expected_output + + response = await agent.get("/test/trace_check/clear?test_session_token=other") + assert response.status == 200 + + response = await agent.get("/test/trace_check/summary?return_all=true") + assert response.status == 200 + response_json = await response.json() + assert 
response_json["trace_count_header"]["Failed_Checks"] == 2 + + response = await agent.get("/test/trace_check/clear?clear_all=true") + assert response.status == 200 + + response = await agent.get("/test/trace_check/summary?return_all=true") + assert response.status == 200 + response_json = await response.json() + assert response_json == {} + + +async def test_get_trace_failures_and_clear_json( + agent, v04_reference_http_trace_payload_data, v04_reference_http_trace_payload_headers +): + results = { + "trace_count_header": [ + "At request :\n At headers:\n - {'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Length': '371',\n 'Content-Type': 'application/msgpack',\n 'Datadog-Meta-Tracer-Version': 'v0.1',\n 'Host': '127.0.0.1:.*',\n 'User-Agent': 'Python/3.11 aiohttp/3.8.4',\n 'X-Datadog-Trace-Count': '2'}\n At payload (1 traces):\n ❌ Check 'trace_count_header' failed: X-Datadog-Trace-Count value (2) does not match actual number of traces (1)\n", + "At request :\n At headers:\n - {'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Length': '371',\n 'Content-Type': 'application/msgpack',\n 'Datadog-Meta-Tracer-Version': 'v0.1',\n 'Host': '127.0.0.1:.*',\n 'User-Agent': 'Python/3.11 aiohttp/3.8.4',\n 'X-Datadog-Test-Session-Token': 'other',\n 'X-Datadog-Trace-Count': '2'}\n At payload (1 traces):\n ❌ Check 'trace_count_header' failed: X-Datadog-Trace-Count value (2) does not match actual number of traces (1)\n", + "At request :\n At headers:\n - {'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Length': '371',\n 'Content-Type': 'application/msgpack',\n 'Datadog-Meta-Tracer-Version': 'v0.1',\n 'Host': '127.0.0.1:.*',\n 'User-Agent': 'Python/3.11 aiohttp/3.8.4',\n 'X-Datadog-Test-Session-Token': 'other',\n 'X-Datadog-Trace-Count': '2'}\n At payload (1 traces):\n ❌ Check 'trace_count_header' failed: X-Datadog-Trace-Count value (2) does not match actual number of traces (1)\n", + "At request :\n At headers:\n - {'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-Length': '371',\n 'Content-Type': 'application/msgpack',\n 'Datadog-Meta-Tracer-Version': 'v0.1',\n 'Host': '127.0.0.1:.*',\n 'User-Agent': 'Python/3.11 aiohttp/3.8.4',\n 'X-Datadog-Test-Session-Token': 'other2',\n 'X-Datadog-Trace-Count': '2'}\n At payload (1 traces):\n ❌ Check 'trace_count_header' failed: X-Datadog-Trace-Count value (2) does not match actual number of traces (1)\n", + ] + } + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Trace-Count"] = "2" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "other" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + v04_reference_http_trace_payload_headers["X-Datadog-Test-Session-Token"] = "other2" + await agent.put( + "/v0.4/traces", + headers=v04_reference_http_trace_payload_headers, + data=v04_reference_http_trace_payload_data, + ) + + response = await agent.get("/test/trace_check/failures?return_all=true&use_json=true") + assert response.status == 400 + resp = await response.json() + assert 
resp.keys() == results.keys() + assert len(resp["trace_count_header"]) == 4 + + response = await agent.get("/test/trace_check/clear?test_session_token=other") + assert response.status == 200 + + response = await agent.get("/test/trace_check/failures?return_all=true&use_json=true") + assert response.status == 400 + resp = await response.json() + assert resp.keys() == results.keys() + assert len(resp["trace_count_header"]) == 2 + + response = await agent.get("/test/trace_check/clear?clear_all=true") + assert response.status == 200 + + response = await agent.get("/test/trace_check/summary?return_all=true&use_json=true") + assert response.status == 200 + assert await response.json() == {} diff --git a/tests/test_checks.py b/tests/test_checks.py index 06553aa1..c9d04476 100644 --- a/tests/test_checks.py +++ b/tests/test_checks.py @@ -35,12 +35,12 @@ async def test_reference_case_insensitive( assert resp.status == 200, await resp.text() -@pytest.mark.parametrize("agent_disabled_checks", [[], ["trace_count_header"]]) +@pytest.mark.parametrize("agent_enabled_checks", [["trace_count_header"], []]) async def test_trace_count_header( agent, v04_reference_http_trace_payload_headers, v04_reference_http_trace_payload_data, - agent_disabled_checks, + agent_enabled_checks, ): del v04_reference_http_trace_payload_headers["X-Datadog-Trace-Count"] resp = await agent.put( @@ -48,17 +48,19 @@ async def test_trace_count_header( headers=v04_reference_http_trace_payload_headers, data=v04_reference_http_trace_payload_data, ) - if "trace_count_header" in agent_disabled_checks: - assert resp.status == 200, await resp.text() - else: + if "trace_count_header" in agent_enabled_checks: assert resp.status == 400, await resp.text() assert "Check 'trace_count_header' failed" in await resp.text() + else: + assert resp.status == 200, await resp.text() +@pytest.mark.parametrize("agent_enabled_checks", ["trace_count_header"]) async def test_trace_count_header_mismatch( agent, v04_reference_http_trace_payload_headers, v04_reference_http_trace_payload_data, + agent_enabled_checks, ): v04_reference_http_trace_payload_headers["X-Datadog-Trace-Count"] += "1" resp = await agent.put( @@ -70,12 +72,12 @@ async def test_trace_count_header_mismatch( assert "Check 'trace_count_header' failed" in await resp.text() -@pytest.mark.parametrize("agent_disabled_checks", [[], ["meta_tracer_version_header"]]) +@pytest.mark.parametrize("agent_enabled_checks", [["meta_tracer_version_header"], []]) async def test_meta_tracer_version_header( agent, v04_reference_http_trace_payload_headers, v04_reference_http_trace_payload_data, - agent_disabled_checks, + agent_enabled_checks, ): del v04_reference_http_trace_payload_headers["Datadog-Meta-Tracer-Version"] resp = await agent.put( @@ -83,14 +85,15 @@ async def test_meta_tracer_version_header( headers=v04_reference_http_trace_payload_headers, data=v04_reference_http_trace_payload_data, ) - if "meta_tracer_version_header" in agent_disabled_checks: - assert resp.status == 200, await resp.text() - else: + if "meta_tracer_version_header" in agent_enabled_checks: assert resp.status == 400, await resp.text() assert "Check 'meta_tracer_version_header' failed" in await resp.text() + else: + assert resp.status == 200, await resp.text() -async def test_trace_content_length(agent): +@pytest.mark.parametrize("agent_enabled_checks", ["trace_content_length"]) +async def test_trace_content_length(agent, agent_enabled_checks): # Assume a trace will be at least 100 bytes each s = span() trace = [s for _ in range(int(5e7 / 
100))] @@ -99,11 +102,12 @@ async def test_trace_content_length(agent): assert "Check 'trace_content_length' failed: content length" in await resp.text() +@pytest.mark.parametrize("agent_enabled_checks", ["trace_stall"]) async def test_trace_stall( agent, v04_reference_http_trace_payload_headers, v04_reference_http_trace_payload_data, - agent_disabled_checks, + agent_enabled_checks, ): v04_reference_http_trace_payload_headers["X-Datadog-Test-Stall-Seconds"] = "0.8" start = time.monotonic_ns() diff --git a/tests/test_remoteconfig.py b/tests/test_remoteconfig.py index 81013ae1..bc3522a9 100644 --- a/tests/test_remoteconfig.py +++ b/tests/test_remoteconfig.py @@ -1,3 +1,4 @@ +import base64 import json import pytest @@ -155,3 +156,15 @@ async def test_remoteconfig_session( await _request_update_and_get_data_with_session(rc_agent, "token_1", data, {"a": "b", "c": "d"}) data = {"e": "f"} await _request_update_and_get_data_with_session(rc_agent, "token_2", data, data) + + +async def test_remoteconfig_requests(rc_agent): + resp = await rc_agent.post("/v0.7/config") + assert resp.status == 200, await resp.text() + + resp = await rc_agent.get("/test/session/requests") + content = await resp.json() + assert resp.status == 200, content + assert len(content) == 1 + assert content[0]["method"] == "POST" + assert base64.b64decode(content[0]["body"]) == b""
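Finally, a small sketch (not part of this patch) of how a client might consume the remote config requests now captured by `/test/session/requests`. It assumes a Test Agent on `127.0.0.1:8126` and relies only on the `method` and base64-encoded `body` fields asserted in `test_remoteconfig_requests` above:

```python
import base64
import json
import urllib.request

# Fetch every request recorded for the current session.
with urllib.request.urlopen("http://127.0.0.1:8126/test/session/requests") as resp:
    captured = json.loads(resp.read())

for req in captured:
    body = base64.b64decode(req["body"])  # request bodies come back base64-encoded
    print(req["method"], f"({len(body)} byte body)")
```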