From 9b5992748bf368dfab3e15de2c1ffc76c3900c1b Mon Sep 17 00:00:00 2001
From: Connor Adams
Date: Thu, 23 Jul 2020 15:38:38 -0400
Subject: [PATCH 1/6] Integrate Exemplars with Python SDK

---
 docs/examples/exemplars/README.rst            |  40 ++
 docs/examples/exemplars/semantic_exemplars.py |  72 +++
 .../exemplars/statistical_exemplars.ipynb     | 340 +++++++++++++
 .../exemplars/statistical_exemplars.py        | 132 +++++
 .../sdk/metrics/export/__init__.py            |   3 +-
 .../sdk/metrics/export/aggregate.py           |  53 +-
 .../sdk/metrics/export/exemplars.py           | 277 +++++++++++
 .../src/opentelemetry/sdk/metrics/view.py     |   5 +-
 .../tests/metrics/export/test_exemplars.py    | 460 ++++++++++++++++++
 9 files changed, 1370 insertions(+), 12 deletions(-)
 create mode 100644 docs/examples/exemplars/README.rst
 create mode 100644 docs/examples/exemplars/semantic_exemplars.py
 create mode 100644 docs/examples/exemplars/statistical_exemplars.ipynb
 create mode 100644 docs/examples/exemplars/statistical_exemplars.py
 create mode 100644 opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py
 create mode 100644 opentelemetry-sdk/tests/metrics/export/test_exemplars.py

diff --git a/docs/examples/exemplars/README.rst b/docs/examples/exemplars/README.rst
new file mode 100644
index 00000000000..b49a02b8de6
--- /dev/null
+++ b/docs/examples/exemplars/README.rst
@@ -0,0 +1,40 @@
+OpenTelemetry Exemplars Example
+===============================
+
+Exemplars are example measurements for aggregations. While they are conceptually simple, exemplars can estimate any statistic about the input distribution, can provide links to sample traces for high-latency requests, and much more.
+For more information about exemplars and how they work in OpenTelemetry, see the `spec `_
+
+Examples
+--------
+
+Installation
+
+.. code-block:: sh
+
+    pip install opentelemetry-api
+    pip install opentelemetry-sdk
+    pip install matplotlib # may have to install Qt as well
+    pip install numpy
+
+    pip install opentelemetry-exporter-cloud-monitoring # if you want to export exemplars to cloud monitoring
+
+Statistical exemplars
+^^^^^^^^^^^^^^^^^^^^^
+
+The OpenTelemetry SDK provides a way to sample exemplars statistically:
+
+    - Exemplars will be picked to represent the input distribution, without unquantifiable bias
+    - A "sample_count" attribute will be set on each exemplar to quantify how many measurements it represents
+
+See 'statistical_exemplars.ipynb' for the example.
+
+Semantic exemplars
+^^^^^^^^^^^^^^^^^^
+
+Semantic exemplars are exemplars that have not been sampled statistically;
+instead, they aim to provide value as individual examples.
+Each one has the trace id/span id of the trace that was active when it was recorded,
+and they may focus on measurements with abnormally high or low values.
+
+'semantic_exemplars.py' shows how to generate exemplars for a histogram aggregation.
+Currently only the Google Cloud Monitoring exporter supports uploading these exemplars.
diff --git a/docs/examples/exemplars/semantic_exemplars.py b/docs/examples/exemplars/semantic_exemplars.py
new file mode 100644
index 00000000000..5d14dd3bea5
--- /dev/null
+++ b/docs/examples/exemplars/semantic_exemplars.py
@@ -0,0 +1,72 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+This example shows how to generate "semantic" exemplars for a histogram, and how they can be exported to Google Cloud Monitoring.
+"""
+
+import random
+import time
+
+from opentelemetry import metrics
+from opentelemetry.sdk.metrics import (
+    MeterProvider,
+    ValueRecorder,
+)
+from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
+from opentelemetry.sdk.metrics.export.aggregate import (
+    HistogramAggregator,
+)
+from opentelemetry.sdk.metrics.view import View, ViewConfig

+# Set up OpenTelemetry metrics
+metrics.set_meter_provider(MeterProvider(stateful=False))
+meter = metrics.get_meter(__name__)
+
+# The console exporter is used here for simplicity; swap in the Google Cloud Monitoring metrics exporter to upload exemplars, since it's currently the only exporter that supports them
+metrics.get_meter_provider().start_pipeline(meter, ConsoleMetricsExporter(), 10)
+
+# Create our duration metric
+request_duration = meter.create_metric(
+    name="request_duration",
+    description="duration (ms) of incoming requests",
+    unit="ms",
+    value_type=int,
+    metric_type=ValueRecorder,
+)
+
+# Add a Histogram view to our duration metric, and make it generate one exemplar per bucket
+duration_view = View(
+    request_duration,
+    # Latency in buckets:
+    # [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
+    # We want to generate one exemplar per bucket, where each exemplar has a linked trace that was recorded.
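+    # (Bucket semantics, for reference: a recorded value falls into the bucket whose
+    # bounds contain it, and the aggregator then keeps up to num_exemplars exemplars
+    # for each bucket; as semantic exemplars, each carries the trace/span context
+    # that was active when record() was called.)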
+ # So we need to set num_exemplars to 1 and not specify statistical_exemplars (defaults to false) + HistogramAggregator, + aggregator_config={"bounds": [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000], + "num_exemplars": 1}, + label_keys=["environment"], + view_config=ViewConfig.LABEL_KEYS, +) + +meter.register_view(duration_view) + +for i in range(100): + # Generate some random data for the histogram with a dropped label "customer_id" + request_duration.record(random.randint(1, 8000), {"environment": "staging", "customer_id": random.randint(1, 100)}) + time.sleep(1) diff --git a/docs/examples/exemplars/statistical_exemplars.ipynb b/docs/examples/exemplars/statistical_exemplars.ipynb new file mode 100644 index 00000000000..5f3659e41e8 --- /dev/null +++ b/docs/examples/exemplars/statistical_exemplars.ipynb @@ -0,0 +1,340 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This example will build an exemplar sample set on a \"bytes in\" counter aggregator, which just sums up the number of bytes sent into our \"application\".\n", + "We will use these statistical exemplars to generate insights into the data that was aggregated away.\n", + "\n", + "We'll start by importing everything we will need from opentelemetry to create the metrics:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import random\n", + "\n", + "from collections import defaultdict\n", + "\n", + "from opentelemetry import metrics\n", + "from opentelemetry.sdk.metrics import Counter, MeterProvider\n", + "from opentelemetry.sdk.metrics.export.aggregate import SumAggregator\n", + "from opentelemetry.sdk.metrics.export.controller import PushController\n", + "from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import InMemoryMetricsExporter\n", + "from opentelemetry.sdk.metrics.view import View, ViewConfig" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then set up an in-memory metrics exporter so we can analyze the exemplar data in-service:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "## set up opentelemetry\n", + "\n", + "# Sets the global MeterProvider instance\n", + "metrics.set_meter_provider(MeterProvider())\n", + "\n", + "meter = metrics.get_meter(__name__)\n", + "\n", + "# Export to a python list so we can do stats with the data\n", + "exporter = InMemoryMetricsExporter()\n", + "\n", + "# instead of waiting for the controller to tick over time, we will just tick it ourselves\n", + "controller = PushController(meter, exporter, 500)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now need to create the bytes in metric, and assign it a view (this is where we set up exemplars):" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Create the metric that we will use\n", + "bytes_counter = meter.create_metric(\n", + " name=\"bytes_counter\",\n", + " description=\"Number of bytes received by service\",\n", + " unit=\"By\",\n", + " value_type=int,\n", + " metric_type=Counter,\n", + ")\n", + "\n", + "# Every time interval we will collect 100 exemplars statistically (selected without bias)\n", + "aggregator_config = {\"num_exemplars\": 100, \"statistical_exemplars\": True}\n", + "\n", + "# Assign a Sum aggregator to `bytes_counter` that collects 
exemplars\n",
+    "counter_view = View(\n",
+    "    bytes_counter,\n",
+    "    SumAggregator,\n",
+    "    aggregator_config=aggregator_config,\n",
+    "    label_keys=[\"environment\"],\n",
+    "    view_config=ViewConfig.LABEL_KEYS,\n",
+    ")\n",
+    "\n",
+    "meter.register_view(counter_view)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The last thing we need to do before we can start working with exemplars is to generate a large set of data for the metric.\n",
+    "If the dataset is too small, we won't be able to collect a large enough subset of the input to analyze with exemplars.\n",
+    "\n",
+    "If this were a real application, the data would be generated through requests to/from the server."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## generate the random metric data\n",
+    "\n",
+    "def unknown_customer_calls():\n",
+    "    \"\"\"Generate customer call data to our application\"\"\"\n",
+    "\n",
+    "    # set a random seed for consistency of data for example purposes\n",
+    "    np.random.seed(1)\n",
+    "    # Make exemplar selection consistent for example purposes\n",
+    "    random.seed(1)\n",
+    "\n",
+    "    # customer 123 is a big user, and made 1000 requests in this timeframe\n",
+    "    requests = np.random.normal(1000, 250, 1000) # 1000 requests with mean 1000 bytes, standard deviation 250\n",
+    "\n",
+    "    for request in requests:\n",
+    "        bytes_counter.add(int(request), {\"environment\": \"production\", \"method\": \"REST\", \"customer_id\": 123})\n",
+    "\n",
+    "    # customer 247 is another big user, making fewer, but bigger requests\n",
+    "    requests = np.random.normal(5000, 1250, 200) # 200 requests with average size of 5k bytes\n",
+    "\n",
+    "    for request in requests:\n",
+    "        bytes_counter.add(int(request), {\"environment\": \"production\", \"method\": \"REST\", \"customer_id\": 247})\n",
+    "\n",
+    "    # There are many other smaller customers\n",
+    "    for customer_id in range(250):\n",
+    "        requests = np.random.normal(1000, 250, np.random.randint(1, 10))\n",
+    "        method = \"REST\" if np.random.randint(2) else \"gRPC\"\n",
+    "        for request in requests:\n",
+    "            bytes_counter.add(int(request), {\"environment\": \"production\", \"method\": method, \"customer_id\": customer_id})\n",
+    "\n",
+    "unknown_customer_calls()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Analyzing the Exemplars"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's export our metric and collect the exemplars from the exported aggregation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Tick the controller so it sends metrics to the exporter\n",
+    "controller.tick()\n",
+    "\n",
+    "# collect metrics from our exporter\n",
+    "metric_data = exporter.get_exported_metrics()\n",
+    "\n",
+    "# get the exemplars from the bytes_in counter aggregator\n",
+    "aggregator = metric_data[0].aggregator\n",
+    "exemplars = aggregator.checkpoint_exemplars"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "One of the key strengths of exemplars is their ability to handle dropped labels (labels whose cardinality is too high to create a new metric record for each value). \n",
+    "In our application, we drop the \"customer_id\" label since there is an unbounded number of possible labels. 
However, with exemplars, we can still estimate stats related\n", + "to the customer ids, for example the sizes of our top customers:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": "
", + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOcAAADnCAYAAADl9EEgAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3deVxU5f4H8M9zZmMfQEBElkkRGQgp9bqjNzO1XH5XE00tq2t608q0q4lpNVdNSzPTNkuzbDMNc8XUtDT33HLBGXFHwB0YtoFZzvn9MUKoLAMM85yZed6vF6+bM2fm+cLlw3PmnGchgiCAYRjx4WgXwDBM1Vg4GUakWDgZRqRYOBlGpFg4GUakWDgZRqRYOBlGpFg4GUakWDgZRqRYOBlGpKS0C2D+duTIkRCpVLoMwINgfzhdDQ/glNlsfqFdu3Y3bHkBC6eISKXSZaGhoerg4OA8juPYoGcXwvM8uXnzZty1a9eWARhoy2vYX2dxeTA4OLiABdP1cBwnBAcH62E9K7LtNY1YD1N3HAum67rz/63NmWPhZBiRYp85RUyVktbOnu936d1+R2o7JjMzUzp+/PjI48ePe/n5+VmCgoJMH3300ZU2bdqU1aWtb7/91j8uLq60Xbt2pfWvuGH+85//hG/btk0pk8mEqKiospUrV14KCgqylD9/9uxZeWJiYvzkyZNzZs6cef348eOKYcOGtSx/PisrS/H6669nv/XWWzZdwLE31nMyFXiex8CBA6O7d+9eeOXKlVPp6enad999NzsnJ0dW1/dat26d/4kTJzwbo87qmM3mu/7dp0+fgoyMjPSMjIzT0dHRpW+++WZo5edfeeWV8B49eujL/52YmFim0+lO63S606dOnTrt4eHBP/XUU/kOKv8+LJxMhU2bNvlKpVLh9ddfv1n+WOfOnQ19+/Yt2rRpk+8jjzwSXf74qFGjIhcvXtwEAMaPH9+8ZcuW8TExMXFjx44N//XXX723b9/uP2PGjPDY2Ni49PR0xb59+zwTExNjY2Ji4h577LGWN2/elABAhw4dWo8ePTriwQcfVLdo0SJ+165dXr17924ZFRX14IQJE8LK2/v0008DExIS1LGxsXEjRoyIKg+il5fXw2PGjAlv3bp13I4dO3wqfz+DBw8ukMlk5d9HcXZ2trz8uW+//dY/KirKqFarq+zZN2zY4BcZGVkWExNjtMfPtj5YOJkKJ06c8ExMTCypy2uuXbsm2bx5c8DZs2fTMzIyTs+ZM+fqY489VtyrV6/82bNnZ+l0utPx8fFlzz333ANz5szJysjIOB0fH2+YOnVqRfDkcjl/6tQp7fPPP38zOTk5eunSpZk6nS591apVQdeuXZMcPXrUIzU1NfDw4cM6nU53muM4YcmSJU0AwGAwcB07diw+c+bM6T59+hRVV+fXX38d1LdvXz0A6PV6bsGCBaHz5s3Lqe74lStXBg4ZMuR2XX4W9sY+czIN0qRJE4tCoeCHDRum6t+/f/6wYcP09x5z+/ZtSWFhoaRfv35FADBmzJjbycnJLcqfHzRoUD4AJCYmGqKjow1RUVEmAIiIiCi7cOGCfOfOnT6nTp3ySkxMVANAaWkpFxISYgYAiUSC5557Lq+mGqdOnRoqkUiEF198MRcApkyZEvbyyy9fVyqVfFXHl5aWku3btys/+OCDrPr9VOyDhZOpkJCQYFi3bl1AVc/JZDKB5//+XS4rKyN3Hsdff/2l3bBhg19qamrAZ599FnLgwIGMurTr4eEhAADHcVAoFBW3kjiOg9lsJoIgkOTk5NuffPJJ9r2vlcvlvFRa/a/x4sWLm2zdutV/9+7dGRxnPVE8cuSId1paWsDbb78dXlBQIOE4Dh4eHvwbb7xxEwBSU1OVcXFxJREREeZq39gB2GktU2HAgAGFRqORvP/++0Hljx08eNBzy5YtPi1btiw7d+6cp8FgILdu3ZLs2bPHD7CeIubm5kqGDRumX7JkyRWdTucFAD4+PpaCggIOsPaufn5+li1btvgAwJdfftmkc+fO1Z6C3qtv374FmzZtCsjOzpYCwPXr1yUZGRny2l6Xmprqt2jRotDNmzef8/X1rfjLcuTIkTPZ2dkns7OzT44ZM+bGq6++erU8mADw448/Bg4dOjTX1voaC+s5RcyWWx/2xHEcNmzYcH78+PERixYtClUoFEJ4eHjZRx99dCU6Oto0YMCAvNjY2Pjw8PCy+Pj4EgDIz8+X9O/fP7q8J501a9YVABg5cmTuuHHjVEuWLGmampp6/quvvro4bty4qAkTJnCRkZFlK1euvGRrXe3atSudMWNG9qOPPhrD8zxkMpmwePHizNou1rz22muRRqOR69mzZwwAtG3btuiHH37IrOk1BQUF3J49e/xWrFhx2db6Ggth69aKx/Hjxy8lJibeol0H03iOHz8elJiYqLLlWHZayzAixcLJMCLFwskwIsUuCImYIAgoNfEKo4VXmMy8zMTzcrNFkJl5QWrhBakgCJwggPCw/q8AEAgCIYQIhIDnCLFwhPCEgJcQYpFKiFkm4YwyCWeUSzijTEqMcglnIoTQ/laZKrBwioQqJS16VXJzr6v5hmalZt7TaLZ4GM2ChwChHsmx/SIfARHkUq5UIeMMHlJJiadcYvCUSUrkUo7qPT6GhZMKVUoaByARQBKA7gC6AWhaUMbjZlGdJn80mACBlJktnmVmi2cBTIHlj0s5zuQplxT5KKQFvh7SQg+ZxLGFMSycjqJKSQuCdXmK/wPQA4Cytte0WRZl1xpOvFD7rbtbN65jnmYa0o8fk/kqlQFNgoIDpmjmIrpVjNFbLin0UUgLlJ4yvVTCWWp6HzFMGVu+fHnAnDlzwi5cuOCxc+dObffu3UsAYO3atX4zZsxobjKZiEwmE+bOnZs1cODAQgBISkpqdePGDZnFYiEdOnQo/OabbzJrGoHUmFg4G5EqJS0KwKA7X10BSOhWVDNBEDBpzDMYMOQpzPt0OQDgzOmTyL15A+YW0XK9gW+iN5ia5OSXCl4KSYHSU5bn7ynLryqo69at8zebzXpHhtNsNqNykB566CHDmjVr
zo0ZM0ZV+biQkBBTWlraOZVKZTp06JBHv379Ym7cuHECANavX38+MDCQ53kejz/+eMvly5cHjB07tsaxu42FXa21M1VKmrcqJe3fqpS0fQAuAVgI66mrqIMJAH/u2w2pVIqhz/y74rHWcQlo27ELDu3fg5efGwbAeio8fcok5WdffKnSXi1MfGH8qw+2jG7VRmxTxtq2bVuamJh43+l4165dDSqVygRYRx+VlZVxBoOBAEBgYCAPACaTiZhMJkLzYhnrOe1ElZLWHsALAIYD8KNcTr2cO6NFXMJDdXpNXt5tsn3rZsX6nX9CKuEISosRHdHU1KtXr/z+/fvrn3/++TwAiImJiVu4cGFmv379iiZOnBg2derUsOXLl18B/p4yNmvWrJDk5OToQ4cOaUNCQswqlSrhjTfeuJ6TkyMrnzKmUCiEp59+OnLJkiVNXn755dvlU8aWLl1arxkkK1asCIiPjy/x9PSsuIrWrVu3VidOnPDu0aNHRf00sHA2gColTQJgKIDJANpSLocKH18/KBQKvD35FXTv1Ufa49E+Tc9cL2xq4WQmowUmQRDycnNzqU4Zq87hw4c93nrrreZbtmw5W/nxPXv2nC0pKSGDBg1qsXHjRr9BgwYV1O+n0zAsnPWgSkmTA3gOwOsAWtZ8tPOIjonF9rT1VT4nkUggVJoyZiyzni1KpVJ8v3EHDu7dhV/TNuDHr5di2aoNEMDJiixcaMb1QqXUZLpeU7uNOWWsOufPn5cNGTIk+ssvv7wYHx9/36mvl5eXMGDAgPy1a9f60won+8xZB6qUNC9VStprAC4A+BwuFEwA6NC1O4xGI1K//7risQztKRw9uA9h4RG4cPYMjGVlKNDrcXDvLgBASXERCgsLkNSzN6a8/Q4yTp8CAHj5+KC4qAhlZt6zmHio/AKayH7e+EuYIAgOmzJWnVu3bkmeeOKJVv/73/+yevfuXVz+uF6v5y5fviwDAJPJhF9++UUZGxtrqG87DcV6ThuoUtIIgGcBvAMgrJbD7caWWx/2RAjBwqXfYv7/3sBXny6C3MMDzcMjMEUzF6Fh4ejd/194slcXhEVEITa+DQCguKgIr44eCWNZKQRBwOS33gEA9B04GDOnTsQPX32OBUtWYNbCz4hm2mvNUl6fHBoR3rzkxx++P1tTLZXVd8rYN9984z9lypTIvLw86aBBg1qp1eqSPXv2nJ03b15IZmamYu7cuWFz584NA4AdO3Zk8DyPfv36RRuNRiIIAunSpUvBlClTbtbURmNiU8ZqoUpJ+yeABXDAZ8qlA5uhaWSL2g90AV5yaWGYv0eml1xK7T4oDXWZMsZ6zmqoUtKiAbwP66ABxs5KjGbf8zeK4/y9ZDebKT1yahvU4I5YOO9x5wrsJAAzATh03VV3I0AgeSXGkIJSU2BTP4+sIB8F1dXuxIaFsxJVSlocgOUAOtKuxZ1YeEGak29Q6Q2mgIgAr8tyKWeiXZMYsHACUKWkSWG9LfIWAAXlctxWcZlZefZGYXwzpeflQG85tZv/YuH24VSlpD0A4EcAHWjXwgAWXpBk5ZW00BtMeeEBnpdlbvxZ1K3vc6pS0gYDOAYWTNEpLDUFnL1RFFdUZvaiXQst7tlzapQSAO8tlbVtN8Y0udapW7SM3JFk1/f7/tHdtR5TacoYfJVKNAkKxhTNXKhaRNf62sp+25KGqBYt0TImtr7lwmzh5RdvFceG+nlkBvsq6rwqYXVTxn7//XevcePGqQDrTJzp06fnjBo1Kh+wrnU7efLkSJ7n8fTTT9+aM2fOtXp/Aw3kfj2nRhkEYCuA//bijvZ4kvvjEO2SxKJ8ylj7zt2QtvcYfty8ExNS3kLuzbrvgPf71jRcOHvGHjWRq3pDVObtkiheqHlViHt3GSufMta+ffu7RiO1b9++9OTJk6d1Ot3pbdu2nZ04cWKUyWSC2WzGpEmTIjdv3pyRkZGRvmbNmsAjR454NPibqCf3CqdG2RbAYQCPAgAhIPNlS6IjyXWqe2KIha1TxgBgzowpWL/6BwDAh3M1GNSzE4Y81hULZr2Jvw4fxM5ff8EH77yFoX2ScOXSRejST+LpgY9hyGNdMfGFp1GQb91Zb3Ryf8zXvIHhTzyCfz3SEaf+OmqdU5rUDh/Pm13R3nfffxuU+HC7h2Jj1fENnTLm6+vLl+8+ZjAYKqaF7dy50zsqKqosLi7O6OHhIQwePDg3NTXV3z4/3bpzn3BqlH0A7AFw1/ICHEHAZvm0AhnM1LZ6E4v6TBnLz8vFb1vS8POO/Uj9dS/GTJiMh9p3xD8fexyvTZ+J1Vt3I0L1AGZMfBETp2mQ+utetIqNw5IP36t4D6lchpWbf0fy089j4uiReGP2fKzZvg/rf1qJ/LxcXDh7Bls3rsXXa7dya38/yBGJhKvrLmP3+u2337yjo6Pj27ZtG79w4cLLMpkMV65ckTdv3rzi9yA8PNxYedtAR3OPcGqUAwGsRzWDCnxIadwP8tkHHFuUa6g8ZWz7Lxvh6Xn/j7iwQI/CAj3ad+4KABg4ZDiOHNxX8fw/H3scABAdG4eWrWMR3DQUcoUC4ZFRuJaTjYN7d0F74jhG9u+JQb26yvfu2x949vxFH6D+U8Z69uxZfO7cufQ9e/Zo58+f36ykpER0SxC6/gUhjXIIgB8A1Lg78z+4jO6jJZv3fWl5ootjChMfe04Zqwu53HprmeM4yOR/d1Qcx8FiNkMQgAHJT+HVlLf/fo6QQL3BlFffKWPl2rZtW+rt7W05fPiwZ0RExF09ZVZW1l09qaO5ds+pUT4N6z1Mm7ZNnyH9rk0sybzQuEWJV2NMGQMAXz8l/JT+OHqnt9z08yq079jV5ro6du2O7WkbcPuWdYKIPi8PWVcuc5m3S6KBuq8jotPp5CaTdRBSRkaG/MKFCx6tWrUy9ujRo/jSpUseOp1OXlpaSn7++efAJ598ktq2867bc2qUL8A659LmP0CEwGed/M1rD5d9UWKAgvr9NVtufdhTI08Zw+xpr6HUUILwSBVmLvjE5rpaxsTipSnTMW7kYPA8D6lMhjdmz0dYeCQBIcgtNgZUNaKouiljO3bs8Onfv38zqVQqcBwnLFiwILNZs2ZmAFiwYEFm3759YywWC0aMGHGrffv21GbNuOaUMY3yJQAfAajX54gzfPjePsZ5tv9ptxN3mjJmTwREiAj0PO/vJb9vV22xce9dxjTKyQA+Rj2DCQCtuayuU6Q/OrbbYupNgECu5Bla6g0mp1xYrTquFU6NcgyA+fZ4q/GSDR3akzNae7wX0/gEQSBXcktaFpaafGo/2jm4Tjg1yiQAtn+QqQUhUKyUz/b2Q5HoT5UYK14QuMu3S6INRgu1UT325Brh1CijAKyBjVdlbSUjlsjNijd09nxPpnHxgiC5dLs42mThRb+Id22cP5wapTesAwyCG+Ptw8mtju9Kl+5qjPdmGofJwisu3y6Orm0srtg5dzg1SgLgG1h37Go0wyS/d32EO3a8Mdtg7KvEaPHJyi2JpF1HQzj7fc63AQxu7EYIgXSZ7P2mHcs+uXkL/o3
SQ1dF1quzXd/PtH1/rcdcv5qNOdOn4MLZM+B5Ht179cFr02dCJpdDl34SN69fRVLP3gCAzz54F15e3nj2xVfsVuOKJR/h5x+/hUKhgFQqw/Dnx2LAkKfq9B7ZVzJx/PBBPDEoOUhRUGpo6udR92k1IuC8Pad1WN5bjmpOQoTQLYqUKwSVxrC5GOuUsVF4pE8/bNx9BBv+OIyS4mJ8NG8WAOBM+kns/u1Xu7Vnsdy9yMHqb5dj/+6d+H7jdqzeuhtf/LgO9bkPn5OVic3rUwEANwrKwh0xYbt8xJE9OWc4NcpEAF+jAfcy6yOIFLT9TPahy97//HPvH1AoFPjXsJEArONpp7z9Dtat+h5FhQX4dMEcbNu4FkP7JGHLhp8BAOfPnsHo5P54outD+H755xXvtennVRjR/1EM7ZOEmSkTK4LYqXU43p85A8m9u+H4kT/vav/Ljz/AjDkL4ONrvV3p4+uHgcnDAQCPd26DvFzr4nzpx49hdHJ/AMDh/XsxtE+S9atvdxQXFWLR3P/h2J/7MbRPEr5Z+gk5l5Pb4skhQx6IiYmJU6vVcRs3bvQFgMWLFzfp1atXyy5durRq3rx5wpw5c4I1Gk1TtVodl5iYGHv9+nUJAKSnpyuSkpJaxcfHq9u1a9f62LFjHgDw5JNPqkaMGBHZpk2b2HHjxoWnpaX5xMbGxsXGxsap1eq4vLy8BuXL+cKpUcoAfAfAm0bzfbjD3f+P23uYRtuN7VyG7r4pYz6+fghtHo6crEyM/+8b6D1gEFZv3Y2+A62fJi6dz8Bn363B9xt34POF78FkMlVM8VqxdgtWb90NCSfB5rU/AQAMJcVIeLgdftq2B207/H3aXlRYgOLiIoRHqepU84ovPsK02fOxeutufL1mMxQennh12tt4uENnrN66G8+MGY9vl3+hEDiZd0ZGxukffvjhwtixY1Xls1AyMjI809LSzh86dEg7d+7c5l5eXrxWqz3dvn374s8//7wJALzwwgtRn376aWZ6erp2/vz5WePGjav4LHv16lX50aNHdcuWLctasGBB6OLFiy/rdLrTBw4c0Pn4+DToLMsZP3NOA/AgrcYJAVko+7TFEWNMTpYQ7LCtGcQqqWdvyBUKyBUKBAYFI/fWjbumeAFAaWkpAoOsH9UlEgl6PTHQbu0/1L4j3p85A08MSkavx/ujabP7xyAcO3QAw58fq7hdVBb48MMP54aFhRlPnjzpAQBdunQpDAgI4AMCAngfHx9LcnJyPgAkJCSUnDhxwkuv13PHjh3zSU5OrtgXx2g0VpyxDR48OK98VkynTp2KJk+eHDF06NDc4cOH57Vs2bJB4XSunlOjVAOYTrsMjgiBv8hT8qQwu9T6qi1btcbpk3/d9VhRYQGuZWchQlX1mN/y6V5A+a5gloopXqu37sbqrbuxYdchjHstxXq8wgMSyf23IH18/eDl5Y2sy5eqbEcikYK/83G/rOzvseijX5oEzbxFKCs14NlBfXHxXEa1399VfWlkmcly1+RpuVx+165mlXc8M5vNxGKxwNfX16zT6U6Xf124cCG9ou5KveOcOXOuLVu27LLBYOCSkpJiy09/68t5wmm9bbIMALWZ6ZX5EkP8d/K5+2o/0nl07NYDpQYDNqb+CMB6wWbBrDcxMHkEPD294O3jg5Li2hcbqGqKV05WZq2vG/3SJMyZMRlFhdYd90qKiypqCYuIhPbOH44dmzdWvObKpYtopY7Hv8dPRHxiW1w8dxbe3j4oKfq7zrYdOmPz2p/AC4Jk15/HH7h69aq8TZs2Ns02CQwM5MPDw43Lly8PAACe57F///4qJ+2np6crOnToYHjnnXeutWnTpvjUqVMNCqczndaOByCqidCdOG2PZyVb9q+w9LXvPY87bLn1YU+EECxc9i3emT4ZXyyaD57n0a3nY5gw9U0AwD86J2H5Jx9iaJ8k/PulSdW+Tw1TvGpsf+io0SgpKcaI/o9CKpVCKpNh1NiXAAAvTnwdb0+ZgE/mz0H7zt0qXvPdl5/h0L7d4DgOLWNi0e2RXiAcB04iQXLvbhiYPALDRo3G7Df+iyd7dYFEKvX58JPPr1beybo2K1euvDBmzJio9957r5nZbCaDBg3K7dy5831bA86bNy9k3759foQQoXXr1oYhQ4Y0aOinc0wZ0ygjAKQD8KVdyr0EAYV9jO/dyhAiHmjoe7EpY44h4Yi5dVPfUzQ2T3LFKWOfQYTBBABC4Lte/qbZA2XUNlll6sbCC9IcfWlz2nXURvzh1CiHA+hHu4yaeBJjq7Xyt4/QroOxXX6JMVjsq8mLO5wapQ+AD2mXYQs1l9ltkvSnBg1QECDUa0QMUz/X9IYIR7bH8zwBYPPtFXGHE3gVQAjtImw1QbL2Hw+Rc/Ve5vxyvgnmkgIWUAcpMVp8HLV6As/z5ObNm0oAp2x9jXgvCGmU/gAuAqC24nZ9GAXJ5XZlS/wL4V3nPVj8FBxe6RiAKH8ZiGNHJrotKQdjoKfkqgOa4gGcMpvNL7Rr186mgfhiDucsADNol1EfmXzwge7GRZ1o18HYbNild/utpl3EvcR5WqtRNoH1lNYpRXI3O82WfskmaDuPWXc2UBYVcYYTmACR3jqx1UjJjq5J3ImTtOtgbBIDoG6TRh1AfOG0LjvyMu0yGooQSL+SzQtqAn2d95VkqKh+yBMl4gsnMBZAIO0i7EFK+GZbFCmXXXmCtgtpq0pJ60G7iMrEFU7rXM3XaJdhT8FE3+5j2Ud/0K6DsYmoek9xhRMYAiCcdhH29gR3sHt/bj8bQSR+A1QpaS1rP8wxxBbOZ2gX0BgIAbdY9rGqOW464n4aU38crBcjRUE89zk1ymAAOXCuaWx1ohe8TrYrWxJrhtSui18zdpULIPTSu/2oT6QXU885FC4cTABQkpKEFbL3XGqCtgsKBPAE7SIAcYVzJO0CHKGrJL3H05Jf2Rb34iaKj1fiOK3VKB8A4DY7SgsCCnoZ5+edF5pH0a6FqVIZrKe21Ha1BsTTc46gXYAjEQK/jfLppQoYqe2azNRIASCZdhEsnJR4EWPrNXKNS65/6yKoD+ejH06N8iEAcbTLoOFB7lK3CZKf99Cug6lSkioljer4bvrhBIbTLoCmSdLUdomkhsVWGVpkAB6lWYAYwtmHdgE0EQLPn+Qz5T4oKaBdC3Ofx2k2TjecGqUvgASqNYiAnJhVm+TT02s/knEwNw4n0EkENYiCirveWSP9mg2QF5cIVUpaPK3GaQejK+X2ReVZybbOXblTNi8AxThET1oN0w6nqLZXoI0QyFbI3g0MQEEu7VqYCh1pNUwvnBolB4rfuFhJCR+2VZFygU3QFo0OtBqm2XMmAHDImqHOJoTkt18k+4R9/hSHaFVKWgCNhmmGk53S1mAAt797X+7gUdp1MCCg1HvSDCe7GFQDQsB9Klsc0Qy3r9GuhXG/cLJFl2vBESH4F0XKDQksZtq1uDkq9+LphFOjlABg06Vs4E+K23wlm7eXdh1uLppGo7R6zlC4+KoH9tRdcrLHcMmOg7TrcGNUFv2iFU6XW2Gvsc
2Rfhn7AMnJpF2Hm/JTpaQ5fLc7Fk4nQQiUafLpJWyCNjUOP7Vl4XQiXqQsdrV85iHadbgpFk6mZonchaTxkvXsApHjhTm6QRZOJzRFuurhBHLhLO063IzD9+9h4XRChMArVa6ResNQRLsWN8LCydhGQcwPbJTPOEG7DjfiNuF0+Pm7K2rBXe0yQ/otGyDvGG4QTo2SAJA7vF0XNVryS6dOXDpb4qTxuUE4AQmFNl0WIZB/J5ur9EdhHu1aXJyXoxtk4XQBUsKHb1GknAPEsLeGy3L47y0Lp4sIJXn/+ED22S7adbgwh//e0hh8zsLZSB6++JeQdngyW3+oEQiE0+Pdfg5tk0Y42dzERnCWk2WUHfVtIaFw4cItCLzDP9PTOK0to9CmSzMQUnL0WJMLEp6wObKNx+E7XVO4laLnwXpPu5rqFbw/QcuxNZkal9HRDdIahMB6TztZqvTb2+sXqYywlQwbm9uEk81JtIMMmeziLwY/3+gctliaA7hNONmKcg1URlA6MqypcUoqbybsCrgjXHV0g7TCeYlSuy7j36FND3U6Bb1/CdrSrsVNXHB0g7TCeZlSuy7hez+f/ekyeacxW/hg2rW4kYuObpD1nE7mklSa+W5gQNyoHfw+mQUP0K7HjbhNz3mJUrtOzQgYhzUPLfIxgO97REikXY+bcZuek53W1sOLoSEHSjgubvLPlhME8KddjxvhQaFDYT2nk0j18T54yNOje+QN4YL6Crt14mDZap3WTW6laPQ3ABiotO2EsqSS7JlBga0BYNoqy23CVst3NIef0gJ0NzJip7Y2MAPmoWHNcgVC/Lum84ebFOEftGtyQw6/GATQDSeVv0bOZkLT4L2FEi6B4wXLuM08+5xJh9v1nH9SbNspbPb2Orzb06M7AIz4nd8rN9PZ7SAMP9QAAAtmSURBVIpxv56TrRpXg+sSyfWU4CYqEEK8DYK+/59CPO2a3BiVHcZphnM/KAwmdgY8wA9pHnpVICQIACat4//igCa063JT2Wqd9jSNhumFU6M3ADhMrX0RmxIStDtfInkIAMJvCpcSLglsriY9O2g1TLPnBNip7X12eHke2+blmVT+72mrLdcJIKNZk5v7lVbDLJwickvC3XwtJCgMhHAA0OEMfyy4AB1p1+XmttNqmHY49wKwUK5BFARASA5rlskT0hQAiCDwr2zgHb6QMXOXdLVOS23uMd1wavQFAI5TrUEkZgQF/nFLKmlX/u9hf/B7FWa0plkTQ++UFqAdTiu3P7Xd4+lxYoOPd8V4Wc8yofBf+4VYmjUxACie0gLiCCfVHwBt+RyX93LT4CAQUjFe9tX1/FFOAJtITZcJANUV9MUQzm0AbtEugpbk5qFnLYRUbIkYmitcefi80IlmTQwA4IBap6W6OTH9cGr0JgAraZdBw+wmAbuuSaUdKj82bbUlmwAKWjUxFah+3gTEEE6rFbQLcLRDHorTq3x97hpc8PA5/nizPLBekz4BwCraRYgjnBr9EQBuswFsAUf0Y0NDfEDI34MLBEGYuI5nmwqLww61TptBuwhxhNPqG9oFOMrwsNDTZkIiKz82eJ+w19MENa2amLt8RrsAQFzh/A5uMCBhQYD/7kyZrHPlxxRGoTh5N8+mg4lDNoD1tIsAxBROjT4HFAcZO8IJhfzM10rf+1YyeHkjf0giIJRGTcx9lqp1WlF0EuIJp5XLXhgqJqTouWZN5SDEo/LjwflCTocMgY2fFQczgKW0iygntnCuBVBAu4jGMDIs9LiJkPsWgZ622nKRAJ40amLus16t0+bQLqKcuMJpneP5Be0y7O1Tf+We83LZfctZJlzkTzW/DTZXUzxEcSGonLjCabUALrRFoFYuO/+Zv9/9mw0JgvDaWh4EIBTKYu53Rq3Tiuqah/jCqdFfA/Al7TLsoZQQwzPNmgog5L6pXwMOCvu9y/AgjbqYKi2hXcC9xBdOq/dgHXjs1J5tFnKkjOPuu0UiNwmGETt5FYWSmKrpAXxNu4h7iTOcGv0ViPCHVRdfKX33nlYoulX13Iub+YMSAWFVPcdQ8a5ap82nXcS9xBlOq5lw0s+e52XSSx8E+Lep6rkmBcK1rqcFtmq7eFwB8CHtIqoi3nBq9FkAPqZdRl0ZgbLhYaGlIMS3qudTfrKcI4C3o+tiqvWWWqcVZScg3nBazYX184DTeKFZyEEDx1W5ikHsFUEbeYPtECYiJyDiMd3iDqdGnwtgHu0ybLXK1+fAMQ/r9glVeT3VYmK3TkRlilqn5WkXUR1xh9NqAQAt7SJqkymVZs1uElDtrJK+h/kDPqWo8nMoQ8UatU67jXYRNRF/ODX6MgDPQ8QzVkyAaWjzUD0IUVb1vNQslI3awTd3dF1MtYoBTKJdRG3EH04A0OgPAlhIu4zqjA8N3lfMcdVuNDR2C39AyiPCkTUxNZql1mmv0C6iNs4RTqs3AZyhXcS91vl4/3mghs+Z/kXCzR4nhfuH7zG0aAF8QLsIWzhPODX6UlhPb0XzAT5HKrn6VlBgNAip9iLP1J8sOgJUeVuFcTgzgLFqndYpRp85TzgBQKPfD2AR7TIAwAJYksNCbwqEBFZ3THS2cKbFNXbrRESmq3XaPbSLsJVzhdNqOoCztIuYGBK0p0AiqfHqa8pPlhLinD9jV7QBwHzaRdSF8/3iWOd8/hsUT2+3eHsd2Vlpm76q9PyLP+hnwMOOqomp0UUAz6p1WoF2IXXhfOEEAI1+D6yjhxzuhkRy4/XgJpHl2/RVRWoRjC9s5dmaQOJQBmCIGAe218Y5w2n1JoA1jmyQB/jk5qFZAiE17mPy3K/8fimPKEfVxdRoolqnPUq7iPpw3nBq9AKAUXDg1vUpwU1250okNd4W8SsWbj92THjIUTUxNfpOrdOKbhK1rZw3nACg0ZcAGAggq7Gb2uXpcfwXb68q52dWNmWNJZ0AVY4UYhwqHcCLtItoCOcOJwBo9FdhDWhxYzWRy3G3JzQNDgEhkpqOe+CacC4mm906EYEiWD9nNtrvhCM4fzgBQKM/BmAkGuEKrgAIQ5qHXuQJaVbbsSmrLfkEqDHATKMzA3hGrdPqaBfSUK4RTgDQ6NcDmGbvt307KPCPm1Jp+9qOSzrJHw4oRq3HMY3KDGC4WqddR7sQe3CdcAKARj8PwHJ7vd0+D4+TayttB18djhfML/7CVztSiHEIM4ARap02lXYh9uJa4bQaCzvMbtdzXP740OCAytvBV+eZHfw+mQUtGtomU29mACPVOu1PtAuxJ9cLp0ZvAfAcgE8a8jZDw0LPWAgJr+04H4OQ/8RhIaEhbTENYoE1mKtpF2JvrhdOwHoPVKN/GcCc+rx8bmDAHzkyqU2bC/33Z/44AQLq0w7TYBYAT7tiMAFXDWc5jX46gKl1eckRhUL7g5+PTVu/R9wQLsZlCmyvEzossF6V/ZF2IY3FtcMJlF8kehE23GYpJKTghWYhXiDEpu3fp6223CSArPYjGTuzABil1mlX0i6kMbl+OAFAo/8cwNOwXjio1vDmoafMhNg0Jrazlj8SVIgO9iiPqZNiAE+pd
dofaBfS2NwjnACg0a8EMAiAoaqnFwUod1+WyWw6ReV4wTJ+E+9nz/IYm+gAdHCl2yU1cZ9wAoBGvwlAZwDnKz98Si4/u0zpZ/MAguG7+H0KM1rZuzymRqsB/EOt056mXYijuFc4AUCjPw6gHawz41FCSPGzzZpyIMSm3aW9SgX9gANCtevTMnZnAjBJrdMOU+u0RbSLcST3CycAaPR6AP8CMG1Us6ZHjBxpaetLJ67j/+KAoMYrjqkkB8Ajap1WlBsNNTYiCE61coPdJaxI6AbgewCRtR0bdlu4vPALSzMC2HQ1l2mQ32AdJ3uDdiG0uGfPWcnJZ0/uAfAQgFW1HTttleUaC2ajEwC8C6C3OwcTYD3nXRJWJDwJ4FMAIfc+1z6D/+v1NTxb4aBxXQQwXq3TbqFdiBi4fc9Z2clnT64BEId7Bs4TQeAnbOA96FTlFowA3gEQz4L5N9ZzViNhRUIvAIsBqJN3W3Yn7xFqXAqTqbffYO0tRbfVBm0snDVIWJEgJbzw0vfzLeOlPGJo1+NiLgKYptZpa/2s765YOG2gjVUHwLrKwisA2Oltw+TBegr7kVqnNdIuRsxYOOtAG6sOBzAT1iU52VpBdWOEdY7tbLVOm0u7GGfAwlkP2lh1PIDJAIYBsGlkkRu7CuALAF+oddoc2sU4ExbOBrhzuvssgP8AiKVcjtjsBvAxgLXOsuWe2LBw2ok2Vv1PWOeNDoL7DlQoBvAdgE/UOu1J2sU4OxZOO9PGqkNg3QVtLIAHKJfjKGdgHbyxQq3T6mkX4ypYOBuJNlZNAPSBNai94HrrDJ0DsA3AzwB+c7bt9ZwBC6cDaGPVHIC2AB4F0BNAEpzvQlIerAMGtgHYptZpL9Etx/WxcFKgjVXLYZ30/eidrw4Aal0f18HMAA7AGsZfARxS67QWuiW5FxZOEdDGqn0BdIe1R40GoLrz1cQBzfMAsmFdHaL86xSAXWqdtsAB7TPVYOEUsTuhVd35euCe/44E4A1rj1vVgAgB1hv/ZXe+buHv8F2o9N8X1TptWaN9E0y9sXC6gDsXn6SwLtPJAShj9xadHwsnw4gUm8/JMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAixcLJMCLFwskwIsXCyTAi9f9xx5uzgbZHkQAAAABJRU5ErkJggg==\n" + }, + "metadata": {} + } + ], + "source": [ + "# Sum up the total bytes in per customer from all of the exemplars collected\n", + "customer_bytes_map = defaultdict(int)\n", + "for exemplar in exemplars:\n", + " customer_bytes_map[exemplar.dropped_labels] += exemplar.value\n", + "\n", + "\n", + "customer_bytes_list = sorted(list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True)\n", + "\n", + "# Save our top 5 customers and sum all of the rest into \"Others\".\n", + "top_3_customers = [(\"Customer {}\".format(dict(val[0])[\"customer_id\"]), val[1]) for val in customer_bytes_list[:3]] + [(\"Other Customers\", sum([val[1] for val in customer_bytes_list[3:]]))]\n", + "\n", + "# unzip the data into X (sizes of each customer's contribution) and labels\n", + "labels, X = zip(*top_3_customers)\n", + "\n", + "# create the chart with matplotlib and show it\n", + "plt.pie(X)\n", + "plt.legend(labels, loc = \"upper right\") \n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Running this shows that the usage of our service is relatively closely split between customer 247, customer 123, and everyone else, which lines up closely with the data that was generated.\n", + "The more exemplars we sample, the more accurate this data will be, but also the more costly (in terms of memory usage) the metric would be.\n", + "\n", + "We can use the \"sample_count\" property of exemplars to predict the actual number of bytes customers sent (vs the percentage)\n", + "For example, to predict the number of bytes customer 123 sent:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "Customer 123 sent about 1025206 bytes this interval\n" + } + ], + "source": [ + "# Estimate how many bytes customer 123 sent\n", + "customer_123_bytes = customer_bytes_map[((\"customer_id\", 123), (\"method\", \"REST\"))]\n", + "\n", + "# Since the exemplars were randomly sampled, all sample_counts will be the same\n", + "sample_count = exemplars[0].sample_count\n", + "full_customer_123_bytes = sample_count * customer_123_bytes\n", + "\n", + "# With seed == 1 we get 1025206 - quite close to the statistical mean of 
1000000! (more exemplars would make this estimation even more accurate)\n", + "print(\"Customer 123 sent about {} bytes this interval\".format(int(full_customer_123_bytes)))\n" + ] + }, + { + "source": [ + "We could also estimate the percentage of our top 25 customers that use gRPC (another dropped label):" + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "~44% of the top 25 customers (by bytes in) used gRPC this interval\n" + } + ], + "source": [ + "# Determine the top 25 customers by how many bytes they sent in exemplars\n", + "top_25_customers = customer_bytes_list[:25]\n", + "\n", + "# out of those 25 customers, determine how many used grpc, and come up with a ratio\n", + "percent_grpc = len(list(filter(lambda customer_value: customer_value[0][1][1] == \"gRPC\", top_25_customers))) / len(top_25_customers)\n", + "\n", + "print(\"~{}% of the top 25 customers (by bytes in) used gRPC this interval\".format(int(percent_grpc*100)))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The value of exemplars goes beyond just handling dropped labels, however. We can also estimate the input distribution to the `bytes_counter` metric, through histograms or quantiles:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "50th Percentile Bytes In: 1031\n90th Percentile Bytes In: 1624\n99th Percentile Bytes In: 6094\n" + } + ], + "source": [ + "# Determine the 50th, 90th, and 99th percentile of byte size sent in\n", + "quantiles = np.quantile([exemplar.value for exemplar in exemplars], [0.5, 0.9, 0.99])\n", + "print(\"50th Percentile Bytes In:\", int(quantiles[0]))\n", + "print(\"90th Percentile Bytes In:\", int(quantiles[1]))\n", + "print(\"99th Percentile Bytes In:\", int(quantiles[2]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is only a small subset of the things that can be done with exemplars - almost any statistic \n", + "that could be created through an aggregator on the original data can be estimated through exemplars." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "jupyter3_Python_3", + "language": "python", + "name": "jupyter3_python_3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/examples/exemplars/statistical_exemplars.py b/docs/examples/exemplars/statistical_exemplars.py new file mode 100644 index 00000000000..353e516cb4e --- /dev/null +++ b/docs/examples/exemplars/statistical_exemplars.py @@ -0,0 +1,132 @@ +import numpy as np +import matplotlib.pyplot as plt +import random + +from collections import defaultdict + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import Counter, MeterProvider +from opentelemetry.sdk.metrics.export.aggregate import SumAggregator +from opentelemetry.sdk.metrics.export.controller import PushController +from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import InMemoryMetricsExporter +from opentelemetry.sdk.metrics.view import View, ViewConfig + +## set up opentelemetry + +# Sets the global MeterProvider instance +metrics.set_meter_provider(MeterProvider()) + +meter = metrics.get_meter(__name__) + +# Export to a python list so we can do stats with the data +exporter = InMemoryMetricsExporter() + +# instead of waiting for the controller to tick over time, we will just tick it ourselves +controller = PushController(meter, exporter, 500) + +# Create the metric that we will use +bytes_counter = meter.create_metric( + name="bytes_counter", + description="Number of bytes received by service", + unit="By", + value_type=int, + metric_type=Counter, +) + +# Every time interval we will collect 100 exemplars statistically (selected without bias) +aggregator_config = {"num_exemplars": 100, "statistical_exemplars": True} + +# Assign a Sum aggregator to `bytes_counter` that collects exemplars +counter_view = View( + bytes_counter, + SumAggregator, + aggregator_config=aggregator_config, + label_keys=["environment"], + view_config=ViewConfig.LABEL_KEYS, +) + +meter.register_view(counter_view) + +## generate the random metric data + +def unknown_customer_calls(): + """Generate customer call data to our application""" + + # set a random seed for consistency of data for example purposes + np.random.seed(1) + # Make exemplar selection consistent for example purposes + random.seed(1) + + # customer 123 is a big user, and made 1000 requests in this timeframe + requests = np.random.normal(1000, 250, 1000) # 1000 requests with average 1000 bytes, covariance 100 + + for request in requests: + bytes_counter.add(int(request), {"environment": "production", "method": "REST", "customer_id": 123}) + + # customer 247 is another big user, making fewer, but bigger requests + requests = np.random.normal(5000, 1250, 200) # 200 requests with average size of 5k bytes + + for request in requests: + bytes_counter.add(int(request), {"environment": "production", "method": "REST", "customer_id": 247}) + + # There are many other smaller customers + for customer_id in range(250): + requests = np.random.normal(1000, 250, np.random.randint(1, 10)) + method = "REST" if np.random.randint(2) else "gRPC" + for request in requests: + bytes_counter.add(int(request), {"environment": "production", "method": method, "customer_id": customer_id}) + +unknown_customer_calls() + +# 
Tick the controller so it sends metrics to the exporter +controller.tick() + +# collect metrics from our exporter +metric_data = exporter.get_exported_metrics() + +# get the exemplars from the bytes_in counter aggregator +aggregator = metric_data[0].aggregator +exemplars = aggregator.checkpoint_exemplars + +# Sum up the total bytes in per customer from all of the exemplars collected +customer_bytes_map = defaultdict(int) +for exemplar in exemplars: + customer_bytes_map[exemplar.dropped_labels] += exemplar.value + + +customer_bytes_list = sorted(list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True) + +# Save our top 5 customers and sum all of the rest into "Others". +top_5_customers = [("Customer {}".format(dict(val[0])["customer_id"]), val[1]) for val in customer_bytes_list[:5]] + [("Other Customers", sum([val[1] for val in customer_bytes_list[5:]]))] + +# unzip the data into X (sizes of each customer's contribution) and labels +labels, X = zip(*top_5_customers) + +# create the chart with matplotlib and show it +plt.pie(X, labels=labels) +plt.show() + +# Estimate how many bytes customer 123 sent +customer_123_bytes = customer_bytes_map[(("customer_id", 123), ("method", "REST"))] + +# Since the exemplars were randomly sampled, all sample_counts will be the same +sample_count = exemplars[0].sample_count +print("sample count", sample_count, "custmer", customer_123_bytes) +full_customer_123_bytes = sample_count * customer_123_bytes + +# With seed == 1 we get 1008612 - quite close to the statistical mean of 1000000! (more exemplars would make this estimation even more accurate) +print("Customer 123 sent about {} bytes this interval".format(int(full_customer_123_bytes))) + +# Determine the top 25 customers by how many bytes they sent in exemplars +top_25_customers = customer_bytes_list[:25] + +# out of those 25 customers, determine how many used grpc, and come up with a ratio +percent_grpc = len(list(filter(lambda customer_value: customer_value[0][1][1] == "gRPC", top_25_customers))) / len(top_25_customers) + +print("~{}% of the top 25 customers (by bytes in) used gRPC this interval".format(int(percent_grpc*100))) + +# Determine the 50th, 90th, and 99th percentile of byte size sent in +quantiles = np.quantile([exemplar.value for exemplar in exemplars], [0.5, 0.9, 0.99]) +print("50th Percentile Bytes In:", int(quantiles[0])) +print("90th Percentile Bytes In:", int(quantiles[1])) +print("99th Percentile Bytes In:", int(quantiles[2])) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py index 16911f94efb..8abada0a3c7 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py @@ -77,11 +77,12 @@ def export( ) -> "MetricsExportResult": for record in metric_records: print( - '{}(data="{}", labels="{}", value={})'.format( + '{}(data="{}", labels="{}", value={}, exemplars={})'.format( type(self).__name__, record.instrument, record.labels, record.aggregator.checkpoint, + record.aggregator.checkpoint_exemplars ) ) return MetricsExportResult.SUCCESS diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py index 121f39a98b6..a0a8db346bc 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py @@ -16,8 +16,17 @@ import 
logging import threading from collections import OrderedDict, namedtuple +import itertools +from collections import namedtuple, OrderedDict from opentelemetry.util import time_ns +from opentelemetry.sdk.metrics.export.exemplars import ( + Exemplar, + RandomExemplarSampler, + MinMaxExemplarSampler, + BucketedExemplarSampler, + ExemplarManager +) logger = logging.getLogger(__name__) @@ -36,9 +45,10 @@ def __init__(self, config=None): self.config = config else: self.config = {} + self.checkpoint_exemplars = list() @abc.abstractmethod - def update(self, value): + def update(self, value, dropped_labels=None): """Updates the current with the new value.""" @abc.abstractmethod @@ -59,15 +69,19 @@ def __init__(self, config=None): self.checkpoint = 0 self._lock = threading.Lock() self.last_update_timestamp = None + self.exemplar_manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) - def update(self, value): + def update(self, value, dropped_labels=None): with self._lock: self.current += value self.last_update_timestamp = time_ns() + self.exemplar_manager.sample(value, dropped_labels) + def take_checkpoint(self): with self._lock: self.checkpoint = self.current + self.checkpoint_exemplars = self.exemplar_manager.take_checkpoint() self.current = 0 def merge(self, other): @@ -77,6 +91,7 @@ def merge(self, other): self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp ) + self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) class MinMaxSumCountAggregator(Aggregator): @@ -105,8 +120,11 @@ def __init__(self, config=None): self._lock = threading.Lock() self.last_update_timestamp = None - def update(self, value): + self.exemplar_manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + + def update(self, value, dropped_labels=None): with self._lock: + if self.current is self._EMPTY: self.current = self._TYPE(value, value, value, 1) else: @@ -118,9 +136,12 @@ def update(self, value): ) self.last_update_timestamp = time_ns() + self.exemplar_manager.sample(value, dropped_labels) + def take_checkpoint(self): with self._lock: self.checkpoint = self.current + self.checkpoint_exemplars = self.exemplar_manager.take_checkpoint() self.current = self._EMPTY def merge(self, other): @@ -132,6 +153,7 @@ def merge(self, other): self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp ) + self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) class HistogramAggregator(Aggregator): @@ -151,6 +173,8 @@ def __init__(self, config=None): self.current = OrderedDict([(bb, 0) for bb in self._boundaries]) self.checkpoint = OrderedDict([(bb, 0) for bb in self._boundaries]) + self.exemplar_manager = ExemplarManager(config, BucketedExemplarSampler, BucketedExemplarSampler, boundaries=self._boundaries) + self.current[">"] = 0 self.checkpoint[">"] = 0 @@ -178,18 +202,21 @@ def _merge_checkpoint(cls, val1, val2): logger.warning("Cannot merge histograms with different buckets.") return val1 - def update(self, value): + def update(self, value, dropped_labels=None): with self._lock: if self.current is None: self.current = [0 for ii in range(len(self._boundaries) + 1)] # greater than max value if value >= self._boundaries[len(self._boundaries) - 1]: self.current[">"] += 1 + self.exemplar_manager.sample(value, dropped_labels, bucket_index=len(self._boundaries)) else: - for bb in 
self._boundaries: + for index, bb in enumerate(self._boundaries): # find first bucket that value is less than if value < bb: self.current[bb] += 1 + + self.exemplar_manager.sample(value, dropped_labels, bucket_index=index) break self.last_update_timestamp = time_ns() @@ -197,6 +224,9 @@ def take_checkpoint(self): with self._lock: self.checkpoint = self.current self.current = OrderedDict([(bb, 0) for bb in self._boundaries]) + + self.checkpoint_exemplars = self.exemplar_manager.take_checkpoint() + self.current[">"] = 0 def merge(self, other): @@ -205,6 +235,9 @@ def merge(self, other): self.checkpoint = self._merge_checkpoint( self.checkpoint, other.checkpoint ) + + self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) + self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp ) @@ -218,7 +251,7 @@ def __init__(self, config=None): self._lock = threading.Lock() self.last_update_timestamp = None - def update(self, value): + def update(self, value, dropped_labels=None): with self._lock: self.current = value self.last_update_timestamp = time_ns() @@ -245,19 +278,20 @@ class ValueObserverAggregator(Aggregator): def __init__(self, config=None): super().__init__(config=config) - self.mmsc = MinMaxSumCountAggregator() + self.mmsc = MinMaxSumCountAggregator(config=config) self.current = None self.checkpoint = self._TYPE(None, None, None, 0, None) self.last_update_timestamp = None - def update(self, value): - self.mmsc.update(value) + def update(self, value, dropped_labels=None): + self.mmsc.update(value, dropped_labels=dropped_labels) self.current = value self.last_update_timestamp = time_ns() def take_checkpoint(self): self.mmsc.take_checkpoint() self.checkpoint = self._TYPE(*(self.mmsc.checkpoint + (self.current,))) + self.checkpoint_exemplars = self.mmsc.checkpoint_exemplars def merge(self, other): if verify_type(self, other): @@ -269,6 +303,7 @@ def merge(self, other): if self.last_update_timestamp == other.last_update_timestamp: last = other.checkpoint.last self.checkpoint = self._TYPE(*(self.mmsc.checkpoint + (last,))) + self.checkpoint_exemplars = self.mmsc.checkpoint_exemplars def get_latest_timestamp(time_stamp, other_timestamp): diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py new file mode 100644 index 00000000000..9fc74cebe57 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py @@ -0,0 +1,277 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Exemplars are sample data points for aggregators. For more information, see the `spec `_ + + Every synchronous aggregator is instrumented with two exemplar recorders: + 1. A "semantic" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, ie min + max). + 2. 
A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) + + To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the "Exemplars" example for an example): + "num_exemplars": The number of exemplars to record (if applicable, in each bucket). Note that in non-statistical mode the recorder may not use "num_exemplars" + "statistical_exemplars": If exemplars should be recorded statistically + + For exemplars to be recorded, `num_exemplars` must be greater than 0. +""" + +import abc +import random +import itertools + +from opentelemetry.context import get_current +from opentelemetry.util import time_ns + +class Exemplar: + """ + A sample data point for an aggregator. Exemplars represent individual measurements recorded. + """ + def __init__(self, value, timestamp, dropped_labels=None, span_id=None, trace_id=None, sample_count=None): + self._value = value + self._timestamp = timestamp + self._span_id = span_id + self._trace_id = trace_id + self._sample_count = sample_count + self._dropped_labels = dropped_labels + + def __repr__(self): + return f"Exemplar(value={self._value}, timestamp={self._timestamp}, labels={dict(self._dropped_labels) if self._dropped_labels else None}, context={{'span_id':{self._span_id}, 'trace_id':{self._trace_id}}})" + + @property + def value(self): + """The current value of the Exemplar point""" + return self._value + + @property + def timestamp(self): + """The time that this Exemplar's value was recorded""" + return self._timestamp + + @property + def span_id(self): + """The span ID of the context when the exemplar was recorded""" + return self._span_id + + @property + def trace_id(self): + """The trace ID of the context when the exemplar was recorded""" + return self._trace_id + + @property + def dropped_labels(self): + """Labels that were dropped by the aggregator but still passed by the user""" + return self._dropped_labels + + @property + def sample_count(self): + """For statistical exemplars, how many measurements a single exemplar represents""" + return self._sample_count + + def set_sample_count(self, count): + self._sample_count = count + +class ExemplarSampler: + """ + Abstract class to sample exemplars through a stream of incoming measurements + """ + def __init__(self, k, statistical=False): + self._k = k + self._statistical = statistical + self._sample_set = list() + + @abc.abstractmethod + def sample(self, exemplar, **kwargs): + """ + Given an exemplar, determine if it should be sampled or not + """ + pass + + @property + @abc.abstractmethod + def sample_set(self): + """ + Return the list of exemplars that have been sampled + """ + pass + + @abc.abstractmethod + def merge(self, set1, set2): + """ + Given two lists of sampled exemplars, merge them while maintaining the invariants specified by this sampler + """ + pass + + @abc.abstractmethod + def reset(self): + """ + Reset the sampler + """ + pass + +class RandomExemplarSampler(ExemplarSampler): + """ + Randomly sample a set of k exemplars from a stream. Each measurement in the stream + will have an equal chance of being sampled. + + If RandomExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. + This value will be equal to the number of measurements recorded per every exemplar measured - all exemplars will have the same sample_count value. 
+ """ + def __init__(self, k, statistical=False): + super().__init__(k, statistical=statistical) + self.rand_count = 0 + + def sample(self, exemplar, **kwargs): + self.rand_count += 1 + + if len(self.sample_set) < self._k: + self.sample_set.append(exemplar) + return + + j = random.randint(0, self.rand_count-1) + + if j < self._k: + self.sample_set[j] = exemplar + + def merge(self, set1, set2): + combined = set1 + set2 + if len(combined) <= self._k: + return combined + else: + return random.sample(combined, self._k) + + @property + def sample_set(self): + if self._statistical: + for exemplar in self._sample_set: + exemplar.set_sample_count(self.rand_count / len(self._sample_set)) + return self._sample_set + + def reset(self): + self._sample_set = [] + self.rand_count = 0 + +class MinMaxExemplarSampler(ExemplarSampler): + """ + Sample the minimum and maximum measurements recorded only + """ + def __init__(self, k, statistical=False): + # K will always be 2 (min and max), and selecting min and max can never be statistical + super().__init__(2, statistical=False) + self._sample_set = [] + + def sample(self, exemplar, **kwargs): + self._sample_set = [min(self._sample_set + [exemplar], key=lambda exemplar: exemplar.value), max(self._sample_set + [exemplar], key=lambda exemplar: exemplar.value)] + if self._sample_set[0] == self._sample_set[1]: + self._sample_set = [self._sample_set[0]] + + @property + def sample_set(self): + return self._sample_set + + def merge(self, set1, set2): + merged_set = set1 + set2 + if len(merged_set) <= 2: + return sorted(merged_set, key=lambda exemplar: exemplar.value) + + return [min(merged_set), max(merged_set)] + + def reset(self): + self._sample_set = [] + +class BucketedExemplarSampler(ExemplarSampler): + """ + Randomly sample k exemplars for each bucket in the aggregator. + + If `BucketedExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. + This value will be equal to `len(bucket.exemplars) / bucket.count`, that is the number of measurements each exemplar represents. + """ + def __init__(self, k, statistical=False, boundaries=None): + super().__init__(k) + self._boundaries = boundaries + self._sample_set = [RandomExemplarSampler(k, statistical=statistical) for _ in range(len(self._boundaries) + 1)] + + def sample(self, exemplar, **kwargs): + bucket_index = kwargs.get("bucket_index") + if bucket_index is None: + return + + self._sample_set[bucket_index].sample(exemplar) + + @property + def sample_set(self): + return list(itertools.chain.from_iterable([sampler.sample_set for sampler in self._sample_set])) + + def merge(self, set1, set2): + exemplar_set = [list() for _ in range(len(self._boundaries) + 1)] + for setx in [set1, set2]: + bucket_idx = 0 + for exemplar in setx: + if exemplar.value >= self._boundaries[-1]: + exemplar_set[-1].append(exemplar) + continue + + while exemplar.value >= self._boundaries[bucket_idx]: + bucket_idx += 1 + exemplar_set[bucket_idx].append(exemplar) + + for i, inner_set in enumerate(exemplar_set): + if len(inner_set) > self._k: + exemplar_set[i] = random.sample(inner_set, self._k) + return list(itertools.chain.from_iterable(exemplar_set)) + + def reset(self): + for sampler in self._sample_set: + sampler.reset() + +class ExemplarManager: + """ + Manages two different exemplar samplers: + 1. A "semantic" exemplar sampler, which only samples exemplars if they have a sampled trace context. + 2. 
A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) + """ + + def __init__(self, config, default_exemplar_sampler, statistical_exemplar_sampler=None, **kwargs): + if config: + self.exemplars_count = config.get('num_exemplars', 0) + self.record_exemplars = self.exemplars_count > 0 + self.statistical_exemplars = config.get('statistical_exemplars', False) + if self.statistical_exemplars and statistical_exemplar_sampler: + self.exemplar_sampler = statistical_exemplar_sampler(self.exemplars_count, statistical=self.statistical_exemplars, **kwargs) + else: + self.exemplar_sampler = default_exemplar_sampler(self.exemplars_count, statistical=self.statistical_exemplars, **kwargs) + else: + self.record_exemplars = False + + def sample(self, value, dropped_labels, **kwargs): + context = get_current() + + is_sampled = 'current-span' in context and context['current-span'].get_context().trace_flags.sampled if context else False + + # if not statistical, we want to gather traced exemplars only - so otherwise don't sample + if self.record_exemplars and (is_sampled or self.statistical_exemplars): + span_id = context['current-span'].context.span_id if context else None + trace_id = context['current-span'].context.trace_id if context else None + self.exemplar_sampler.sample(Exemplar(value, time_ns(), dropped_labels, span_id, trace_id), **kwargs) + + def take_checkpoint(self): + if self.record_exemplars: + ret = self.exemplar_sampler.sample_set + self.exemplar_sampler.reset() + return ret + return [] + + def merge(self, checkpoint_exemplars, other_checkpoint_exemplars): + if self.record_exemplars: + return self.exemplar_sampler.merge(checkpoint_exemplars, other_checkpoint_exemplars) + return [] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py index 0dd75c6887b..2f85c95573c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py @@ -39,12 +39,13 @@ class ViewData: - def __init__(self, labels: Tuple[Tuple[str, str]], aggregator: Aggregator): + def __init__(self, labels: Tuple[Tuple[str, str]], aggregator: Aggregator, dropped_labels: Tuple[Tuple[str, str]] = None): self.labels = labels self.aggregator = aggregator + self.dropped_labels = dropped_labels def record(self, value: ValueT): - self.aggregator.update(value) + self.aggregator.update(value, dropped_labels=self.dropped_labels) # Uniqueness is based on labels and aggregator type def __hash__(self): diff --git a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py new file mode 100644 index 00000000000..95412d2b8e9 --- /dev/null +++ b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py @@ -0,0 +1,460 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from unittest.mock import patch +from time import time + +from opentelemetry.sdk.metrics.export.aggregate import ( + SumAggregator, + MinMaxSumCountAggregator, + HistogramAggregator, + Exemplar, + RandomExemplarSampler, + MinMaxExemplarSampler, + BucketedExemplarSampler, + ExemplarManager, + ValueObserverAggregator +) +from opentelemetry.sdk.metrics import ( + MeterProvider, + ValueRecorder, +) +from opentelemetry import trace, metrics +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.trace.sampling import ALWAYS_OFF, ALWAYS_ON + +from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import InMemoryMetricsExporter +from opentelemetry.sdk.metrics.view import View, ViewConfig +from opentelemetry.sdk.metrics.export.controller import PushController + +class TestRandomExemplarSampler(unittest.TestCase): + def test_sample(self): + sampler = RandomExemplarSampler(2, statistical=True) + exemplar1 = Exemplar(1, time()) + exemplar2 = Exemplar(2, time()) + exemplar3 = Exemplar(3, time()) + + sampler.sample(exemplar1) + self.assertEqual(len(sampler.sample_set), 1) + self.assertEqual(sampler.sample_set[0], exemplar1) + self.assertEqual(exemplar1.sample_count, 1) + + sampler.sample(exemplar2) + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[1], exemplar2) + self.assertEqual(exemplar1.sample_count, 1) + self.assertEqual(exemplar2.sample_count, 1) + + def _patched_randint(mn, mx): + return mn + + with patch("random.randint", _patched_randint): + sampler.sample(exemplar3) + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[0], exemplar3) + self.assertEqual(exemplar3.sample_count, 1.5) + self.assertEqual(exemplar2.sample_count, 1.5) + + def _patched_randint(mn, mx): + return 1 + + with patch("random.randint", _patched_randint): + sampler.sample(exemplar1) + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[1], exemplar1) + self.assertEqual(exemplar1.sample_count, 2) + + def test_reset(self): + sampler = RandomExemplarSampler(2) + exemplar1 = Exemplar(1, time()) + exemplar2 = Exemplar(2, time()) + + sampler.sample(exemplar1) + sampler.sample(exemplar2) + + sampler.reset() + self.assertEqual(len(sampler.sample_set), 0) + + sampler.sample(exemplar1) + self.assertEqual(len(sampler.sample_set), 1) + + def test_merge(self): + set1 = [1, 2, 3] + set2 = [4, 5, 6] + sampler = RandomExemplarSampler(6) + self.assertEqual(set1+set2, sampler.merge(set1, set2)) + sampler = RandomExemplarSampler(8) + self.assertEqual(set1+set2, sampler.merge(set1, set2)) + sampler = RandomExemplarSampler(4) + self.assertEqual(4, len(sampler.merge(set1, set2))) + + +class TestMinMaxExemplarSampler(unittest.TestCase): + def test_sample(self): + sampler = MinMaxExemplarSampler(2) + exemplar1 = Exemplar(1, time()) + exemplar2 = Exemplar(2, time()) + exemplar3 = Exemplar(3, time()) + + sampler.sample(exemplar1) + self.assertEqual(len(sampler.sample_set), 1) + self.assertEqual(sampler.sample_set[0], exemplar1) + + sampler.sample(exemplar2) + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[0], exemplar1) + self.assertEqual(sampler.sample_set[1], exemplar2) + + sampler.sample(exemplar3) + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[0], exemplar1) + self.assertEqual(sampler.sample_set[1], exemplar3) + + def test_reset(self): + sampler = MinMaxExemplarSampler(2) + exemplar1 = Exemplar(1, time()) + exemplar2 = Exemplar(2, 
time()) + + sampler.sample(exemplar1) + sampler.sample(exemplar2) + + sampler.reset() + self.assertEqual(len(sampler.sample_set), 0) + + sampler.sample(exemplar1) + self.assertEqual(len(sampler.sample_set), 1) + + def test_merge(self): + set1 = [1, 2, 3] + set2 = [4, 5, 6] + sampler = MinMaxExemplarSampler(2) + self.assertEqual([1, 6], sampler.merge(set1, set2)) + + +class TestBucketedExemplarSampler(unittest.TestCase): + def test_exemplars(self): + sampler = BucketedExemplarSampler(1, boundaries=[2, 4, 7], statistical=True) + sampler.sample(Exemplar(3, time()), bucket_index=1) + self.assertEqual(len(sampler.sample_set), 1) + self.assertEqual(sampler.sample_set[0].value, 3) + + sampler.sample(Exemplar(5, time()), bucket_index=2) + + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[1].value, 5) + self.assertEqual(sampler.sample_set[1].sample_count, 1) + + def _patched_randint(mn, mx): + return 0 + + with patch("random.randint", _patched_randint): + sampler.sample(Exemplar(6, time()), bucket_index=2) + + self.assertEqual(len(sampler.sample_set), 2) + self.assertEqual(sampler.sample_set[1].value, 6) + self.assertEqual(sampler.sample_set[1].sample_count, 2) + + sampler.sample(Exemplar(1, time()), bucket_index=0) + sampler.sample(Exemplar(9, time()), bucket_index=3) + + self.assertEqual(len(sampler.sample_set), 4) + self.assertEqual(sampler.sample_set[0].sample_count, 1) + self.assertEqual(sampler.sample_set[1].sample_count, 1) + self.assertEqual(sampler.sample_set[2].sample_count, 2) + self.assertEqual(sampler.sample_set[3].sample_count, 1) + + def test_merge(self): + sampler = BucketedExemplarSampler(1, boundaries=[3, 4, 6]) + + self.assertEqual(len(sampler.merge([Exemplar(1, time())], [Exemplar(2, time())])), 1) + + self.assertEqual(len(sampler.merge([Exemplar(1, time()), Exemplar(5, time())], [Exemplar(2, time())])), 2) + + +class TestExemplarManager(unittest.TestCase): + def test_statistical(self): + config = {"statistical_exemplars": True, "num_exemplars": 1} + manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + self.assertIsInstance(manager.exemplar_sampler, RandomExemplarSampler) + manager.sample(5, {"dropped_label": "value"}) + self.assertEqual(len(manager.exemplar_sampler.sample_set), 1) + self.assertEqual(manager.exemplar_sampler.sample_set[0].value, 5) + self.assertEqual(manager.exemplar_sampler.sample_set[0].dropped_labels, {"dropped_label": "value"}) + + checkpoint = manager.take_checkpoint() + self.assertEqual(len(checkpoint), 1) + self.assertEqual(checkpoint[0].value, 5) + + self.assertEqual(len(manager.exemplar_sampler.sample_set), 0) + + merged = manager.merge([Exemplar(2, time())], [Exemplar(3, time())]) + self.assertEqual(len(merged), 1) + + def test_semantic(self): + config = {"statistical_exemplars": True, "num_exemplars": 1} + manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + self.assertIsInstance(manager.exemplar_sampler, RandomExemplarSampler) + manager.sample(5, {}) + self.assertEqual(len(manager.exemplar_sampler.sample_set), 1) + self.assertEqual(manager.exemplar_sampler.sample_set[0].value, 5) + + checkpoint = manager.take_checkpoint() + self.assertEqual(len(checkpoint), 1) + self.assertEqual(checkpoint[0].value, 5) + + self.assertEqual(len(manager.exemplar_sampler.sample_set), 0) + + merged = manager.merge([Exemplar(2, time())], [Exemplar(3, time())]) + self.assertEqual(len(merged), 1) + + +class TestStandardExemplars(unittest.TestCase): + def _no_exemplars_test(self, 
aggregator): + config = {} + agg = aggregator(config=config) + agg.update(3) + agg.update(5) + agg.take_checkpoint() + self.assertEqual(agg.checkpoint_exemplars, []) + + other_agg = aggregator(config={"num_exemplars": 2, "statistical_exemplars": True}) + other_agg.update(2) + other_agg.update(4) + other_agg.take_checkpoint() + self.assertEqual(len(other_agg.checkpoint_exemplars), 2) + agg.merge(other_agg) + self.assertEqual(agg.checkpoint_exemplars, []) + + def _simple_exemplars_test(self, aggregator): + config = {"num_exemplars": 2, "statistical_exemplars": True} + agg = aggregator(config=config) + agg.update(2, dropped_labels={"dropped_label": "value"}) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 1) + self.assertEqual(agg.checkpoint_exemplars[0].value, 2) + self.assertEqual(agg.checkpoint_exemplars[0].dropped_labels, {"dropped_label": "value"}) + + agg.update(2) + agg.update(5) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 2) + self.assertEqual(agg.checkpoint_exemplars[1].value, 5) + + agg.update(2) + agg.update(5) + + def _patched_randint(mn, mx): + return 1 + with patch("random.randint", _patched_randint): + agg.update(7) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 2) + self.assertEqual(agg.checkpoint_exemplars[0].value, 2) + self.assertEqual(agg.checkpoint_exemplars[1].value, 7) + + def _record_traces_only_test(self, aggregator): + config = {"num_exemplars": 2} + agg = aggregator(config=config) + + agg.update(2) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 0) + + # Test with sampler on/off + tp = TracerProvider(sampler=ALWAYS_ON) + tracer = tp.get_tracer(__name__) + + span = tracer.start_span("Test Span ON") + with tracer.use_span(span): + agg.update(5) + agg.update(7) + agg.update(6) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 2) + + self.assertEqual(agg.checkpoint_exemplars[0].span_id, span.context.span_id) + self.assertEqual(agg.checkpoint_exemplars[0].value, 5) + self.assertEqual(agg.checkpoint_exemplars[1].value, 7) + + tp = TracerProvider(sampler=ALWAYS_OFF) + tracer = tp.get_tracer(__name__) + + with tracer.start_as_current_span("Test Span OFF"): + agg.update(5) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 0) + + def _merge_aggregators_test(self, aggregator): + config = {"num_exemplars": 2, "statistical_exemplars": True} + + agg1 = aggregator(config=config) + agg2 = aggregator(config=config) + + agg1.update(1) + agg1.take_checkpoint() + + agg2.update(2) + agg2.take_checkpoint() + + self.assertEqual(len(agg1.checkpoint_exemplars), 1) + self.assertEqual(len(agg2.checkpoint_exemplars), 1) + + agg1.merge(agg2) + + self.assertEqual(len(agg1.checkpoint_exemplars), 2) + + def test_sum_aggregator(self): + self._no_exemplars_test(SumAggregator) + self._simple_exemplars_test(SumAggregator) + self._record_traces_only_test(SumAggregator) + self._merge_aggregators_test(SumAggregator) + + def test_mmsc_aggregator(self): + self._no_exemplars_test(MinMaxSumCountAggregator) + self._simple_exemplars_test(MinMaxSumCountAggregator) + self._record_traces_only_test(MinMaxSumCountAggregator) + self._merge_aggregators_test(MinMaxSumCountAggregator) + + def test_observer_aggregator(self): + self._no_exemplars_test(ValueObserverAggregator) + self._simple_exemplars_test(ValueObserverAggregator) + self._record_traces_only_test(ValueObserverAggregator) + self._merge_aggregators_test(ValueObserverAggregator) + + +class 
TestHistogramExemplars(unittest.TestCase): + def test_no_exemplars(self): + config = {"bounds": [2, 4, 6]} + agg = HistogramAggregator(config=config) + agg.update(3) + agg.update(5) + agg.take_checkpoint() + self.assertEqual(agg.checkpoint_exemplars, []) + + other_agg = HistogramAggregator(config=dict(config, **{"num_exemplars": 1, "statistical_exemplars": True})) + + other_agg.update(3) + other_agg.update(5) + other_agg.take_checkpoint() + self.assertEqual(len(other_agg.checkpoint_exemplars), 2) + + agg.merge(other_agg) + self.assertEqual(agg.checkpoint_exemplars, []) + + def test_simple_exemplars(self): + config = {"bounds": [2, 4, 7], "num_exemplars": 1, "statistical_exemplars": True} + agg = HistogramAggregator(config=config) + agg.update(2, dropped_labels={"dropped_label": "value"}) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 1) + self.assertEqual(agg.checkpoint_exemplars[0].value, 2) + self.assertEqual(agg.checkpoint_exemplars[0].dropped_labels, {"dropped_label": "value"}) + + agg.update(2) + agg.update(5) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 2) + self.assertEqual(agg.checkpoint_exemplars[1].value, 5) + + agg.update(5) + + def _patched_randint(mn, mx): + return 0 + + with patch("random.randint", _patched_randint): + agg.update(6) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 1) + self.assertEqual(agg.checkpoint_exemplars[0].value, 6) + + agg.update(1) + agg.update(3) + agg.update(6) + agg.update(9) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 4) + + def test_record_traces_only(self): + config = {"bounds": [2, 4, 6], "num_exemplars": 2, "statistical_exemplars": False} + agg = HistogramAggregator(config=config) + + agg.update(2) + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 0) + + # Test with sampler on/off + tp = TracerProvider(sampler=ALWAYS_ON) + tracer = tp.get_tracer(__name__) + + span = tracer.start_span("Test Span ON") + with tracer.use_span(span): + agg.update(5) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 1) + + self.assertEqual(agg.checkpoint_exemplars[0].span_id, span.context.span_id) + + tp = TracerProvider(sampler=ALWAYS_OFF) + tracer = tp.get_tracer(__name__) + + with tracer.start_as_current_span("Test Span OFF"): + agg.update(5) + + agg.take_checkpoint() + self.assertEqual(len(agg.checkpoint_exemplars), 0) + +class TestFullPipelineExemplars(unittest.TestCase): + def test_histogram(self): + # Use the meter type provided by the SDK package + metrics.set_meter_provider(MeterProvider()) + meter = metrics.get_meter(__name__) + exporter = InMemoryMetricsExporter() + controller = PushController(meter, exporter, 5) + + requests_size = meter.create_metric( + name="requests_size", + description="size of requests", + unit="1", + value_type=int, + metric_type=ValueRecorder, + ) + + size_view = View( + requests_size, + HistogramAggregator(config={"bounds": [20, 40, 60, 80, 100], "num_exemplars": 1, "statistical_exemplars": True}), + label_keys=["environment"], + config=ViewConfig.LABEL_KEYS, + ) + + meter.register_view(size_view) + + # Since this is using the HistogramAggregator, the bucket counts will be reflected + # with each record + requests_size.record(25, {"environment": "staging", "test": "value"}) + requests_size.record(1, {"environment": "staging", "test": "value2"}) + requests_size.record(200, {"environment": "staging", "test": "value3"}) + + controller.tick() + metrics_list = 
exporter.get_exported_metrics() + self.assertEqual(len(metrics_list), 1) + exemplars = metrics_list[0].aggregator.checkpoint_exemplars + self.assertEqual(len(exemplars), 3) + self.assertEqual([(exemplar.value, exemplar.dropped_labels) for exemplar in exemplars], + [(1, (("test", "value2"),)), (25, (("test", "value"),)), (200, (("test", "value3"),))]) From c2727b4ee65089498832eba533baaea6a89fca63 Mon Sep 17 00:00:00 2001 From: Connor Adams Date: Thu, 23 Jul 2020 17:11:18 -0400 Subject: [PATCH 2/6] linting --- docs/examples/exemplars/semantic_exemplars.py | 38 +++-- .../exemplars/statistical_exemplars.py | 90 ++++++++--- .../src/opentelemetry/sdk/metrics/__init__.py | 2 +- .../sdk/metrics/export/__init__.py | 2 +- .../sdk/metrics/export/aggregate.py | 46 ++++-- .../sdk/metrics/export/exemplars.py | 142 ++++++++++++----- .../src/opentelemetry/sdk/metrics/view.py | 15 +- .../tests/metrics/export/test_exemplars.py | 147 +++++++++++++----- .../tests/metrics/export/test_export.py | 2 +- .../tests/metrics/test_metrics.py | 4 +- 10 files changed, 358 insertions(+), 130 deletions(-) diff --git a/docs/examples/exemplars/semantic_exemplars.py b/docs/examples/exemplars/semantic_exemplars.py index 5d14dd3bea5..bf80ede1f6b 100644 --- a/docs/examples/exemplars/semantic_exemplars.py +++ b/docs/examples/exemplars/semantic_exemplars.py @@ -20,14 +20,9 @@ import time from opentelemetry import metrics -from opentelemetry.sdk.metrics import ( - MeterProvider, - ValueRecorder, -) +from opentelemetry.sdk.metrics import MeterProvider, ValueRecorder from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter -from opentelemetry.sdk.metrics.export.aggregate import ( - HistogramAggregator, -) +from opentelemetry.sdk.metrics.export.aggregate import HistogramAggregator from opentelemetry.sdk.metrics.view import View, ViewConfig # Set up OpenTelemetry metrics @@ -35,7 +30,9 @@ meter = metrics.get_meter(__name__) # Use the Google Cloud Monitoring Metrics Exporter since its the only one that currently supports exemplars -metrics.get_meter_provider().start_pipeline(meter, ConsoleMetricsExporter(), 10) +metrics.get_meter_provider().start_pipeline( + meter, ConsoleMetricsExporter(), 10 +) # Create our duration metric request_duration = meter.create_metric( @@ -54,8 +51,24 @@ # We want to generate 1 exemplar per bucket, where each exemplar has a linked trace that was recorded. 
# So we need to set num_exemplars to 1 and not specify statistical_exemplars (defaults to false) HistogramAggregator, - aggregator_config={"bounds": [0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000], - "num_exemplars": 1}, + aggregator_config={ + "bounds": [ + 0, + 25, + 50, + 75, + 100, + 200, + 400, + 600, + 800, + 1000, + 2000, + 4000, + 6000, + ], + "num_exemplars": 1, + }, label_keys=["environment"], view_config=ViewConfig.LABEL_KEYS, ) @@ -64,5 +77,8 @@ for i in range(100): # Generate some random data for the histogram with a dropped label "customer_id" - request_duration.record(random.randint(1, 8000), {"environment": "staging", "customer_id": random.randint(1, 100)}) + request_duration.record( + random.randint(1, 8000), + {"environment": "staging", "customer_id": random.randint(1, 100)}, + ) time.sleep(1) diff --git a/docs/examples/exemplars/statistical_exemplars.py b/docs/examples/exemplars/statistical_exemplars.py index 353e516cb4e..25fd29e82ba 100644 --- a/docs/examples/exemplars/statistical_exemplars.py +++ b/docs/examples/exemplars/statistical_exemplars.py @@ -1,17 +1,18 @@ -import numpy as np -import matplotlib.pyplot as plt import random - from collections import defaultdict +import matplotlib.pyplot as plt +import numpy as np from opentelemetry import metrics from opentelemetry.sdk.metrics import Counter, MeterProvider from opentelemetry.sdk.metrics.export.aggregate import SumAggregator from opentelemetry.sdk.metrics.export.controller import PushController -from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import InMemoryMetricsExporter +from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import ( + InMemoryMetricsExporter, +) from opentelemetry.sdk.metrics.view import View, ViewConfig -## set up opentelemetry +# set up opentelemetry # Sets the global MeterProvider instance metrics.set_meter_provider(MeterProvider()) @@ -47,7 +48,8 @@ meter.register_view(counter_view) -## generate the random metric data +# generate the random metric data + def unknown_customer_calls(): """Generate customer call data to our application""" @@ -58,23 +60,49 @@ def unknown_customer_calls(): random.seed(1) # customer 123 is a big user, and made 1000 requests in this timeframe - requests = np.random.normal(1000, 250, 1000) # 1000 requests with average 1000 bytes, covariance 100 + requests = np.random.normal( + 1000, 250, 1000 + ) # 1000 requests with average 1000 bytes, covariance 100 for request in requests: - bytes_counter.add(int(request), {"environment": "production", "method": "REST", "customer_id": 123}) + bytes_counter.add( + int(request), + { + "environment": "production", + "method": "REST", + "customer_id": 123, + }, + ) # customer 247 is another big user, making fewer, but bigger requests - requests = np.random.normal(5000, 1250, 200) # 200 requests with average size of 5k bytes + requests = np.random.normal( + 5000, 1250, 200 + ) # 200 requests with average size of 5k bytes for request in requests: - bytes_counter.add(int(request), {"environment": "production", "method": "REST", "customer_id": 247}) + bytes_counter.add( + int(request), + { + "environment": "production", + "method": "REST", + "customer_id": 247, + }, + ) # There are many other smaller customers for customer_id in range(250): requests = np.random.normal(1000, 250, np.random.randint(1, 10)) method = "REST" if np.random.randint(2) else "gRPC" for request in requests: - bytes_counter.add(int(request), {"environment": "production", "method": method, "customer_id": customer_id}) + 
bytes_counter.add( + int(request), + { + "environment": "production", + "method": method, + "customer_id": customer_id, + }, + ) + unknown_customer_calls() @@ -94,10 +122,15 @@ def unknown_customer_calls(): customer_bytes_map[exemplar.dropped_labels] += exemplar.value -customer_bytes_list = sorted(list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True) +customer_bytes_list = sorted( + list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True +) # Save our top 5 customers and sum all of the rest into "Others". -top_5_customers = [("Customer {}".format(dict(val[0])["customer_id"]), val[1]) for val in customer_bytes_list[:5]] + [("Other Customers", sum([val[1] for val in customer_bytes_list[5:]]))] +top_5_customers = [ + ("Customer {}".format(dict(val[0])["customer_id"]), val[1]) + for val in customer_bytes_list[:5] +] + [("Other Customers", sum([val[1] for val in customer_bytes_list[5:]]))] # unzip the data into X (sizes of each customer's contribution) and labels labels, X = zip(*top_5_customers) @@ -107,7 +140,9 @@ def unknown_customer_calls(): plt.show() # Estimate how many bytes customer 123 sent -customer_123_bytes = customer_bytes_map[(("customer_id", 123), ("method", "REST"))] +customer_123_bytes = customer_bytes_map[ + (("customer_id", 123), ("method", "REST")) +] # Since the exemplars were randomly sampled, all sample_counts will be the same sample_count = exemplars[0].sample_count @@ -115,18 +150,35 @@ def unknown_customer_calls(): full_customer_123_bytes = sample_count * customer_123_bytes # With seed == 1 we get 1008612 - quite close to the statistical mean of 1000000! (more exemplars would make this estimation even more accurate) -print("Customer 123 sent about {} bytes this interval".format(int(full_customer_123_bytes))) +print( + "Customer 123 sent about {} bytes this interval".format( + int(full_customer_123_bytes) + ) +) # Determine the top 25 customers by how many bytes they sent in exemplars top_25_customers = customer_bytes_list[:25] # out of those 25 customers, determine how many used grpc, and come up with a ratio -percent_grpc = len(list(filter(lambda customer_value: customer_value[0][1][1] == "gRPC", top_25_customers))) / len(top_25_customers) - -print("~{}% of the top 25 customers (by bytes in) used gRPC this interval".format(int(percent_grpc*100))) +percent_grpc = len( + list( + filter( + lambda customer_value: customer_value[0][1][1] == "gRPC", + top_25_customers, + ) + ) +) / len(top_25_customers) + +print( + "~{}% of the top 25 customers (by bytes in) used gRPC this interval".format( + int(percent_grpc * 100) + ) +) # Determine the 50th, 90th, and 99th percentile of byte size sent in -quantiles = np.quantile([exemplar.value for exemplar in exemplars], [0.5, 0.9, 0.99]) +quantiles = np.quantile( + [exemplar.value for exemplar in exemplars], [0.5, 0.9, 0.99] +) print("50th Percentile Bytes In:", int(quantiles[0])) print("90th Percentile Bytes In:", int(quantiles[1])) print("99th Percentile Bytes In:", int(quantiles[2])) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index 2af8a551ee1..9bad705b9c2 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -75,7 +75,7 @@ def update(self, value: metrics_api.ValueT): with self._view_datas_lock: # record the value for each view_data belonging to this aggregator for view_data in self.view_datas: - view_data.record(value) + 
view_data.record(value, self._labels) def release(self): self.decrease_ref_count() diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py index 8abada0a3c7..ddd08df13c8 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py @@ -82,7 +82,7 @@ def export( record.instrument, record.labels, record.aggregator.checkpoint, - record.aggregator.checkpoint_exemplars + record.aggregator.checkpoint_exemplars, ) ) return MetricsExportResult.SUCCESS diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py index a0a8db346bc..998ee23b358 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py @@ -16,17 +16,14 @@ import logging import threading from collections import OrderedDict, namedtuple -import itertools -from collections import namedtuple, OrderedDict -from opentelemetry.util import time_ns from opentelemetry.sdk.metrics.export.exemplars import ( - Exemplar, - RandomExemplarSampler, - MinMaxExemplarSampler, BucketedExemplarSampler, - ExemplarManager + ExemplarManager, + MinMaxExemplarSampler, + RandomExemplarSampler, ) +from opentelemetry.util import time_ns logger = logging.getLogger(__name__) @@ -69,7 +66,9 @@ def __init__(self, config=None): self.checkpoint = 0 self._lock = threading.Lock() self.last_update_timestamp = None - self.exemplar_manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + self.exemplar_manager = ExemplarManager( + config, MinMaxExemplarSampler, RandomExemplarSampler + ) def update(self, value, dropped_labels=None): with self._lock: @@ -91,7 +90,9 @@ def merge(self, other): self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp ) - self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) + self.checkpoint_exemplars = self.exemplar_manager.merge( + self.checkpoint_exemplars, other.checkpoint_exemplars + ) class MinMaxSumCountAggregator(Aggregator): @@ -120,7 +121,9 @@ def __init__(self, config=None): self._lock = threading.Lock() self.last_update_timestamp = None - self.exemplar_manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + self.exemplar_manager = ExemplarManager( + config, MinMaxExemplarSampler, RandomExemplarSampler + ) def update(self, value, dropped_labels=None): with self._lock: @@ -153,7 +156,9 @@ def merge(self, other): self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp ) - self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) + self.checkpoint_exemplars = self.exemplar_manager.merge( + self.checkpoint_exemplars, other.checkpoint_exemplars + ) class HistogramAggregator(Aggregator): @@ -173,7 +178,12 @@ def __init__(self, config=None): self.current = OrderedDict([(bb, 0) for bb in self._boundaries]) self.checkpoint = OrderedDict([(bb, 0) for bb in self._boundaries]) - self.exemplar_manager = ExemplarManager(config, BucketedExemplarSampler, BucketedExemplarSampler, boundaries=self._boundaries) + self.exemplar_manager = ExemplarManager( + config, + BucketedExemplarSampler, + BucketedExemplarSampler, + 
boundaries=self._boundaries, + ) self.current[">"] = 0 self.checkpoint[">"] = 0 @@ -209,14 +219,18 @@ def update(self, value, dropped_labels=None): # greater than max value if value >= self._boundaries[len(self._boundaries) - 1]: self.current[">"] += 1 - self.exemplar_manager.sample(value, dropped_labels, bucket_index=len(self._boundaries)) + self.exemplar_manager.sample( + value, dropped_labels, bucket_index=len(self._boundaries) + ) else: for index, bb in enumerate(self._boundaries): # find first bucket that value is less than if value < bb: self.current[bb] += 1 - self.exemplar_manager.sample(value, dropped_labels, bucket_index=index) + self.exemplar_manager.sample( + value, dropped_labels, bucket_index=index + ) break self.last_update_timestamp = time_ns() @@ -236,7 +250,9 @@ def merge(self, other): self.checkpoint, other.checkpoint ) - self.checkpoint_exemplars = self.exemplar_manager.merge(self.checkpoint_exemplars, other.checkpoint_exemplars) + self.checkpoint_exemplars = self.exemplar_manager.merge( + self.checkpoint_exemplars, other.checkpoint_exemplars + ) self.last_update_timestamp = get_latest_timestamp( self.last_update_timestamp, other.last_update_timestamp diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py index 9fc74cebe57..9eb82a5aed8 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py @@ -27,17 +27,27 @@ """ import abc -import random import itertools +import random from opentelemetry.context import get_current from opentelemetry.util import time_ns + class Exemplar: """ A sample data point for an aggregator. Exemplars represent individual measurements recorded. 
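+
+ For example, a 580-byte measurement recorded inside a sampled trace could be
+ captured as (field values here are hypothetical):
+
+ Exemplar(580, time_ns(), (("customer_id", 123),), span_id, trace_id)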
""" - def __init__(self, value, timestamp, dropped_labels=None, span_id=None, trace_id=None, sample_count=None): + + def __init__( + self, + value, + timestamp, + dropped_labels=None, + span_id=None, + trace_id=None, + sample_count=None, + ): self._value = value self._timestamp = timestamp self._span_id = span_id @@ -46,7 +56,13 @@ def __init__(self, value, timestamp, dropped_labels=None, span_id=None, trace_id self._dropped_labels = dropped_labels def __repr__(self): - return f"Exemplar(value={self._value}, timestamp={self._timestamp}, labels={dict(self._dropped_labels) if self._dropped_labels else None}, context={{'span_id':{self._span_id}, 'trace_id':{self._trace_id}}})" + return "Exemplar(value={}, timestamp={}, labels={}, context={{'span_id':{}, 'trace_id':{}}})".format( + self._value, + self._timestamp, + dict(self._dropped_labels) if self._dropped_labels else None, + self._span_id, + self._trace_id, + ) @property def value(self): @@ -67,24 +83,26 @@ def span_id(self): def trace_id(self): """The trace ID of the context when the exemplar was recorded""" return self._trace_id - + @property def dropped_labels(self): """Labels that were dropped by the aggregator but still passed by the user""" return self._dropped_labels - + @property def sample_count(self): """For statistical exemplars, how many measurements a single exemplar represents""" return self._sample_count - + def set_sample_count(self, count): self._sample_count = count + class ExemplarSampler: """ Abstract class to sample exemplars through a stream of incoming measurements """ + def __init__(self, k, statistical=False): self._k = k self._statistical = statistical @@ -95,7 +113,6 @@ def sample(self, exemplar, **kwargs): """ Given an exemplar, determine if it should be sampled or not """ - pass @property @abc.abstractmethod @@ -103,21 +120,19 @@ def sample_set(self): """ Return the list of exemplars that have been sampled """ - pass @abc.abstractmethod def merge(self, set1, set2): """ Given two lists of sampled exemplars, merge them while maintaining the invariants specified by this sampler """ - pass @abc.abstractmethod def reset(self): """ Reset the sampler """ - pass + class RandomExemplarSampler(ExemplarSampler): """ @@ -127,6 +142,7 @@ class RandomExemplarSampler(ExemplarSampler): If RandomExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. This value will be equal to the number of measurements recorded per every exemplar measured - all exemplars will have the same sample_count value. 
""" + def __init__(self, k, statistical=False): super().__init__(k, statistical=statistical) self.rand_count = 0 @@ -138,40 +154,52 @@ def sample(self, exemplar, **kwargs): self.sample_set.append(exemplar) return - j = random.randint(0, self.rand_count-1) + replace_index = random.randint(0, self.rand_count - 1) - if j < self._k: - self.sample_set[j] = exemplar + if replace_index < self._k: + self.sample_set[replace_index] = exemplar def merge(self, set1, set2): combined = set1 + set2 if len(combined) <= self._k: return combined - else: - return random.sample(combined, self._k) + return random.sample(combined, self._k) @property def sample_set(self): if self._statistical: for exemplar in self._sample_set: - exemplar.set_sample_count(self.rand_count / len(self._sample_set)) + exemplar.set_sample_count( + self.rand_count / len(self._sample_set) + ) return self._sample_set def reset(self): self._sample_set = [] self.rand_count = 0 + class MinMaxExemplarSampler(ExemplarSampler): """ Sample the minimum and maximum measurements recorded only """ + def __init__(self, k, statistical=False): # K will always be 2 (min and max), and selecting min and max can never be statistical super().__init__(2, statistical=False) self._sample_set = [] def sample(self, exemplar, **kwargs): - self._sample_set = [min(self._sample_set + [exemplar], key=lambda exemplar: exemplar.value), max(self._sample_set + [exemplar], key=lambda exemplar: exemplar.value)] + self._sample_set = [ + min( + self._sample_set + [exemplar], + key=lambda exemplar: exemplar.value, + ), + max( + self._sample_set + [exemplar], + key=lambda exemplar: exemplar.value, + ), + ] if self._sample_set[0] == self._sample_set[1]: self._sample_set = [self._sample_set[0]] @@ -189,6 +217,7 @@ def merge(self, set1, set2): def reset(self): self._sample_set = [] + class BucketedExemplarSampler(ExemplarSampler): """ Randomly sample k exemplars for each bucket in the aggregator. @@ -196,10 +225,14 @@ class BucketedExemplarSampler(ExemplarSampler): If `BucketedExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. This value will be equal to `len(bucket.exemplars) / bucket.count`, that is the number of measurements each exemplar represents. 
""" + def __init__(self, k, statistical=False, boundaries=None): super().__init__(k) self._boundaries = boundaries - self._sample_set = [RandomExemplarSampler(k, statistical=statistical) for _ in range(len(self._boundaries) + 1)] + self._sample_set = [ + RandomExemplarSampler(k, statistical=statistical) + for _ in range(len(self._boundaries) + 1) + ] def sample(self, exemplar, **kwargs): bucket_index = kwargs.get("bucket_index") @@ -210,10 +243,15 @@ def sample(self, exemplar, **kwargs): @property def sample_set(self): - return list(itertools.chain.from_iterable([sampler.sample_set for sampler in self._sample_set])) + return list( + itertools.chain.from_iterable( + [sampler.sample_set for sampler in self._sample_set] + ) + ) def merge(self, set1, set2): exemplar_set = [list() for _ in range(len(self._boundaries) + 1)] + # Sort both sets back into buckets for setx in [set1, set2]: bucket_idx = 0 for exemplar in setx: @@ -224,16 +262,18 @@ def merge(self, set1, set2): while exemplar.value >= self._boundaries[bucket_idx]: bucket_idx += 1 exemplar_set[bucket_idx].append(exemplar) - - for i, inner_set in enumerate(exemplar_set): + + # Pick only k exemplars for every bucket + for index, inner_set in enumerate(exemplar_set): if len(inner_set) > self._k: - exemplar_set[i] = random.sample(inner_set, self._k) + exemplar_set[index] = random.sample(inner_set, self._k) return list(itertools.chain.from_iterable(exemplar_set)) - + def reset(self): for sampler in self._sample_set: sampler.reset() + class ExemplarManager: """ Manages two different exemplar samplers: @@ -241,28 +281,58 @@ class ExemplarManager: 2. A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) """ - def __init__(self, config, default_exemplar_sampler, statistical_exemplar_sampler=None, **kwargs): + def __init__( + self, + config, + default_exemplar_sampler, + statistical_exemplar_sampler, + **kwargs + ): if config: - self.exemplars_count = config.get('num_exemplars', 0) + self.exemplars_count = config.get("num_exemplars", 0) self.record_exemplars = self.exemplars_count > 0 - self.statistical_exemplars = config.get('statistical_exemplars', False) - if self.statistical_exemplars and statistical_exemplar_sampler: - self.exemplar_sampler = statistical_exemplar_sampler(self.exemplars_count, statistical=self.statistical_exemplars, **kwargs) + self.statistical_exemplars = config.get( + "statistical_exemplars", False + ) + if self.statistical_exemplars: + self.exemplar_sampler = statistical_exemplar_sampler( + self.exemplars_count, + statistical=self.statistical_exemplars, + **kwargs + ) else: - self.exemplar_sampler = default_exemplar_sampler(self.exemplars_count, statistical=self.statistical_exemplars, **kwargs) + self.exemplar_sampler = default_exemplar_sampler( + self.exemplars_count, + statistical=self.statistical_exemplars, + **kwargs + ) else: self.record_exemplars = False def sample(self, value, dropped_labels, **kwargs): context = get_current() - is_sampled = 'current-span' in context and context['current-span'].get_context().trace_flags.sampled if context else False + is_sampled = ( + "current-span" in context + and context["current-span"].get_context().trace_flags.sampled + if context + else False + ) # if not statistical, we want to gather traced exemplars only - so otherwise don't sample - if self.record_exemplars and (is_sampled or self.statistical_exemplars): - span_id = context['current-span'].context.span_id if context else None - trace_id = 
context['current-span'].context.trace_id if context else None - self.exemplar_sampler.sample(Exemplar(value, time_ns(), dropped_labels, span_id, trace_id), **kwargs) + if self.record_exemplars and ( + is_sampled or self.statistical_exemplars + ): + span_id = ( + context["current-span"].context.span_id if context else None + ) + trace_id = ( + context["current-span"].context.trace_id if context else None + ) + self.exemplar_sampler.sample( + Exemplar(value, time_ns(), dropped_labels, span_id, trace_id), + **kwargs + ) def take_checkpoint(self): if self.record_exemplars: @@ -273,5 +343,7 @@ def take_checkpoint(self): def merge(self, checkpoint_exemplars, other_checkpoint_exemplars): if self.record_exemplars: - return self.exemplar_sampler.merge(checkpoint_exemplars, other_checkpoint_exemplars) + return self.exemplar_sampler.merge( + checkpoint_exemplars, other_checkpoint_exemplars + ) return [] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py index 2f85c95573c..ec1e9df6f71 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/view.py @@ -39,13 +39,20 @@ class ViewData: - def __init__(self, labels: Tuple[Tuple[str, str]], aggregator: Aggregator, dropped_labels: Tuple[Tuple[str, str]] = None): + def __init__( + self, labels: Tuple[Tuple[str, str]], aggregator: Aggregator, + ): self.labels = labels self.aggregator = aggregator - self.dropped_labels = dropped_labels - def record(self, value: ValueT): - self.aggregator.update(value, dropped_labels=self.dropped_labels) + def record(self, value: ValueT, all_labels: Tuple[Tuple[str, str]]): + label_dict = dict(self.labels) + self.aggregator.update( + value, + dropped_labels=tuple( + filter(lambda label: label[0] not in label_dict, all_labels) + ), + ) # Uniqueness is based on labels and aggregator type def __hash__(self): diff --git a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py index 95412d2b8e9..e80dee1a832 100644 --- a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py +++ b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py @@ -13,31 +13,32 @@ # limitations under the License. 
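The `ViewData.record` change above derives `dropped_labels` by set difference: everything passed at the call site that the view's label keys do not retain. Roughly (hypothetical labels):

view_labels = (("environment", "staging"),)  # labels the view keeps
all_labels = (("environment", "staging"), ("customer_id", "123"))

label_dict = dict(view_labels)
dropped = tuple(
    label for label in all_labels if label[0] not in label_dict
)
assert dropped == (("customer_id", "123"),)
# the aggregator receives these via update(value, dropped_labels=dropped)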
import unittest -from unittest.mock import patch from time import time +from unittest.mock import patch +from opentelemetry import metrics +from opentelemetry.sdk.metrics import MeterProvider, ValueRecorder from opentelemetry.sdk.metrics.export.aggregate import ( - SumAggregator, - MinMaxSumCountAggregator, HistogramAggregator, - Exemplar, - RandomExemplarSampler, MinMaxExemplarSampler, + MinMaxSumCountAggregator, + SumAggregator, + ValueObserverAggregator, +) +from opentelemetry.sdk.metrics.export.controller import PushController +from opentelemetry.sdk.metrics.export.exemplars import ( BucketedExemplarSampler, + Exemplar, ExemplarManager, - ValueObserverAggregator + RandomExemplarSampler, ) -from opentelemetry.sdk.metrics import ( - MeterProvider, - ValueRecorder, +from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import ( + InMemoryMetricsExporter, ) -from opentelemetry import trace, metrics +from opentelemetry.sdk.metrics.view import View, ViewConfig from opentelemetry.sdk.trace import TracerProvider from opentelemetry.trace.sampling import ALWAYS_OFF, ALWAYS_ON -from opentelemetry.sdk.metrics.export.in_memory_metrics_exporter import InMemoryMetricsExporter -from opentelemetry.sdk.metrics.view import View, ViewConfig -from opentelemetry.sdk.metrics.export.controller import PushController class TestRandomExemplarSampler(unittest.TestCase): def test_sample(self): @@ -57,8 +58,9 @@ def test_sample(self): self.assertEqual(exemplar1.sample_count, 1) self.assertEqual(exemplar2.sample_count, 1) - def _patched_randint(mn, mx): - return mn + def _patched_randint(minimum, maximum): + # pylint: disable=unused-argument + return minimum with patch("random.randint", _patched_randint): sampler.sample(exemplar3) @@ -66,8 +68,9 @@ def _patched_randint(mn, mx): self.assertEqual(sampler.sample_set[0], exemplar3) self.assertEqual(exemplar3.sample_count, 1.5) self.assertEqual(exemplar2.sample_count, 1.5) - - def _patched_randint(mn, mx): + + def _patched_randint(minimum, maximum): + # pylint: disable=unused-argument return 1 with patch("random.randint", _patched_randint): @@ -94,9 +97,9 @@ def test_merge(self): set1 = [1, 2, 3] set2 = [4, 5, 6] sampler = RandomExemplarSampler(6) - self.assertEqual(set1+set2, sampler.merge(set1, set2)) + self.assertEqual(set1 + set2, sampler.merge(set1, set2)) sampler = RandomExemplarSampler(8) - self.assertEqual(set1+set2, sampler.merge(set1, set2)) + self.assertEqual(set1 + set2, sampler.merge(set1, set2)) sampler = RandomExemplarSampler(4) self.assertEqual(4, len(sampler.merge(set1, set2))) @@ -145,7 +148,9 @@ def test_merge(self): class TestBucketedExemplarSampler(unittest.TestCase): def test_exemplars(self): - sampler = BucketedExemplarSampler(1, boundaries=[2, 4, 7], statistical=True) + sampler = BucketedExemplarSampler( + 1, boundaries=[2, 4, 7], statistical=True + ) sampler.sample(Exemplar(3, time()), bucket_index=1) self.assertEqual(len(sampler.sample_set), 1) self.assertEqual(sampler.sample_set[0].value, 3) @@ -156,7 +161,8 @@ def test_exemplars(self): self.assertEqual(sampler.sample_set[1].value, 5) self.assertEqual(sampler.sample_set[1].sample_count, 1) - def _patched_randint(mn, mx): + def _patched_randint(minimum, maximum): + # pylint: disable=unused-argument return 0 with patch("random.randint", _patched_randint): @@ -178,20 +184,35 @@ def _patched_randint(mn, mx): def test_merge(self): sampler = BucketedExemplarSampler(1, boundaries=[3, 4, 6]) - self.assertEqual(len(sampler.merge([Exemplar(1, time())], [Exemplar(2, time())])), 1) + 
self.assertEqual( + len(sampler.merge([Exemplar(1, time())], [Exemplar(2, time())])), 1 + ) - self.assertEqual(len(sampler.merge([Exemplar(1, time()), Exemplar(5, time())], [Exemplar(2, time())])), 2) + self.assertEqual( + len( + sampler.merge( + [Exemplar(1, time()), Exemplar(5, time())], + [Exemplar(2, time())], + ) + ), + 2, + ) class TestExemplarManager(unittest.TestCase): def test_statistical(self): config = {"statistical_exemplars": True, "num_exemplars": 1} - manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + manager = ExemplarManager( + config, MinMaxExemplarSampler, RandomExemplarSampler + ) self.assertIsInstance(manager.exemplar_sampler, RandomExemplarSampler) manager.sample(5, {"dropped_label": "value"}) self.assertEqual(len(manager.exemplar_sampler.sample_set), 1) self.assertEqual(manager.exemplar_sampler.sample_set[0].value, 5) - self.assertEqual(manager.exemplar_sampler.sample_set[0].dropped_labels, {"dropped_label": "value"}) + self.assertEqual( + manager.exemplar_sampler.sample_set[0].dropped_labels, + {"dropped_label": "value"}, + ) checkpoint = manager.take_checkpoint() self.assertEqual(len(checkpoint), 1) @@ -204,7 +225,9 @@ def test_statistical(self): def test_semantic(self): config = {"statistical_exemplars": True, "num_exemplars": 1} - manager = ExemplarManager(config, MinMaxExemplarSampler, RandomExemplarSampler) + manager = ExemplarManager( + config, MinMaxExemplarSampler, RandomExemplarSampler + ) self.assertIsInstance(manager.exemplar_sampler, RandomExemplarSampler) manager.sample(5, {}) self.assertEqual(len(manager.exemplar_sampler.sample_set), 1) @@ -229,7 +252,9 @@ def _no_exemplars_test(self, aggregator): agg.take_checkpoint() self.assertEqual(agg.checkpoint_exemplars, []) - other_agg = aggregator(config={"num_exemplars": 2, "statistical_exemplars": True}) + other_agg = aggregator( + config={"num_exemplars": 2, "statistical_exemplars": True} + ) other_agg.update(2) other_agg.update(4) other_agg.take_checkpoint() @@ -244,7 +269,10 @@ def _simple_exemplars_test(self, aggregator): agg.take_checkpoint() self.assertEqual(len(agg.checkpoint_exemplars), 1) self.assertEqual(agg.checkpoint_exemplars[0].value, 2) - self.assertEqual(agg.checkpoint_exemplars[0].dropped_labels, {"dropped_label": "value"}) + self.assertEqual( + agg.checkpoint_exemplars[0].dropped_labels, + {"dropped_label": "value"}, + ) agg.update(2) agg.update(5) @@ -255,8 +283,10 @@ def _simple_exemplars_test(self, aggregator): agg.update(2) agg.update(5) - def _patched_randint(mn, mx): + def _patched_randint(minimum, maximum): + # pylint: disable=unused-argument return 1 + with patch("random.randint", _patched_randint): agg.update(7) @@ -282,11 +312,13 @@ def _record_traces_only_test(self, aggregator): agg.update(5) agg.update(7) agg.update(6) - + agg.take_checkpoint() self.assertEqual(len(agg.checkpoint_exemplars), 2) - self.assertEqual(agg.checkpoint_exemplars[0].span_id, span.context.span_id) + self.assertEqual( + agg.checkpoint_exemplars[0].span_id, span.context.span_id + ) self.assertEqual(agg.checkpoint_exemplars[0].value, 5) self.assertEqual(agg.checkpoint_exemplars[1].value, 7) @@ -346,7 +378,11 @@ def test_no_exemplars(self): agg.take_checkpoint() self.assertEqual(agg.checkpoint_exemplars, []) - other_agg = HistogramAggregator(config=dict(config, **{"num_exemplars": 1, "statistical_exemplars": True})) + other_agg = HistogramAggregator( + config=dict( + config, **{"num_exemplars": 1, "statistical_exemplars": True} + ) + ) other_agg.update(3) other_agg.update(5) @@ 
-357,13 +393,20 @@ def test_no_exemplars(self): self.assertEqual(agg.checkpoint_exemplars, []) def test_simple_exemplars(self): - config = {"bounds": [2, 4, 7], "num_exemplars": 1, "statistical_exemplars": True} + config = { + "bounds": [2, 4, 7], + "num_exemplars": 1, + "statistical_exemplars": True, + } agg = HistogramAggregator(config=config) agg.update(2, dropped_labels={"dropped_label": "value"}) agg.take_checkpoint() self.assertEqual(len(agg.checkpoint_exemplars), 1) self.assertEqual(agg.checkpoint_exemplars[0].value, 2) - self.assertEqual(agg.checkpoint_exemplars[0].dropped_labels, {"dropped_label": "value"}) + self.assertEqual( + agg.checkpoint_exemplars[0].dropped_labels, + {"dropped_label": "value"}, + ) agg.update(2) agg.update(5) @@ -373,7 +416,8 @@ def test_simple_exemplars(self): agg.update(5) - def _patched_randint(mn, mx): + def _patched_randint(minimum, maximum): + # pylint: disable=unused-argument return 0 with patch("random.randint", _patched_randint): @@ -391,7 +435,11 @@ def _patched_randint(mn, mx): self.assertEqual(len(agg.checkpoint_exemplars), 4) def test_record_traces_only(self): - config = {"bounds": [2, 4, 6], "num_exemplars": 2, "statistical_exemplars": False} + config = { + "bounds": [2, 4, 6], + "num_exemplars": 2, + "statistical_exemplars": False, + } agg = HistogramAggregator(config=config) agg.update(2) @@ -409,7 +457,9 @@ def test_record_traces_only(self): agg.take_checkpoint() self.assertEqual(len(agg.checkpoint_exemplars), 1) - self.assertEqual(agg.checkpoint_exemplars[0].span_id, span.context.span_id) + self.assertEqual( + agg.checkpoint_exemplars[0].span_id, span.context.span_id + ) tp = TracerProvider(sampler=ALWAYS_OFF) tracer = tp.get_tracer(__name__) @@ -420,6 +470,7 @@ def test_record_traces_only(self): agg.take_checkpoint() self.assertEqual(len(agg.checkpoint_exemplars), 0) + class TestFullPipelineExemplars(unittest.TestCase): def test_histogram(self): # Use the meter type provided by the SDK package @@ -438,9 +489,14 @@ def test_histogram(self): size_view = View( requests_size, - HistogramAggregator(config={"bounds": [20, 40, 60, 80, 100], "num_exemplars": 1, "statistical_exemplars": True}), + HistogramAggregator, + aggregator_config={ + "bounds": (20, 40, 60, 80, 100), + "num_exemplars": 1, + "statistical_exemplars": True, + }, label_keys=["environment"], - config=ViewConfig.LABEL_KEYS, + view_config=ViewConfig.LABEL_KEYS, ) meter.register_view(size_view) @@ -456,5 +512,14 @@ def test_histogram(self): self.assertEqual(len(metrics_list), 1) exemplars = metrics_list[0].aggregator.checkpoint_exemplars self.assertEqual(len(exemplars), 3) - self.assertEqual([(exemplar.value, exemplar.dropped_labels) for exemplar in exemplars], - [(1, (("test", "value2"),)), (25, (("test", "value"),)), (200, (("test", "value3"),))]) + self.assertEqual( + [ + (exemplar.value, exemplar.dropped_labels) + for exemplar in exemplars + ], + [ + (1, (("test", "value2"),)), + (25, (("test", "value"),)), + (200, (("test", "value3"),)), + ], + ) diff --git a/opentelemetry-sdk/tests/metrics/export/test_export.py b/opentelemetry-sdk/tests/metrics/export/test_export.py index 99aa9c4a629..c262a5dd202 100644 --- a/opentelemetry-sdk/tests/metrics/export/test_export.py +++ b/opentelemetry-sdk/tests/metrics/export/test_export.py @@ -50,7 +50,7 @@ def test_export(self): labels = {"environment": "staging"} aggregator = SumAggregator() record = MetricRecord(metric, labels, aggregator) - result = '{}(data="{}", labels="{}", value={})'.format( + result = '{}(data="{}", labels="{}", 
value={}, exemplars=[])'.format( ConsoleMetricsExporter.__name__, metric, labels, diff --git a/opentelemetry-sdk/tests/metrics/test_metrics.py b/opentelemetry-sdk/tests/metrics/test_metrics.py index b854f2d5db9..d620d5eb6f5 100644 --- a/opentelemetry-sdk/tests/metrics/test_metrics.py +++ b/opentelemetry-sdk/tests/metrics/test_metrics.py @@ -496,7 +496,7 @@ def test_add(self): view_datas_mock = mock.Mock() bound_metric.view_datas = [view_datas_mock] bound_metric.add(3) - view_datas_mock.record.assert_called_once_with(3) + view_datas_mock.record.assert_called_once_with(3, ()) def test_add_disabled(self): meter_mock = mock.Mock() @@ -538,7 +538,7 @@ def test_record(self): view_datas_mock = mock.Mock() bound_valuerecorder.view_datas = [view_datas_mock] bound_valuerecorder.record(3) - view_datas_mock.record.assert_called_once_with(3) + view_datas_mock.record.assert_called_once_with(3, ()) def test_record_disabled(self): meter_mock = mock.Mock() From f3ed3f36936b6b70704b1c068a530bb95520c741 Mon Sep 17 00:00:00 2001 From: Connor Adams Date: Thu, 6 Aug 2020 15:31:36 -0400 Subject: [PATCH 3/6] semantic -> trace, link to wiki --- docs/examples/exemplars/README.rst | 6 +++--- .../{semantic_exemplars.py => trace_exemplars.py} | 2 +- .../opentelemetry/sdk/metrics/export/exemplars.py | 6 ++++-- .../tests/metrics/export/test_exemplars.py | 15 +++------------ 4 files changed, 11 insertions(+), 18 deletions(-) rename docs/examples/exemplars/{semantic_exemplars.py => trace_exemplars.py} (95%) diff --git a/docs/examples/exemplars/README.rst b/docs/examples/exemplars/README.rst index b49a02b8de6..a647071e365 100644 --- a/docs/examples/exemplars/README.rst +++ b/docs/examples/exemplars/README.rst @@ -28,13 +28,13 @@ The opentelemetry SDK provides a way to sample exemplars statistically: See 'statistical_exemplars.ipynb' for the example (TODO: how do I link this?) -Semantic exemplars +Trace exemplars ^^^^^^^^^^^^^^^^^^ -Semantic exemplars are exemplars that have not been sampled statistically, +Trace exemplars are exemplars that have not been sampled statistically, but instead aim to provide value as individual exemplars. They will have a trace id/span id attached for the active trace when the exemplar was recorded, and they may focus on measurements with abnormally high/low values. -'semantic_exemplars.py' shows how to generate exemplars for a histogram aggregation. +'trace_exemplars.py' shows how to generate exemplars for a histogram aggregation. Currently only the Google Cloud Monitoring exporter supports uploading these exemplars. diff --git a/docs/examples/exemplars/semantic_exemplars.py b/docs/examples/exemplars/trace_exemplars.py similarity index 95% rename from docs/examples/exemplars/semantic_exemplars.py rename to docs/examples/exemplars/trace_exemplars.py index bf80ede1f6b..735e329d9ac 100644 --- a/docs/examples/exemplars/semantic_exemplars.py +++ b/docs/examples/exemplars/trace_exemplars.py @@ -13,7 +13,7 @@ # limitations under the License. # """ -This example shows how to generate "semantic" exemplars for a histogram, and how to export them to Google Cloud Monitoring. +This example shows how to generate trace exemplars for a histogram, and how to export them to Google Cloud Monitoring. 
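The renamed example reduces to a view wired for trace exemplars, sketched here with the keyword-style `View` signature used in the updated tests (metric name, bounds, and counts are made up):

from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider, ValueRecorder
from opentelemetry.sdk.metrics.export.aggregate import HistogramAggregator
from opentelemetry.sdk.metrics.view import View, ViewConfig

metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)

requests_size = meter.create_metric(
    name="request_size",
    description="Size of requests",
    unit="By",
    value_type=int,
    metric_type=ValueRecorder,
)

# statistical_exemplars=False records only exemplars that carry a sampled
# trace context, so each one links back to a span id / trace id.
size_view = View(
    requests_size,
    HistogramAggregator,
    aggregator_config={
        "bounds": (20, 40, 60, 80, 100),
        "num_exemplars": 1,
        "statistical_exemplars": False,
    },
    label_keys=["environment"],
    view_config=ViewConfig.LABEL_KEYS,
)
meter.register_view(size_view)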
""" import random diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py index 9eb82a5aed8..5f4b8677125 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py @@ -16,7 +16,7 @@ Exemplars are sample data points for aggregators. For more information, see the `spec `_ Every synchronous aggregator is instrumented with two exemplar recorders: - 1. A "semantic" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, ie min + max). + 1. A "trace" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, ie min + max). 2. A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the "Exemplars" example for an example): @@ -154,6 +154,8 @@ def sample(self, exemplar, **kwargs): self.sample_set.append(exemplar) return + # We sample a random subset of a stream using "Algorithm R": + # https://en.wikipedia.org/wiki/Reservoir_sampling#Simple_algorithm replace_index = random.randint(0, self.rand_count - 1) if replace_index < self._k: @@ -277,7 +279,7 @@ def reset(self): class ExemplarManager: """ Manages two different exemplar samplers: - 1. A "semantic" exemplar sampler, which only samples exemplars if they have a sampled trace context. + 1. A "trace" exemplar sampler, which only samples exemplars if they have a sampled trace context. 2. A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) """ diff --git a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py index e80dee1a832..77e34b69247 100644 --- a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py +++ b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py @@ -223,21 +223,12 @@ def test_statistical(self): merged = manager.merge([Exemplar(2, time())], [Exemplar(3, time())]) self.assertEqual(len(merged), 1) - def test_semantic(self): - config = {"statistical_exemplars": True, "num_exemplars": 1} + def test_trace(self): + config = {"statistical_exemplars": False, "num_exemplars": 1} manager = ExemplarManager( config, MinMaxExemplarSampler, RandomExemplarSampler ) - self.assertIsInstance(manager.exemplar_sampler, RandomExemplarSampler) - manager.sample(5, {}) - self.assertEqual(len(manager.exemplar_sampler.sample_set), 1) - self.assertEqual(manager.exemplar_sampler.sample_set[0].value, 5) - - checkpoint = manager.take_checkpoint() - self.assertEqual(len(checkpoint), 1) - self.assertEqual(checkpoint[0].value, 5) - - self.assertEqual(len(manager.exemplar_sampler.sample_set), 0) + self.assertIsInstance(manager.exemplar_sampler, MinMaxExemplarSampler) merged = manager.merge([Exemplar(2, time())], [Exemplar(3, time())]) self.assertEqual(len(merged), 1) From a4047059311d9f5e8eae5efa4db41c0f007ba6f2 Mon Sep 17 00:00:00 2001 From: Connor Adams Date: Thu, 6 Aug 2020 18:59:40 -0400 Subject: [PATCH 4/6] fixes --- docs/examples/exemplars/README.rst | 4 +- .../exemplars/statistical_exemplars.ipynb | 4 +- .../exemplars/statistical_exemplars.py | 18 +-- .../sdk/metrics/export/exemplars.py | 121 +++++++++--------- .../tests/metrics/export/test_exemplars.py | 24 ++-- 5 
files changed, 86 insertions(+), 85 deletions(-) diff --git a/docs/examples/exemplars/README.rst b/docs/examples/exemplars/README.rst index a647071e365..deed297efdb 100644 --- a/docs/examples/exemplars/README.rst +++ b/docs/examples/exemplars/README.rst @@ -1,6 +1,8 @@ OpenTelemetry Exemplars Example =============================== +.. _Exemplars: + Exemplars are example measurements for aggregations. While they are simple conceptually, exemplars can estimate any statistic about the input distribution, can provide links to sample traces for high latency requests, and much more. For more information about exemplars and how they work in OpenTelemetry, see the `spec `_ @@ -24,7 +26,7 @@ Statistical exemplars The opentelemetry SDK provides a way to sample exemplars statistically: - Exemplars will be picked to represent the input distribution, without unquantifiable bias - - A "sample_count" attribute will be set on each exemplar to quantify how many measurements each exemplar represents + - A "sample_count" attribute will be set on each exemplar to quantify how many measurements each exemplar represents (for randomly sampled exemplars, this value will be N (total measurements) / num_samples. For histogram exemplars, this value will be specific to each bucket). See 'statistical_exemplars.ipynb' for the example (TODO: how do I link this?) diff --git a/docs/examples/exemplars/statistical_exemplars.ipynb b/docs/examples/exemplars/statistical_exemplars.ipynb index 5f3659e41e8..ca7edd1c3db 100644 --- a/docs/examples/exemplars/statistical_exemplars.ipynb +++ b/docs/examples/exemplars/statistical_exemplars.ipynb @@ -122,7 +122,7 @@ " random.seed(1)\n", "\n", " # customer 123 is a big user, and made 1000 requests in this timeframe\n", - " requests = np.random.normal(1000, 250, 1000) # 1000 requests with average 1000 bytes, covariance 100\n", + " requests = np.random.normal(1000, 250, 1000) # 1000 requests with average 1000 bytes, standard deviation 250\n", "\n", " for request in requests:\n", " bytes_counter.add(int(request), {\"environment\": \"production\", \"method\": \"REST\", \"customer_id\": 123})\n", @@ -205,7 +205,7 @@ " customer_bytes_map[exemplar.dropped_labels] += exemplar.value\n", "\n", "\n", - "customer_bytes_list = sorted(list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True)\n", + "customer_bytes_list = sorted(customer_bytes_map.items(), key=lambda t: t[1], reverse=True)\n", "\n", "# Save our top 5 customers and sum all of the rest into \"Others\".\n", "top_3_customers = [(\"Customer {}\".format(dict(val[0])[\"customer_id\"]), val[1]) for val in customer_bytes_list[:3]] + [(\"Other Customers\", sum([val[1] for val in customer_bytes_list[3:]]))]\n", diff --git a/docs/examples/exemplars/statistical_exemplars.py b/docs/examples/exemplars/statistical_exemplars.py index 25fd29e82ba..b7a3ffbd5cd 100644 --- a/docs/examples/exemplars/statistical_exemplars.py +++ b/docs/examples/exemplars/statistical_exemplars.py @@ -61,8 +61,8 @@ def unknown_customer_calls(): # customer 123 is a big user, and made 1000 requests in this timeframe requests = np.random.normal( - 1000, 250, 1000 - ) # 1000 requests with average 1000 bytes, covariance 100 + 1000, 100, 1000 + ) # 1000 requests with average 1000 bytes, standard deviation 100 for request in requests: bytes_counter.add( @@ -123,7 +123,7 @@ def unknown_customer_calls(): customer_bytes_list = sorted( - list(customer_bytes_map.items()), key=lambda t: t[1], reverse=True + customer_bytes_map.items(), key=lambda t: t[1], reverse=True ) # Save our 
top 5 customers and sum all of the rest into "Others". @@ -146,7 +146,6 @@ def unknown_customer_calls(): # Since the exemplars were randomly sampled, all sample_counts will be the same sample_count = exemplars[0].sample_count -print("sample count", sample_count, "custmer", customer_123_bytes) full_customer_123_bytes = sample_count * customer_123_bytes # With seed == 1 we get 1008612 - quite close to the statistical mean of 1000000! (more exemplars would make this estimation even more accurate) @@ -160,13 +159,10 @@ def unknown_customer_calls(): top_25_customers = customer_bytes_list[:25] # out of those 25 customers, determine how many used grpc, and come up with a ratio -percent_grpc = len( - list( - filter( - lambda customer_value: customer_value[0][1][1] == "gRPC", - top_25_customers, - ) - ) +percent_grpc = sum( + 1 + for customer_value in top_25_customers + if customer_value[0][1][1] == "gRPC" ) / len(top_25_customers) print( diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py index 5f4b8677125..3de83fdaefe 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py @@ -19,7 +19,7 @@ 1. A "trace" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, ie min + max). 2. A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars) - To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the "Exemplars" example for an example): + To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the :ref:`Exemplars` example for an example): "num_exemplars": The number of exemplars to record (if applicable, in each bucket). 
Note that in non-statistical mode the recorder may not use "num_exemplars" "statistical_exemplars": If exemplars should be recorded statistically @@ -29,6 +29,7 @@ import abc import itertools import random +from typing import List, Optional, Tuple, Union from opentelemetry.context import get_current from opentelemetry.util import time_ns @@ -41,12 +42,12 @@ class Exemplar: def __init__( self, - value, - timestamp, - dropped_labels=None, - span_id=None, - trace_id=None, - sample_count=None, + value: Union[int, float], + timestamp: int, + dropped_labels: Optional[Tuple[Tuple[str, str]]] = None, + span_id: Optional[bytes] = None, + trace_id: Optional[bytes] = None, + sample_count: Optional[float] = None, ): self._value = value self._timestamp = timestamp @@ -94,22 +95,22 @@ def sample_count(self): """For statistical exemplars, how many measurements a single exemplar represents""" return self._sample_count - def set_sample_count(self, count): + def set_sample_count(self, count: float): self._sample_count = count -class ExemplarSampler: +class ExemplarSampler(abc.ABC): """ - Abstract class to sample exemplars through a stream of incoming measurements + Abstract class to sample `k` exemplars in some way through a stream of incoming measurements """ - def __init__(self, k, statistical=False): + def __init__(self, k: int, statistical: bool = False): self._k = k self._statistical = statistical - self._sample_set = list() + self._sample_set = [] @abc.abstractmethod - def sample(self, exemplar, **kwargs): + def sample(self, exemplar: Exemplar, **kwargs): """ Given an exemplar, determine if it should be sampled or not """ @@ -122,7 +123,7 @@ def sample_set(self): """ @abc.abstractmethod - def merge(self, set1, set2): + def merge(self, set1: List[Exemplar], set2: List[Exemplar]): """ Given two lists of sampled exemplars, merge them while maintaining the invariants specified by this sampler """ @@ -139,19 +140,19 @@ class RandomExemplarSampler(ExemplarSampler): Randomly sample a set of k exemplars from a stream. Each measurement in the stream will have an equal chance of being sampled. - If RandomExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. + If `RandomExemplarSampler` is specified to be statistical, it will add a sample_count to every exemplar it records. This value will be equal to the number of measurements recorded per every exemplar measured - all exemplars will have the same sample_count value. """ - def __init__(self, k, statistical=False): + def __init__(self, k: int, statistical: bool = False): super().__init__(k, statistical=statistical) self.rand_count = 0 - def sample(self, exemplar, **kwargs): + def sample(self, exemplar: Exemplar, **kwargs): self.rand_count += 1 - if len(self.sample_set) < self._k: - self.sample_set.append(exemplar) + if len(self._sample_set) < self._k: + self._sample_set.append(exemplar) return # We sample a random subset of a stream using "Algorithm R": @@ -159,13 +160,15 @@ def sample(self, exemplar, **kwargs): replace_index = random.randint(0, self.rand_count - 1) if replace_index < self._k: - self.sample_set[replace_index] = exemplar + self._sample_set[replace_index] = exemplar - def merge(self, set1, set2): - combined = set1 + set2 - if len(combined) <= self._k: - return combined - return random.sample(combined, self._k) + def merge(self, set1: List[Exemplar], set2: List[Exemplar]): + """ + Assume that set2 is the latest set of exemplars. 
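Concretely, the new contract keeps only the most recent checkpoint, which is what the updated tests below assert (a sketch with made-up exemplars):

from opentelemetry.sdk.metrics.export.exemplars import (
    Exemplar,
    RandomExemplarSampler,
)
from opentelemetry.util import time_ns

already_exported = [Exemplar(1, time_ns())]
latest = [Exemplar(2, time_ns()), Exemplar(3, time_ns())]

sampler = RandomExemplarSampler(4)
# set1 is assumed to have been exported already, so it is simply dropped
assert sampler.merge(already_exported, latest) == latest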
+ For simplicity, we will just keep set2 and assume set1 has already been exported. + This may need to change with a different SDK implementation. + """ + return set2 @property def sample_set(self): @@ -186,12 +189,12 @@ class MinMaxExemplarSampler(ExemplarSampler): Sample the minimum and maximum measurements recorded only """ - def __init__(self, k, statistical=False): + def __init__(self, k: int, statistical: bool = False): # K will always be 2 (min and max), and selecting min and max can never be statistical super().__init__(2, statistical=False) self._sample_set = [] - def sample(self, exemplar, **kwargs): + def sample(self, exemplar: Exemplar, **kwargs): self._sample_set = [ min( self._sample_set + [exemplar], @@ -209,12 +212,13 @@ def sample(self, exemplar, **kwargs): def sample_set(self): return self._sample_set - def merge(self, set1, set2): - merged_set = set1 + set2 - if len(merged_set) <= 2: - return sorted(merged_set, key=lambda exemplar: exemplar.value) - - return [min(merged_set), max(merged_set)] + def merge(self, set1: List[Exemplar], set2: List[Exemplar]): + """ + Assume that set2 is the latest set of exemplars. + For simplicity, we will just keep set2 and assume set1 has already been exported. + This may need to change with a different SDK implementation. + """ + return set2 def reset(self): self._sample_set = [] @@ -228,7 +232,9 @@ class BucketedExemplarSampler(ExemplarSampler): This value will be equal to `len(bucket.exemplars) / bucket.count`, that is the number of measurements each exemplar represents. """ - def __init__(self, k, statistical=False, boundaries=None): + def __init__( + self, k: int, statistical: bool = False, boundaries: list = None + ): super().__init__(k) self._boundaries = boundaries self._sample_set = [ @@ -236,7 +242,7 @@ def __init__(self, k, statistical=False, boundaries=None): for _ in range(len(self._boundaries) + 1) ] - def sample(self, exemplar, **kwargs): + def sample(self, exemplar: Exemplar, **kwargs): bucket_index = kwargs.get("bucket_index") if bucket_index is None: return @@ -251,25 +257,13 @@ def sample_set(self): ) ) - def merge(self, set1, set2): - exemplar_set = [list() for _ in range(len(self._boundaries) + 1)] - # Sort both sets back into buckets - for setx in [set1, set2]: - bucket_idx = 0 - for exemplar in setx: - if exemplar.value >= self._boundaries[-1]: - exemplar_set[-1].append(exemplar) - continue - - while exemplar.value >= self._boundaries[bucket_idx]: - bucket_idx += 1 - exemplar_set[bucket_idx].append(exemplar) - - # Pick only k exemplars for every bucket - for index, inner_set in enumerate(exemplar_set): - if len(inner_set) > self._k: - exemplar_set[index] = random.sample(inner_set, self._k) - return list(itertools.chain.from_iterable(exemplar_set)) + def merge(self, set1: List[Exemplar], set2: List[Exemplar]): + """ + Assume that set2 is the latest set of exemplars. + For simplicity, we will just keep set2 and assume set1 has already been exported. + This may need to change with a different SDK implementation. 
+ """ + return set2 def reset(self): for sampler in self._sample_set: @@ -285,9 +279,9 @@ class ExemplarManager: def __init__( self, - config, - default_exemplar_sampler, - statistical_exemplar_sampler, + config: dict, + default_exemplar_sampler: ExemplarSampler, + statistical_exemplar_sampler: ExemplarSampler, **kwargs ): if config: @@ -311,7 +305,12 @@ def __init__( else: self.record_exemplars = False - def sample(self, value, dropped_labels, **kwargs): + def sample( + self, + value: Union[int, float], + dropped_labels: Tuple[Tuple[str, str]], + **kwargs + ): context = get_current() is_sampled = ( @@ -343,7 +342,11 @@ def take_checkpoint(self): return ret return [] - def merge(self, checkpoint_exemplars, other_checkpoint_exemplars): + def merge( + self, + checkpoint_exemplars: List[Exemplar], + other_checkpoint_exemplars: List[Exemplar], + ): if self.record_exemplars: return self.exemplar_sampler.merge( checkpoint_exemplars, other_checkpoint_exemplars diff --git a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py index 77e34b69247..4e78f4d23e9 100644 --- a/opentelemetry-sdk/tests/metrics/export/test_exemplars.py +++ b/opentelemetry-sdk/tests/metrics/export/test_exemplars.py @@ -97,11 +97,11 @@ def test_merge(self): set1 = [1, 2, 3] set2 = [4, 5, 6] sampler = RandomExemplarSampler(6) - self.assertEqual(set1 + set2, sampler.merge(set1, set2)) + self.assertEqual(set2, sampler.merge(set1, set2)) sampler = RandomExemplarSampler(8) - self.assertEqual(set1 + set2, sampler.merge(set1, set2)) + self.assertEqual(set2, sampler.merge(set1, set2)) sampler = RandomExemplarSampler(4) - self.assertEqual(4, len(sampler.merge(set1, set2))) + self.assertEqual(3, len(sampler.merge(set1, set2))) class TestMinMaxExemplarSampler(unittest.TestCase): @@ -140,10 +140,10 @@ def test_reset(self): self.assertEqual(len(sampler.sample_set), 1) def test_merge(self): - set1 = [1, 2, 3] - set2 = [4, 5, 6] + set1 = [1, 3] + set2 = [4, 6] sampler = MinMaxExemplarSampler(2) - self.assertEqual([1, 6], sampler.merge(set1, set2)) + self.assertEqual([4, 6], sampler.merge(set1, set2)) class TestBucketedExemplarSampler(unittest.TestCase): @@ -195,7 +195,7 @@ def test_merge(self): [Exemplar(2, time())], ) ), - 2, + 1, ) @@ -339,7 +339,7 @@ def _merge_aggregators_test(self, aggregator): agg1.merge(agg2) - self.assertEqual(len(agg1.checkpoint_exemplars), 2) + self.assertEqual(len(agg1.checkpoint_exemplars), 1) def test_sum_aggregator(self): self._no_exemplars_test(SumAggregator) @@ -495,8 +495,8 @@ def test_histogram(self): # Since this is using the HistogramAggregator, the bucket counts will be reflected # with each record requests_size.record(25, {"environment": "staging", "test": "value"}) - requests_size.record(1, {"environment": "staging", "test": "value2"}) - requests_size.record(200, {"environment": "staging", "test": "value3"}) + requests_size.record(1, {"environment": "staging", "test": "value"}) + requests_size.record(200, {"environment": "staging", "test": "value"}) controller.tick() metrics_list = exporter.get_exported_metrics() @@ -509,8 +509,8 @@ def test_histogram(self): for exemplar in exemplars ], [ - (1, (("test", "value2"),)), + (1, (("test", "value"),)), (25, (("test", "value"),)), - (200, (("test", "value3"),)), + (200, (("test", "value"),)), ], ) From b158e59b9ecaa3da0266eccf2b6c6ae3b6b6bcf3 Mon Sep 17 00:00:00 2001 From: Connor Adams Date: Fri, 7 Aug 2020 12:12:37 -0400 Subject: [PATCH 5/6] readme --- docs/examples/exemplars/README.rst | 10 
++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/examples/exemplars/README.rst b/docs/examples/exemplars/README.rst index deed297efdb..7151ae36a98 100644 --- a/docs/examples/exemplars/README.rst +++ b/docs/examples/exemplars/README.rst @@ -28,7 +28,11 @@ The opentelemetry SDK provides a way to sample exemplars statistically: - Exemplars will be picked to represent the input distribution, without unquantifiable bias - A "sample_count" attribute will be set on each exemplar to quantify how many measurements each exemplar represents (for randomly sampled exemplars, this value will be N (total measurements) / num_samples. For histogram exemplars, this value will be specific to each bucket). -See 'statistical_exemplars.ipynb' for the example (TODO: how do I link this?) +.. literalinclude:: statistical_exemplars.py + :language: python + :lines: 1- + +For the output of this example, see the corresponding Jupyter notebook. Trace exemplars ^^^^^^^^^^^^^^^^^^ @@ -38,5 +42,7 @@ but instead aim to provide value as individual exemplars. They will have a trace id/span id attached for the active trace when the exemplar was recorded, and they may focus on measurements with abnormally high/low values. -'trace_exemplars.py' shows how to generate exemplars for a histogram aggregation. +.. literalinclude:: trace_exemplars.py + :language: python + :lines: 1- Currently only the Google Cloud Monitoring exporter supports uploading these exemplars. From bb2e3023362f5d5c085d8ebfe8f7cf84ac4fe31e Mon Sep 17 00:00:00 2001 From: Connor Adams Date: Fri, 7 Aug 2020 12:47:45 -0400 Subject: [PATCH 6/6] nits --- docs/examples/exemplars/README.rst | 1 + .../sdk/metrics/export/aggregate.py | 2 +- .../sdk/metrics/export/exemplars.py | 65 ++++++------------- 3 files changed, 23 insertions(+), 45 deletions(-) diff --git a/docs/examples/exemplars/README.rst b/docs/examples/exemplars/README.rst index 7151ae36a98..89af3407b6f 100644 --- a/docs/examples/exemplars/README.rst +++ b/docs/examples/exemplars/README.rst @@ -45,4 +45,5 @@ and they may focus on measurements with abnormally high/low values. .. literalinclude:: trace_exemplars.py :language: python :lines: 1- + Currently only the Google Cloud Monitoring exporter supports uploading these exemplars. diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py index 998ee23b358..48908857b3e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py @@ -42,7 +42,7 @@ def __init__(self, config=None): self.config = config else: self.config = {} - self.checkpoint_exemplars = list() + self.checkpoint_exemplars = [] @abc.abstractmethod def update(self, value, dropped_labels=None): diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py index 3de83fdaefe..98bc6c44660 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/exemplars.py @@ -12,24 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" - Exemplars are sample data points for aggregators. For more information, see the `spec `_ +"""Exemplars are sample data points for aggregators. 
For more information, see the `spec `_

-    Every synchronous aggregator is instrumented with two exemplar recorders:
-        1. A "trace" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, ie min + max).
-        2. A "statistical" exemplar sampler, which samples exemplars without bias (ie no preferenced for traced exemplars)
+Every synchronous aggregator is instrumented with two exemplar recorders:
+    1. A "trace" exemplar sampler, which only samples exemplars if they have a sampled trace context (and can pick exemplars with other biases, e.g. min and max).
+    2. A "statistical" exemplar sampler, which samples exemplars without bias (i.e. no preference for traced exemplars)

-    To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the :ref:`Exemplars` example for an example):
-        "num_exemplars": The number of exemplars to record (if applicable, in each bucket). Note that in non-statistical mode the recorder may not use "num_exemplars"
-        "statistical_exemplars": If exemplars should be recorded statistically
+To use an exemplar recorder, pass in two arguments to the aggregator config in views (see the :ref:`Exemplars` example):
+    "num_exemplars": The number of exemplars to record (if applicable, in each bucket). Note that in non-statistical mode the recorder may not use "num_exemplars".
+    "statistical_exemplars": Whether exemplars should be recorded statistically

-    For exemplars to be recorded, `num_exemplars` must be greater than 0.
+For exemplars to be recorded, `num_exemplars` must be greater than 0.
 """
 
 import abc
 import itertools
 import random
-from typing import List, Optional, Tuple, Union
+from typing import List, Optional, Tuple, Type, Union
 
 from opentelemetry.context import get_current
 from opentelemetry.util import time_ns
@@ -95,7 +94,8 @@ def sample_count(self):
         """For statistical exemplars, how many measurements a single exemplar represents"""
         return self._sample_count
 
-    def set_sample_count(self, count: float):
+    @sample_count.setter
+    def sample_count(self, count: float):
         self._sample_count = count
 
 
@@ -122,11 +122,14 @@ def sample_set(self):
         """
         Return the list of exemplars that have been sampled
         """
 
-    @abc.abstractmethod
     def merge(self, set1: List[Exemplar], set2: List[Exemplar]):
         """
-        Given two lists of sampled exemplars, merge them while maintaining the invariants specified by this sampler
+        Assume that set2 is the latest set of exemplars.
+        For simplicity, we will just keep set2 and assume set1 has already been exported.
+        This may need to change with a different SDK implementation.
         """
+        # pylint: disable=unused-argument,no-self-use
+        return set2
 
     @abc.abstractmethod
     def reset(self):
@@ -162,21 +165,11 @@ def sample(self, exemplar: Exemplar, **kwargs):
         if replace_index < self._k:
             self._sample_set[replace_index] = exemplar
 
-    def merge(self, set1: List[Exemplar], set2: List[Exemplar]):
-        """
-        Assume that set2 is the latest set of exemplars.
-        For simplicity, we will just keep set2 and assume set1 has already been exported.
-        This may need to change with a different SDK implementation.
- """ - return set2 - @property def sample_set(self): if self._statistical: for exemplar in self._sample_set: - exemplar.set_sample_count( - self.rand_count / len(self._sample_set) - ) + exemplar.sample_count = self.rand_count / len(self._sample_set) return self._sample_set def reset(self): @@ -212,14 +205,6 @@ def sample(self, exemplar: Exemplar, **kwargs): def sample_set(self): return self._sample_set - def merge(self, set1: List[Exemplar], set2: List[Exemplar]): - """ - Assume that set2 is the latest set of exemplars. - For simplicity, we will just keep set2 and assume set1 has already been exported. - This may need to change with a different SDK implementation. - """ - return set2 - def reset(self): self._sample_set = [] @@ -233,7 +218,7 @@ class BucketedExemplarSampler(ExemplarSampler): """ def __init__( - self, k: int, statistical: bool = False, boundaries: list = None + self, k: int, statistical: bool = False, boundaries: List[float] = None ): super().__init__(k) self._boundaries = boundaries @@ -253,18 +238,10 @@ def sample(self, exemplar: Exemplar, **kwargs): def sample_set(self): return list( itertools.chain.from_iterable( - [sampler.sample_set for sampler in self._sample_set] + sampler.sample_set for sampler in self._sample_set ) ) - def merge(self, set1: List[Exemplar], set2: List[Exemplar]): - """ - Assume that set2 is the latest set of exemplars. - For simplicity, we will just keep set2 and assume set1 has already been exported. - This may need to change with a different SDK implementation. - """ - return set2 - def reset(self): for sampler in self._sample_set: sampler.reset() @@ -280,8 +257,8 @@ class ExemplarManager: def __init__( self, config: dict, - default_exemplar_sampler: ExemplarSampler, - statistical_exemplar_sampler: ExemplarSampler, + default_exemplar_sampler: Type[ExemplarSampler], + statistical_exemplar_sampler: Type[ExemplarSampler], **kwargs ): if config: