diff --git a/source/server/http/BUILD b/source/server/http/BUILD index dbbb828c8930..7d46ea39d930 100644 --- a/source/server/http/BUILD +++ b/source/server/http/BUILD @@ -90,6 +90,7 @@ envoy_cc_library( srcs = ["stats_handler.cc"], hdrs = ["stats_handler.h"], deps = [ + ":prometheus_stats_lib", ":utils_lib", "//include/envoy/http:codes_interface", "//include/envoy/server:admin_interface", @@ -102,6 +103,17 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "prometheus_stats_lib", + srcs = ["prometheus_stats.cc"], + hdrs = ["prometheus_stats.h"], + deps = [ + ":utils_lib", + "//source/common/buffer:buffer_lib", + "//source/common/stats:histogram_lib", + ], +) + envoy_cc_library( name = "utils_lib", srcs = ["utils.cc"], diff --git a/source/server/http/prometheus_stats.cc b/source/server/http/prometheus_stats.cc new file mode 100644 index 000000000000..e04edeccf9cf --- /dev/null +++ b/source/server/http/prometheus_stats.cc @@ -0,0 +1,218 @@ +#include "server/http/prometheus_stats.h" + +#include "common/common/empty_string.h" +#include "common/stats/histogram_impl.h" + +#include "absl/strings/str_cat.h" + +namespace Envoy { +namespace Server { + +namespace { + +const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } + +/** + * Take a string and sanitize it according to Prometheus conventions. + */ +std::string sanitizeName(const std::string& name) { + // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by + // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. + std::string stats_name = std::regex_replace(name, promRegex(), "_"); + if (stats_name[0] >= '0' && stats_name[0] <= '9') { + return absl::StrCat("_", stats_name); + } else { + return stats_name; + } +} + +/* + * Determine whether a metric has never been emitted and choose to + * not show it if we only wanted used metrics. + */ +template +static bool shouldShowMetric(const StatType& metric, const bool used_only, + const absl::optional& regex) { + return ((!used_only || metric.used()) && + (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); +} + +/* + * Comparator for Stats::Metric that does not require a string representation + * to make the comparison, for memory efficiency. + */ +struct MetricLessThan { + bool operator()(const Stats::Metric* a, const Stats::Metric* b) const { + ASSERT(&a->constSymbolTable() == &b->constSymbolTable()); + return a->constSymbolTable().lessThan(a->statName(), b->statName()); + } +}; + +/** + * Processes a stat type (counter, gauge, histogram) by generating all output lines, sorting + * them by tag-extracted metric name, and then outputting them in the correct sorted order into + * response. + * + * @param response The buffer to put the output into. + * @param used_only Whether to only output stats that are used. + * @param regex A filter on which stats to output. + * @param metrics The metrics to output stats for. This must contain all stats of the given type + * to be included in the same output. + * @param generate_output A function which returns the output text for this metric. + * @param type The name of the prometheus metric type for used in TYPE annotations. 
+ */ +template +uint64_t outputStatType( + Buffer::Instance& response, const bool used_only, const absl::optional& regex, + const std::vector>& metrics, + const std::function& generate_output, + absl::string_view type) { + + /* + * From + * https:*github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#grouping-and-sorting: + * + * All lines for a given metric must be provided as one single group, with the optional HELP and + * TYPE lines first (in no particular order). Beyond that, reproducible sorting in repeated + * expositions is preferred but not required, i.e. do not sort if the computational cost is + * prohibitive. + */ + + // This is an unsorted collection of dumb-pointers (no need to increment then decrement every + // refcount; ownership is held throughout by `metrics`). It is unsorted for efficiency, but will + // be sorted before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will be + // consistent across calls. + using StatTypeUnsortedCollection = std::vector; + + // Return early to avoid crashing when getting the symbol table from the first metric. + if (metrics.empty()) { + return 0; + } + + // There should only be one symbol table for all of the stats in the admin + // interface. If this assumption changes, the name comparisons in this function + // will have to change to compare to convert all StatNames to strings before + // comparison. + const Stats::SymbolTable& global_symbol_table = metrics.front()->constSymbolTable(); + + // Sorted collection of metrics sorted by their tagExtractedName, to satisfy the requirements + // of the exposition format. + std::map groups( + global_symbol_table); + + for (const auto& metric : metrics) { + ASSERT(&global_symbol_table == &metric->constSymbolTable()); + + if (!shouldShowMetric(*metric, used_only, regex)) { + continue; + } + + groups[metric->tagExtractedStatName()].push_back(metric.get()); + } + + for (auto& group : groups) { + const std::string prefixed_tag_extracted_name = + PrometheusStatsFormatter::metricName(global_symbol_table.toString(group.first)); + response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); + + // Sort before producing the final output to satisfy the "preferred" ordering from the + // prometheus spec: metrics will be sorted by their tags' textual representation, which will + // be consistent across calls. + std::sort(group.second.begin(), group.second.end(), MetricLessThan()); + + for (const auto& metric : group.second) { + response.add(generate_output(*metric, prefixed_tag_extracted_name)); + } + response.add("\n"); + } + return groups.size(); +} + +/* + * Return the prometheus output for a numeric Stat (Counter or Gauge). + */ +template +std::string generateNumericOutput(const StatType& metric, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(metric.tags()); + return fmt::format("{0}{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, metric.value()); +} + +/* + * Returns the prometheus output for a histogram. The output is a multi-line string (with embedded + * newlines) that contains all the individual bucket counts and sum/count for a single histogram + * (metric_name plus all tags). 
+ */ +std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, + const std::string& prefixed_tag_extracted_name) { + const std::string tags = PrometheusStatsFormatter::formattedTags(histogram.tags()); + const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + ","); + + const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics(); + const std::vector& supported_buckets = stats.supportedBuckets(); + const std::vector& computed_buckets = stats.computedBuckets(); + std::string output; + for (size_t i = 0; i < supported_buckets.size(); ++i) { + double bucket = supported_buckets[i]; + uint64_t value = computed_buckets[i]; + // We want to print the bucket in a fixed point (non-scientific) format. The fmt library + // doesn't have a specific modifier to format as a fixed-point value only so we use the + // 'g' operator which prints the number in general fixed point format or scientific format + // with precision 50 to round the number up to 32 significant digits in fixed point format + // which should cover pretty much all cases + output.append(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", prefixed_tag_extracted_name, + hist_tags, bucket, value)); + } + + output.append(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", prefixed_tag_extracted_name, + hist_tags, stats.sampleCount())); + output.append(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", prefixed_tag_extracted_name, tags, + stats.sampleSum())); + output.append(fmt::format("{0}_count{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, + stats.sampleCount())); + + return output; +}; + +} // namespace + +std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { + std::vector buf; + buf.reserve(tags.size()); + for (const Stats::Tag& tag : tags) { + buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); + } + return absl::StrJoin(buf, ","); +} + +std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { + // Add namespacing prefix to avoid conflicts, as per best practice: + // https://prometheus.io/docs/practices/naming/#metric-names + // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ + return sanitizeName(fmt::format("envoy_{0}", extracted_name)); +} + +// TODO(efimki): Add support of text readouts stats. +uint64_t PrometheusStatsFormatter::statsAsPrometheus( + const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, Buffer::Instance& response, + const bool used_only, const absl::optional& regex) { + + uint64_t metric_name_count = 0; + metric_name_count += outputStatType( + response, used_only, regex, counters, generateNumericOutput, "counter"); + + metric_name_count += outputStatType(response, used_only, regex, gauges, + generateNumericOutput, "gauge"); + + metric_name_count += outputStatType( + response, used_only, regex, histograms, generateHistogramOutput, "histogram"); + + return metric_name_count; +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/prometheus_stats.h b/source/server/http/prometheus_stats.h new file mode 100644 index 000000000000..e748d051d47b --- /dev/null +++ b/source/server/http/prometheus_stats.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include + +#include "envoy/buffer/buffer.h" +#include "envoy/stats/histogram.h" +#include "envoy/stats/stats.h" + +namespace Envoy { +namespace Server { +/** + * Formatter for metric/labels exported to Prometheus. 
+ * + * See: https://prometheus.io/docs/concepts/data_model + */ +class PrometheusStatsFormatter { +public: + /** + * Extracts counters and gauges and relevant tags, appending them to + * the response buffer after sanitizing the metric / label names. + * @return uint64_t total number of metric types inserted in response. + */ + static uint64_t statsAsPrometheus(const std::vector& counters, + const std::vector& gauges, + const std::vector& histograms, + Buffer::Instance& response, const bool used_only, + const absl::optional& regex); + /** + * Format the given tags, returning a string as a comma-separated list + * of ="" pairs. + */ + static std::string formattedTags(const std::vector& tags); + /** + * Format the given metric name, prefixed with "envoy_". + */ + static std::string metricName(const std::string& extracted_name); +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/http/stats_handler.cc b/source/server/http/stats_handler.cc index 82dad9f99fc1..a437bd2ac395 100644 --- a/source/server/http/stats_handler.cc +++ b/source/server/http/stats_handler.cc @@ -5,6 +5,7 @@ #include "common/http/headers.h" #include "common/http/utility.h" +#include "server/http/prometheus_stats.h" #include "server/http/utils.h" namespace Envoy { @@ -12,10 +13,6 @@ namespace Server { const uint64_t RecentLookupsCapacity = 100; -namespace { -const std::regex& promRegex() { CONSTRUCT_ON_FIRST_USE(std::regex, "[^a-zA-Z0-9_]"); } -} // namespace - Http::Code StatsHandler::handlerResetCounters(absl::string_view, Http::ResponseHeaderMap&, Buffer::Instance& response, AdminStream&, Server::Instance& server) { @@ -157,162 +154,6 @@ Http::Code StatsHandler::handlerPrometheusStats(absl::string_view path_and_query return Http::Code::OK; } -std::string PrometheusStatsFormatter::sanitizeName(const std::string& name) { - // The name must match the regex [a-zA-Z_][a-zA-Z0-9_]* as required by - // prometheus. Refer to https://prometheus.io/docs/concepts/data_model/. - std::string stats_name = std::regex_replace(name, promRegex(), "_"); - if (stats_name[0] >= '0' && stats_name[0] <= '9') { - return absl::StrCat("_", stats_name); - } else { - return stats_name; - } -} - -std::string PrometheusStatsFormatter::formattedTags(const std::vector& tags) { - std::vector buf; - buf.reserve(tags.size()); - for (const Stats::Tag& tag : tags) { - buf.push_back(fmt::format("{}=\"{}\"", sanitizeName(tag.name_), tag.value_)); - } - return absl::StrJoin(buf, ","); -} - -std::string PrometheusStatsFormatter::metricName(const std::string& extracted_name) { - // Add namespacing prefix to avoid conflicts, as per best practice: - // https://prometheus.io/docs/practices/naming/#metric-names - // Also, naming conventions on https://prometheus.io/docs/concepts/data_model/ - return sanitizeName(fmt::format("envoy_{0}", extracted_name)); -} - -template -uint64_t PrometheusStatsFormatter::outputStatType( - Buffer::Instance& response, const bool used_only, const absl::optional& regex, - const std::vector>& metrics, - const std::function& generate_output, - absl::string_view type) { - - /* - * From - * https:*github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#grouping-and-sorting: - * - * All lines for a given metric must be provided as one single group, with the optional HELP and - * TYPE lines first (in no particular order). Beyond that, reproducible sorting in repeated - * expositions is preferred but not required, i.e. do not sort if the computational cost is - * prohibitive. 
- */ - - // This is an unsorted collection of dumb-pointers (no need to increment then decrement every - // refcount; ownership is held throughout by `metrics`). It is unsorted for efficiency, but will - // be sorted before producing the final output to satisfy the "preferred" ordering from the - // prometheus spec: metrics will be sorted by their tags' textual representation, which will be - // consistent across calls. - using StatTypeUnsortedCollection = std::vector; - - // Return early to avoid crashing when getting the symbol table from the first metric. - if (metrics.empty()) { - return 0; - } - - // There should only be one symbol table for all of the stats in the admin - // interface. If this assumption changes, the name comparisons in this function - // will have to change to compare to convert all StatNames to strings before - // comparison. - const Stats::SymbolTable& global_symbol_table = metrics.front()->constSymbolTable(); - - // Sorted collection of metrics sorted by their tagExtractedName, to satisfy the requirements - // of the exposition format. - std::map groups( - global_symbol_table); - - for (const auto& metric : metrics) { - ASSERT(&global_symbol_table == &metric->constSymbolTable()); - - if (!shouldShowMetric(*metric, used_only, regex)) { - continue; - } - - groups[metric->tagExtractedStatName()].push_back(metric.get()); - } - - for (auto& group : groups) { - const std::string prefixed_tag_extracted_name = - metricName(global_symbol_table.toString(group.first)); - response.add(fmt::format("# TYPE {0} {1}\n", prefixed_tag_extracted_name, type)); - - // Sort before producing the final output to satisfy the "preferred" ordering from the - // prometheus spec: metrics will be sorted by their tags' textual representation, which will - // be consistent across calls. - std::sort(group.second.begin(), group.second.end(), MetricLessThan()); - - for (const auto& metric : group.second) { - response.add(generate_output(*metric, prefixed_tag_extracted_name)); - } - response.add("\n"); - } - return groups.size(); -} - -template -std::string -PrometheusStatsFormatter::generateNumericOutput(const StatType& metric, - const std::string& prefixed_tag_extracted_name) { - const std::string tags = formattedTags(metric.tags()); - return fmt::format("{0}{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, metric.value()); -} - -std::string -PrometheusStatsFormatter::generateHistogramOutput(const Stats::ParentHistogram& histogram, - const std::string& prefixed_tag_extracted_name) { - const std::string tags = formattedTags(histogram.tags()); - const std::string hist_tags = histogram.tags().empty() ? EMPTY_STRING : (tags + ","); - - const Stats::HistogramStatistics& stats = histogram.cumulativeStatistics(); - const std::vector& supported_buckets = stats.supportedBuckets(); - const std::vector& computed_buckets = stats.computedBuckets(); - std::string output; - for (size_t i = 0; i < supported_buckets.size(); ++i) { - double bucket = supported_buckets[i]; - uint64_t value = computed_buckets[i]; - // We want to print the bucket in a fixed point (non-scientific) format. 
The fmt library - // doesn't have a specific modifier to format as a fixed-point value only so we use the - // 'g' operator which prints the number in general fixed point format or scientific format - // with precision 50 to round the number up to 32 significant digits in fixed point format - // which should cover pretty much all cases - output.append(fmt::format("{0}_bucket{{{1}le=\"{2:.32g}\"}} {3}\n", prefixed_tag_extracted_name, - hist_tags, bucket, value)); - } - - output.append(fmt::format("{0}_bucket{{{1}le=\"+Inf\"}} {2}\n", prefixed_tag_extracted_name, - hist_tags, stats.sampleCount())); - output.append(fmt::format("{0}_sum{{{1}}} {2:.32g}\n", prefixed_tag_extracted_name, tags, - stats.sampleSum())); - output.append(fmt::format("{0}_count{{{1}}} {2}\n", prefixed_tag_extracted_name, tags, - stats.sampleCount())); - - return output; -}; - -// TODO(efimki): Add support of text readouts stats. -uint64_t PrometheusStatsFormatter::statsAsPrometheus( - const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, Buffer::Instance& response, - const bool used_only, const absl::optional& regex) { - - uint64_t metric_name_count = 0; - metric_name_count += outputStatType( - response, used_only, regex, counters, generateNumericOutput, "counter"); - - metric_name_count += outputStatType(response, used_only, regex, gauges, - generateNumericOutput, "gauge"); - - metric_name_count += outputStatType( - response, used_only, regex, histograms, generateHistogramOutput, "histogram"); - - return metric_name_count; -} - std::string StatsHandler::statsAsJson(const std::map& all_stats, const std::map& text_readouts, diff --git a/source/server/http/stats_handler.h b/source/server/http/stats_handler.h index 519f77ec0eeb..4103660689a3 100644 --- a/source/server/http/stats_handler.h +++ b/source/server/http/stats_handler.h @@ -66,97 +66,5 @@ class StatsHandler { bool pretty_print = false); }; -/** - * Formatter for metric/labels exported to Prometheus. - * - * See: https://prometheus.io/docs/concepts/data_model - */ -class PrometheusStatsFormatter { -public: - /** - * Extracts counters and gauges and relevant tags, appending them to - * the response buffer after sanitizing the metric / label names. - * @return uint64_t total number of metric types inserted in response. - */ - static uint64_t statsAsPrometheus(const std::vector& counters, - const std::vector& gauges, - const std::vector& histograms, - Buffer::Instance& response, const bool used_only, - const absl::optional& regex); - /** - * Format the given tags, returning a string as a comma-separated list - * of ="" pairs. - */ - static std::string formattedTags(const std::vector& tags); - /** - * Format the given metric name, prefixed with "envoy_". - */ - static std::string metricName(const std::string& extracted_name); - -private: - /** - * Take a string and sanitize it according to Prometheus conventions. - */ - static std::string sanitizeName(const std::string& name); - - /* - * Determine whether a metric has never been emitted and choose to - * not show it if we only wanted used metrics. 
- */ - template - static bool shouldShowMetric(const StatType& metric, const bool used_only, - const absl::optional& regex) { - return ((!used_only || metric.used()) && - (!regex.has_value() || std::regex_search(metric.name(), regex.value()))); - } - - /** - * Processes a stat type (counter, gauge, histogram) by generating all output lines, sorting - * them by tag-extracted metric name, and then outputting them in the correct sorted order into - * response. - * - * @param response The buffer to put the output into. - * @param used_only Whether to only output stats that are used. - * @param regex A filter on which stats to output. - * @param metrics The metrics to output stats for. This must contain all stats of the given type - * to be included in the same output. - * @param generate_output A function which returns the output text for this metric. - * @param type The name of the prometheus metric type for used in TYPE annotations. - */ - template - static uint64_t outputStatType( - Buffer::Instance& response, const bool used_only, const absl::optional& regex, - const std::vector>& metrics, - const std::function& generate_output, - absl::string_view type); - - /* - * Return the prometheus output for a numeric Stat (Counter or Gauge). - */ - template - static std::string generateNumericOutput(const StatType& metric, - const std::string& prefixed_tag_extracted_name); - - /* - * Returns the prometheus output for a histogram. The output is a multi-line string (with embedded - * newlines) that contains all the individual bucket counts and sum/count for a single histogram - * (metric_name plus all tags). - */ - static std::string generateHistogramOutput(const Stats::ParentHistogram& histogram, - const std::string& prefixed_tag_extracted_name); - - /* - * Comparator for Stats::Metric that does not require a string representation - * to make the comparison, for memory efficiency. 
- */ - struct MetricLessThan { - bool operator()(const Stats::Metric* a, const Stats::Metric* b) const { - ASSERT(&a->constSymbolTable() == &b->constSymbolTable()); - return a->constSymbolTable().lessThan(a->statName(), b->statName()); - } - }; -}; - } // namespace Server } // namespace Envoy diff --git a/test/server/http/BUILD b/test/server/http/BUILD index 51d73c734688..9a706a07cc6b 100644 --- a/test/server/http/BUILD +++ b/test/server/http/BUILD @@ -73,6 +73,15 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "prometheus_stats_test", + srcs = ["prometheus_stats_test.cc"], + deps = [ + "//source/server/http:prometheus_stats_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "config_tracker_impl_test", srcs = ["config_tracker_impl_test.cc"], diff --git a/test/server/http/prometheus_stats_test.cc b/test/server/http/prometheus_stats_test.cc new file mode 100644 index 000000000000..fb6f16b958a9 --- /dev/null +++ b/test/server/http/prometheus_stats_test.cc @@ -0,0 +1,667 @@ +#include + +#include "server/http/prometheus_stats.h" + +#include "test/mocks/stats/mocks.h" +#include "test/test_common/utility.h" + +using testing::NiceMock; + +namespace Envoy { +namespace Server { + +class HistogramWrapper { +public: + HistogramWrapper() : histogram_(hist_alloc()) {} + + ~HistogramWrapper() { hist_free(histogram_); } + + const histogram_t* getHistogram() { return histogram_; } + + void setHistogramValues(const std::vector& values) { + for (uint64_t value : values) { + hist_insert_intscale(histogram_, value, 0, 1); + } + } + + void setHistogramValuesWithCounts(const std::vector>& values) { + for (std::pair cv : values) { + hist_insert_intscale(histogram_, cv.first, 0, cv.second); + } + } + +private: + histogram_t* histogram_; +}; + +class PrometheusStatsFormatterTest : public testing::Test { +protected: + PrometheusStatsFormatterTest() + : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), + pool_(*symbol_table_) {} + + ~PrometheusStatsFormatterTest() override { clearStorage(); } + + void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + counters_.push_back(alloc_.makeCounter(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags)); + } + + void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { + Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); + Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); + gauges_.push_back(alloc_.makeGauge(name_storage.statName(), + tag_extracted_name_storage.statName(), cluster_tags, + Stats::Gauge::ImportMode::Accumulate)); + } + + using MockHistogramSharedPtr = Stats::RefcountPtr>; + void addHistogram(MockHistogramSharedPtr histogram) { histograms_.push_back(histogram); } + + MockHistogramSharedPtr makeHistogram(const std::string& name, + Stats::StatNameTagVector cluster_tags) { + auto histogram = MockHistogramSharedPtr(new NiceMock()); + histogram->name_ = baseName(name, cluster_tags); + histogram->setTagExtractedName(name); + histogram->setTags(cluster_tags); + histogram->used_ = true; + return histogram; + } + + Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } + + // Format tags into the name to create a unique stat_name for each name:tag combination. 
+ // If the same stat_name is passed to makeGauge() or makeCounter(), even with different + // tags, a copy of the previous metric will be returned. + std::string baseName(const std::string& name, Stats::StatNameTagVector cluster_tags) { + std::string result = name; + for (const auto& name_tag : cluster_tags) { + result.append(fmt::format("<{}:{}>", symbol_table_->toString(name_tag.first), + symbol_table_->toString(name_tag.second))); + } + return result; + } + + void clearStorage() { + pool_.clear(); + counters_.clear(); + gauges_.clear(); + histograms_.clear(); + EXPECT_EQ(0, symbol_table_->numSymbols()); + } + + Stats::SymbolTablePtr symbol_table_; + Stats::AllocatorImpl alloc_; + Stats::StatNamePool pool_; + std::vector counters_; + std::vector gauges_; + std::vector histograms_; +}; + +TEST_F(PrometheusStatsFormatterTest, MetricName) { + std::string raw = "vulture.eats-liver"; + std::string expected = "envoy_vulture_eats_liver"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) { + std::string raw = "An.artist.plays-violin@019street"; + std::string expected = "envoy_An_artist_plays_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) { + std::string raw = "3.artists.play-violin@019street"; + std::string expected = "envoy_3_artists_play_violin_019street"; + auto actual = PrometheusStatsFormatter::metricName(raw); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, FormattedTags) { + std::vector tags; + Stats::Tag tag1 = {"a.tag-name", "a.tag-value"}; + Stats::Tag tag2 = {"another_tag_name", "another_tag-value"}; + tags.push_back(tag1); + tags.push_back(tag2); + std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\""; + auto actual = PrometheusStatsFormatter::formattedTags(tags); + EXPECT_EQ(expected, actual); +} + +TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) { + + // Create two counters and two gauges with each pair having the same name, + // but having different tag names and values. + //`statsAsPrometheus()` should return two implying it found two unique stat names + + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(2UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) { + + // Create two counters and two gauges, all with unique names. + // statsAsPrometheus() should return four implying it found + // four unique stat names. 
+ + addCounter("cluster.test_cluster_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_cluster_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_cluster_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_cluster_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(4UL, size); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) { + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(std::vector(0)); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()) + .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 0 +envoy_histogram1_bucket{le="10"} 0 +envoy_histogram1_bucket{le="25"} 0 +envoy_histogram1_bucket{le="50"} 0 +envoy_histogram1_bucket{le="100"} 0 +envoy_histogram1_bucket{le="250"} 0 +envoy_histogram1_bucket{le="500"} 0 +envoy_histogram1_bucket{le="1000"} 0 +envoy_histogram1_bucket{le="2500"} 0 +envoy_histogram1_bucket{le="5000"} 0 +envoy_histogram1_bucket{le="10000"} 0 +envoy_histogram1_bucket{le="30000"} 0 +envoy_histogram1_bucket{le="60000"} 0 +envoy_histogram1_bucket{le="300000"} 0 +envoy_histogram1_bucket{le="600000"} 0 +envoy_histogram1_bucket{le="1800000"} 0 +envoy_histogram1_bucket{le="3600000"} 0 +envoy_histogram1_bucket{le="+Inf"} 0 +envoy_histogram1_sum{} 0 +envoy_histogram1_count{} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) { + HistogramWrapper h1_cumulative; + + // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation. 
+ h1_cumulative.setHistogramValuesWithCounts(std::vector>({ + {1, 100000}, + {100, 1000000}, + {1000, 100000000}, + })); + + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram = makeHistogram("histogram1", {}); + ON_CALL(*histogram, cumulativeStatistics()) + .WillByDefault(testing::ReturnRef(h1_cumulative_statistics)); + + addHistogram(histogram); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram +envoy_histogram1_bucket{le="0.5"} 0 +envoy_histogram1_bucket{le="1"} 0 +envoy_histogram1_bucket{le="5"} 100000 +envoy_histogram1_bucket{le="10"} 100000 +envoy_histogram1_bucket{le="25"} 100000 +envoy_histogram1_bucket{le="50"} 100000 +envoy_histogram1_bucket{le="100"} 100000 +envoy_histogram1_bucket{le="250"} 1100000 +envoy_histogram1_bucket{le="500"} 1100000 +envoy_histogram1_bucket{le="1000"} 1100000 +envoy_histogram1_bucket{le="2500"} 101100000 +envoy_histogram1_bucket{le="5000"} 101100000 +envoy_histogram1_bucket{le="10000"} 101100000 +envoy_histogram1_bucket{le="30000"} 101100000 +envoy_histogram1_bucket{le="60000"} 101100000 +envoy_histogram1_bucket{le="300000"} 101100000 +envoy_histogram1_bucket{le="600000"} 101100000 +envoy_histogram1_bucket{le="1800000"} 101100000 +envoy_histogram1_bucket{le="3600000"} 101100000 +envoy_histogram1_bucket{le="+Inf"} 101100000 +envoy_histogram1_sum{} 105105105000 +envoy_histogram1_count{} 101100000 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(5UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +# TYPE envoy_cluster_test_2_upstream_cx_total counter +envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0 + +# TYPE envoy_cluster_test_3_upstream_cx_total gauge +envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0 + +# TYPE envoy_cluster_test_4_upstream_cx_total gauge 
+envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0 + +# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 +envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +// Test that output groups all metrics of the same name (with different tags) together, +// as required by the Prometheus exposition format spec. Additionally, groups of metrics +// should be sorted by their tags; the format specifies that it is preferred that metrics +// are always grouped in the same order, and sorting is an easy way to ensure this. +TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) { + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + // Create the 3 clusters in non-sorted order to exercise the sorting. + // Create two of each metric type (counter, gauge, histogram) so that + // the output for each needs to be collected together. 
+ for (const char* cluster : {"ccc", "aaa", "bbb"}) { + const Stats::StatNameTagVector tags{{makeStat("cluster"), makeStat(cluster)}}; + addCounter("cluster.upstream_cx_total", tags); + addCounter("cluster.upstream_cx_connect_fail", tags); + addGauge("cluster.upstream_cx_active", tags); + addGauge("cluster.upstream_rq_active", tags); + + for (const char* hist_name : {"cluster.upstream_rq_time", "cluster.upstream_response_time"}) { + auto histogram1 = makeHistogram(hist_name, tags); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + } + } + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + false, absl::nullopt); + EXPECT_EQ(6UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter +envoy_cluster_upstream_cx_connect_fail{cluster="aaa"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="bbb"} 0 +envoy_cluster_upstream_cx_connect_fail{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_total counter +envoy_cluster_upstream_cx_total{cluster="aaa"} 0 +envoy_cluster_upstream_cx_total{cluster="bbb"} 0 +envoy_cluster_upstream_cx_total{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{cluster="aaa"} 0 +envoy_cluster_upstream_cx_active{cluster="bbb"} 0 +envoy_cluster_upstream_cx_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{cluster="aaa"} 0 +envoy_cluster_upstream_rq_active{cluster="bbb"} 0 +envoy_cluster_upstream_rq_active{cluster="ccc"} 0 + +# TYPE envoy_cluster_upstream_response_time histogram +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_response_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1"} 0 
+envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="bbb"} 5532 +envoy_cluster_upstream_response_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="100"} 4 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1800000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_response_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_response_time_count{cluster="ccc"} 7 + +# TYPE envoy_cluster_upstream_rq_time histogram +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="250"} 6 
+envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="aaa"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="aaa"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1800000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="bbb"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="bbb"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="0.5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10"} 0 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="25"} 1 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="50"} 2 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="100"} 4 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="250"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="2500"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5000"} 6 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="30000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="60000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="300000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1800000"} 7 
+envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="3600000"} 7 +envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="+Inf"} 7 +envoy_cluster_upstream_rq_time_sum{cluster="ccc"} 5532 +envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, + true, absl::nullopt); + EXPECT_EQ(1UL, size); + + const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7 +envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7 +envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532 
+envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) { + const std::vector h1_values = {}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + histogram1->used_ = false; + addHistogram(histogram1); + + { + const bool used_only = true; + EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(0UL, size); + } + + { + const bool used_only = false; + EXPECT_CALL(*histogram1, cumulativeStatistics()) + .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, + response, used_only, absl::nullopt); + EXPECT_EQ(1UL, size); + } +} + +TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) { + addCounter("cluster.test_1.upstream_cx_total", + {{makeStat("a.tag-name"), makeStat("a.tag-value")}}); + addCounter("cluster.test_2.upstream_cx_total", + {{makeStat("another_tag_name"), makeStat("another_tag-value")}}); + addGauge("cluster.test_3.upstream_cx_total", + {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}}); + addGauge("cluster.test_4.upstream_cx_total", + {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}}); + + const std::vector h1_values = {50, 20, 30, 70, 100, 5000, 200}; + HistogramWrapper h1_cumulative; + h1_cumulative.setHistogramValues(h1_values); + Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram()); + + auto histogram1 = + makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")}, + {makeStat("key2"), makeStat("value2")}}); + histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; + addHistogram(histogram1); + + Buffer::OwnedImpl response; + auto size = PrometheusStatsFormatter::statsAsPrometheus( + counters_, gauges_, histograms_, response, false, + absl::optional{std::regex("cluster.test_1.upstream_cx_total")}); + EXPECT_EQ(1UL, size); + + const std::string expected_output = + R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter +envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0 + +)EOF"; + + EXPECT_EQ(expected_output, response.toString()); +} + +} // namespace Server +} // namespace Envoy diff --git a/test/server/http/stats_handler_test.cc b/test/server/http/stats_handler_test.cc index 02cc2095967e..6ce1b0cfae48 100644 --- a/test/server/http/stats_handler_test.cc +++ b/test/server/http/stats_handler_test.cc @@ -571,658 +571,5 @@ TEST_P(AdminInstanceTest, RecentLookups) { // fake symbol table. However we cover this solidly in integration tests. 
} -class HistogramWrapper { -public: - HistogramWrapper() : histogram_(hist_alloc()) {} - - ~HistogramWrapper() { hist_free(histogram_); } - - const histogram_t* getHistogram() { return histogram_; } - - void setHistogramValues(const std::vector& values) { - for (uint64_t value : values) { - hist_insert_intscale(histogram_, value, 0, 1); - } - } - - void setHistogramValuesWithCounts(const std::vector>& values) { - for (std::pair cv : values) { - hist_insert_intscale(histogram_, cv.first, 0, cv.second); - } - } - -private: - histogram_t* histogram_; -}; - -class PrometheusStatsFormatterTest : public testing::Test { -protected: - PrometheusStatsFormatterTest() - : symbol_table_(Stats::SymbolTableCreator::makeSymbolTable()), alloc_(*symbol_table_), - pool_(*symbol_table_) {} - - ~PrometheusStatsFormatterTest() override { clearStorage(); } - - void addCounter(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); - Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); - counters_.push_back(alloc_.makeCounter(name_storage.statName(), - tag_extracted_name_storage.statName(), cluster_tags)); - } - - void addGauge(const std::string& name, Stats::StatNameTagVector cluster_tags) { - Stats::StatNameManagedStorage name_storage(baseName(name, cluster_tags), *symbol_table_); - Stats::StatNameManagedStorage tag_extracted_name_storage(name, *symbol_table_); - gauges_.push_back(alloc_.makeGauge(name_storage.statName(), - tag_extracted_name_storage.statName(), cluster_tags, - Stats::Gauge::ImportMode::Accumulate)); - } - - using MockHistogramSharedPtr = Stats::RefcountPtr>; - void addHistogram(MockHistogramSharedPtr histogram) { histograms_.push_back(histogram); } - - MockHistogramSharedPtr makeHistogram(const std::string& name, - Stats::StatNameTagVector cluster_tags) { - auto histogram = MockHistogramSharedPtr(new NiceMock()); - histogram->name_ = baseName(name, cluster_tags); - histogram->setTagExtractedName(name); - histogram->setTags(cluster_tags); - histogram->used_ = true; - return histogram; - } - - Stats::StatName makeStat(absl::string_view name) { return pool_.add(name); } - - // Format tags into the name to create a unique stat_name for each name:tag combination. - // If the same stat_name is passed to makeGauge() or makeCounter(), even with different - // tags, a copy of the previous metric will be returned. 
-  std::string baseName(const std::string& name, Stats::StatNameTagVector cluster_tags) {
-    std::string result = name;
-    for (const auto& name_tag : cluster_tags) {
-      result.append(fmt::format("<{}:{}>", symbol_table_->toString(name_tag.first),
-                                symbol_table_->toString(name_tag.second)));
-    }
-    return result;
-  }
-
-  void clearStorage() {
-    pool_.clear();
-    counters_.clear();
-    gauges_.clear();
-    histograms_.clear();
-    EXPECT_EQ(0, symbol_table_->numSymbols());
-  }
-
-  Stats::SymbolTablePtr symbol_table_;
-  Stats::AllocatorImpl alloc_;
-  Stats::StatNamePool pool_;
-  std::vector<Stats::CounterSharedPtr> counters_;
-  std::vector<Stats::GaugeSharedPtr> gauges_;
-  std::vector<Stats::ParentHistogramSharedPtr> histograms_;
-};
-
-TEST_F(PrometheusStatsFormatterTest, MetricName) {
-  std::string raw = "vulture.eats-liver";
-  std::string expected = "envoy_vulture_eats_liver";
-  auto actual = PrometheusStatsFormatter::metricName(raw);
-  EXPECT_EQ(expected, actual);
-}
-
-TEST_F(PrometheusStatsFormatterTest, SanitizeMetricName) {
-  std::string raw = "An.artist.plays-violin@019street";
-  std::string expected = "envoy_An_artist_plays_violin_019street";
-  auto actual = PrometheusStatsFormatter::metricName(raw);
-  EXPECT_EQ(expected, actual);
-}
-
-TEST_F(PrometheusStatsFormatterTest, SanitizeMetricNameDigitFirst) {
-  std::string raw = "3.artists.play-violin@019street";
-  std::string expected = "envoy_3_artists_play_violin_019street";
-  auto actual = PrometheusStatsFormatter::metricName(raw);
-  EXPECT_EQ(expected, actual);
-}
-
-TEST_F(PrometheusStatsFormatterTest, FormattedTags) {
-  std::vector<Stats::Tag> tags;
-  Stats::Tag tag1 = {"a.tag-name", "a.tag-value"};
-  Stats::Tag tag2 = {"another_tag_name", "another_tag-value"};
-  tags.push_back(tag1);
-  tags.push_back(tag2);
-  std::string expected = "a_tag_name=\"a.tag-value\",another_tag_name=\"another_tag-value\"";
-  auto actual = PrometheusStatsFormatter::formattedTags(tags);
-  EXPECT_EQ(expected, actual);
-}
-
-TEST_F(PrometheusStatsFormatterTest, MetricNameCollison) {
-
-  // Create two counters and two gauges with each pair having the same name,
-  // but having different tag names and values.
-  //`statsAsPrometheus()` should return two implying it found two unique stat names
-
-  addCounter("cluster.test_cluster_1.upstream_cx_total",
-             {{makeStat("a.tag-name"), makeStat("a.tag-value")}});
-  addCounter("cluster.test_cluster_1.upstream_cx_total",
-             {{makeStat("another_tag_name"), makeStat("another_tag-value")}});
-  addGauge("cluster.test_cluster_2.upstream_cx_total",
-           {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}});
-  addGauge("cluster.test_cluster_2.upstream_cx_total",
-           {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}});
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          false, absl::nullopt);
-  EXPECT_EQ(2UL, size);
-}
-
-TEST_F(PrometheusStatsFormatterTest, UniqueMetricName) {
-
-  // Create two counters and two gauges, all with unique names.
-  // statsAsPrometheus() should return four implying it found
-  // four unique stat names.
-
-  addCounter("cluster.test_cluster_1.upstream_cx_total",
-             {{makeStat("a.tag-name"), makeStat("a.tag-value")}});
-  addCounter("cluster.test_cluster_2.upstream_cx_total",
-             {{makeStat("another_tag_name"), makeStat("another_tag-value")}});
-  addGauge("cluster.test_cluster_3.upstream_cx_total",
-           {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}});
-  addGauge("cluster.test_cluster_4.upstream_cx_total",
-           {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}});
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          false, absl::nullopt);
-  EXPECT_EQ(4UL, size);
-}
-
-TEST_F(PrometheusStatsFormatterTest, HistogramWithNoValuesAndNoTags) {
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(std::vector<uint64_t>(0));
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram = makeHistogram("histogram1", {});
-  ON_CALL(*histogram, cumulativeStatistics())
-      .WillByDefault(testing::ReturnRef(h1_cumulative_statistics));
-
-  addHistogram(histogram);
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          false, absl::nullopt);
-  EXPECT_EQ(1UL, size);
-
-  const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram
-envoy_histogram1_bucket{le="0.5"} 0
-envoy_histogram1_bucket{le="1"} 0
-envoy_histogram1_bucket{le="5"} 0
-envoy_histogram1_bucket{le="10"} 0
-envoy_histogram1_bucket{le="25"} 0
-envoy_histogram1_bucket{le="50"} 0
-envoy_histogram1_bucket{le="100"} 0
-envoy_histogram1_bucket{le="250"} 0
-envoy_histogram1_bucket{le="500"} 0
-envoy_histogram1_bucket{le="1000"} 0
-envoy_histogram1_bucket{le="2500"} 0
-envoy_histogram1_bucket{le="5000"} 0
-envoy_histogram1_bucket{le="10000"} 0
-envoy_histogram1_bucket{le="30000"} 0
-envoy_histogram1_bucket{le="60000"} 0
-envoy_histogram1_bucket{le="300000"} 0
-envoy_histogram1_bucket{le="600000"} 0
-envoy_histogram1_bucket{le="1800000"} 0
-envoy_histogram1_bucket{le="3600000"} 0
-envoy_histogram1_bucket{le="+Inf"} 0
-envoy_histogram1_sum{} 0
-envoy_histogram1_count{} 0
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
-TEST_F(PrometheusStatsFormatterTest, HistogramWithHighCounts) {
-  HistogramWrapper h1_cumulative;
-
-  // Force large counts to prove that the +Inf bucket doesn't overflow to scientific notation.
-  h1_cumulative.setHistogramValuesWithCounts(std::vector<std::pair<uint64_t, uint64_t>>({
-      {1, 100000},
-      {100, 1000000},
-      {1000, 100000000},
-  }));
-
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram = makeHistogram("histogram1", {});
-  ON_CALL(*histogram, cumulativeStatistics())
-      .WillByDefault(testing::ReturnRef(h1_cumulative_statistics));
-
-  addHistogram(histogram);
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          false, absl::nullopt);
-  EXPECT_EQ(1UL, size);
-
-  const std::string expected_output = R"EOF(# TYPE envoy_histogram1 histogram
-envoy_histogram1_bucket{le="0.5"} 0
-envoy_histogram1_bucket{le="1"} 0
-envoy_histogram1_bucket{le="5"} 100000
-envoy_histogram1_bucket{le="10"} 100000
-envoy_histogram1_bucket{le="25"} 100000
-envoy_histogram1_bucket{le="50"} 100000
-envoy_histogram1_bucket{le="100"} 100000
-envoy_histogram1_bucket{le="250"} 1100000
-envoy_histogram1_bucket{le="500"} 1100000
-envoy_histogram1_bucket{le="1000"} 1100000
-envoy_histogram1_bucket{le="2500"} 101100000
-envoy_histogram1_bucket{le="5000"} 101100000
-envoy_histogram1_bucket{le="10000"} 101100000
-envoy_histogram1_bucket{le="30000"} 101100000
-envoy_histogram1_bucket{le="60000"} 101100000
-envoy_histogram1_bucket{le="300000"} 101100000
-envoy_histogram1_bucket{le="600000"} 101100000
-envoy_histogram1_bucket{le="1800000"} 101100000
-envoy_histogram1_bucket{le="3600000"} 101100000
-envoy_histogram1_bucket{le="+Inf"} 101100000
-envoy_histogram1_sum{} 105105105000
-envoy_histogram1_count{} 101100000
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
-TEST_F(PrometheusStatsFormatterTest, OutputWithAllMetricTypes) {
-  addCounter("cluster.test_1.upstream_cx_total",
-             {{makeStat("a.tag-name"), makeStat("a.tag-value")}});
-  addCounter("cluster.test_2.upstream_cx_total",
-             {{makeStat("another_tag_name"), makeStat("another_tag-value")}});
-  addGauge("cluster.test_3.upstream_cx_total",
-           {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}});
-  addGauge("cluster.test_4.upstream_cx_total",
-           {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}});
-
-  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(h1_values);
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram1 =
-      makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")},
-                                                        {makeStat("key2"), makeStat("value2")}});
-  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;
-  addHistogram(histogram1);
-  EXPECT_CALL(*histogram1, cumulativeStatistics())
-      .WillOnce(testing::ReturnRef(h1_cumulative_statistics));
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          false, absl::nullopt);
-  EXPECT_EQ(5UL, size);
-
-  const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter
-envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0
-
-# TYPE envoy_cluster_test_2_upstream_cx_total counter
-envoy_cluster_test_2_upstream_cx_total{another_tag_name="another_tag-value"} 0
-
-# TYPE envoy_cluster_test_3_upstream_cx_total gauge
-envoy_cluster_test_3_upstream_cx_total{another_tag_name_3="another_tag_3-value"} 0
-
-# TYPE envoy_cluster_test_4_upstream_cx_total gauge
-envoy_cluster_test_4_upstream_cx_total{another_tag_name_4="another_tag_4-value"} 0
-
-# TYPE envoy_cluster_test_1_upstream_rq_time histogram
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7
-envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532
-envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
-// Test that output groups all metrics of the same name (with different tags) together,
-// as required by the Prometheus exposition format spec. Additionally, groups of metrics
-// should be sorted by their tags; the format specifies that it is preferred that metrics
-// are always grouped in the same order, and sorting is an easy way to ensure this.
-TEST_F(PrometheusStatsFormatterTest, OutputSortedByMetricName) {
-  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(h1_values);
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  // Create the 3 clusters in non-sorted order to exercise the sorting.
-  // Create two of each metric type (counter, gauge, histogram) so that
-  // the output for each needs to be collected together.
- for (const char* cluster : {"ccc", "aaa", "bbb"}) { - const Stats::StatNameTagVector tags{{makeStat("cluster"), makeStat(cluster)}}; - addCounter("cluster.upstream_cx_total", tags); - addCounter("cluster.upstream_cx_connect_fail", tags); - addGauge("cluster.upstream_cx_active", tags); - addGauge("cluster.upstream_rq_active", tags); - - for (const char* hist_name : {"cluster.upstream_rq_time", "cluster.upstream_response_time"}) { - auto histogram1 = makeHistogram(hist_name, tags); - histogram1->unit_ = Stats::Histogram::Unit::Milliseconds; - addHistogram(histogram1); - EXPECT_CALL(*histogram1, cumulativeStatistics()) - .WillOnce(testing::ReturnRef(h1_cumulative_statistics)); - } - } - - Buffer::OwnedImpl response; - auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response, - false, absl::nullopt); - EXPECT_EQ(6UL, size); - - const std::string expected_output = R"EOF(# TYPE envoy_cluster_upstream_cx_connect_fail counter -envoy_cluster_upstream_cx_connect_fail{cluster="aaa"} 0 -envoy_cluster_upstream_cx_connect_fail{cluster="bbb"} 0 -envoy_cluster_upstream_cx_connect_fail{cluster="ccc"} 0 - -# TYPE envoy_cluster_upstream_cx_total counter -envoy_cluster_upstream_cx_total{cluster="aaa"} 0 -envoy_cluster_upstream_cx_total{cluster="bbb"} 0 -envoy_cluster_upstream_cx_total{cluster="ccc"} 0 - -# TYPE envoy_cluster_upstream_cx_active gauge -envoy_cluster_upstream_cx_active{cluster="aaa"} 0 -envoy_cluster_upstream_cx_active{cluster="bbb"} 0 -envoy_cluster_upstream_cx_active{cluster="ccc"} 0 - -# TYPE envoy_cluster_upstream_rq_active gauge -envoy_cluster_upstream_rq_active{cluster="aaa"} 0 -envoy_cluster_upstream_rq_active{cluster="bbb"} 0 -envoy_cluster_upstream_rq_active{cluster="ccc"} 0 - -# TYPE envoy_cluster_upstream_response_time histogram -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="0.5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="25"} 1 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="50"} 2 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="100"} 4 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="250"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="2500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="5000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="10000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="30000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="60000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="300000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="1800000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="3600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="aaa",le="+Inf"} 7 -envoy_cluster_upstream_response_time_sum{cluster="aaa"} 5532 -envoy_cluster_upstream_response_time_count{cluster="aaa"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="0.5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1"} 0 
-envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="25"} 1 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="50"} 2 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="100"} 4 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="250"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="2500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="5000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="10000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="30000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="60000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="300000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="1800000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="3600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="bbb",le="+Inf"} 7 -envoy_cluster_upstream_response_time_sum{cluster="bbb"} 5532 -envoy_cluster_upstream_response_time_count{cluster="bbb"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="0.5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10"} 0 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="25"} 1 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="50"} 2 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="100"} 4 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="250"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="2500"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="5000"} 6 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="10000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="30000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="60000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="300000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="1800000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="3600000"} 7 -envoy_cluster_upstream_response_time_bucket{cluster="ccc",le="+Inf"} 7 -envoy_cluster_upstream_response_time_sum{cluster="ccc"} 5532 -envoy_cluster_upstream_response_time_count{cluster="ccc"} 7 - -# TYPE envoy_cluster_upstream_rq_time histogram -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="0.5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="25"} 1 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="50"} 2 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="100"} 4 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="250"} 6 
-envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="2500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="5000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="10000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="30000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="60000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="300000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="600000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="1800000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="3600000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="aaa",le="+Inf"} 7 -envoy_cluster_upstream_rq_time_sum{cluster="aaa"} 5532 -envoy_cluster_upstream_rq_time_count{cluster="aaa"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="0.5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="25"} 1 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="50"} 2 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="100"} 4 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="250"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="2500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="5000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="10000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="30000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="60000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="300000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="600000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="1800000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="3600000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="bbb",le="+Inf"} 7 -envoy_cluster_upstream_rq_time_sum{cluster="bbb"} 5532 -envoy_cluster_upstream_rq_time_count{cluster="bbb"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="0.5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10"} 0 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="25"} 1 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="50"} 2 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="100"} 4 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="250"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="2500"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="5000"} 6 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="10000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="30000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="60000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="300000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="600000"} 7 -envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="1800000"} 7 
-envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="3600000"} 7
-envoy_cluster_upstream_rq_time_bucket{cluster="ccc",le="+Inf"} 7
-envoy_cluster_upstream_rq_time_sum{cluster="ccc"} 5532
-envoy_cluster_upstream_rq_time_count{cluster="ccc"} 7
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
-TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnly) {
-  addCounter("cluster.test_1.upstream_cx_total",
-             {{makeStat("a.tag-name"), makeStat("a.tag-value")}});
-  addCounter("cluster.test_2.upstream_cx_total",
-             {{makeStat("another_tag_name"), makeStat("another_tag-value")}});
-  addGauge("cluster.test_3.upstream_cx_total",
-           {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}});
-  addGauge("cluster.test_4.upstream_cx_total",
-           {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}});
-
-  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(h1_values);
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram1 =
-      makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")},
-                                                        {makeStat("key2"), makeStat("value2")}});
-  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;
-  addHistogram(histogram1);
-  EXPECT_CALL(*histogram1, cumulativeStatistics())
-      .WillOnce(testing::ReturnRef(h1_cumulative_statistics));
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_, response,
-                                                          true, absl::nullopt);
-  EXPECT_EQ(1UL, size);
-
-  const std::string expected_output = R"EOF(# TYPE envoy_cluster_test_1_upstream_rq_time histogram
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="0.5"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10"} 0
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="25"} 1
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="50"} 2
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="100"} 4
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="250"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="500"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1000"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="2500"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="5000"} 6
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="10000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="30000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="60000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="300000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="600000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="1800000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="3600000"} 7
-envoy_cluster_test_1_upstream_rq_time_bucket{key1="value1",key2="value2",le="+Inf"} 7
-envoy_cluster_test_1_upstream_rq_time_sum{key1="value1",key2="value2"} 5532
-envoy_cluster_test_1_upstream_rq_time_count{key1="value1",key2="value2"} 7
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
-TEST_F(PrometheusStatsFormatterTest, OutputWithUsedOnlyHistogram) {
-  const std::vector<uint64_t> h1_values = {};
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(h1_values);
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram1 =
-      makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")},
-                                                        {makeStat("key2"), makeStat("value2")}});
-  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;
-  histogram1->used_ = false;
-  addHistogram(histogram1);
-
-  {
-    const bool used_only = true;
-    EXPECT_CALL(*histogram1, cumulativeStatistics()).Times(0);
-
-    Buffer::OwnedImpl response;
-    auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_,
-                                                            response, used_only, absl::nullopt);
-    EXPECT_EQ(0UL, size);
-  }
-
-  {
-    const bool used_only = false;
-    EXPECT_CALL(*histogram1, cumulativeStatistics())
-        .WillOnce(testing::ReturnRef(h1_cumulative_statistics));
-
-    Buffer::OwnedImpl response;
-    auto size = PrometheusStatsFormatter::statsAsPrometheus(counters_, gauges_, histograms_,
-                                                            response, used_only, absl::nullopt);
-    EXPECT_EQ(1UL, size);
-  }
-}
-
-TEST_F(PrometheusStatsFormatterTest, OutputWithRegexp) {
-  addCounter("cluster.test_1.upstream_cx_total",
-             {{makeStat("a.tag-name"), makeStat("a.tag-value")}});
-  addCounter("cluster.test_2.upstream_cx_total",
-             {{makeStat("another_tag_name"), makeStat("another_tag-value")}});
-  addGauge("cluster.test_3.upstream_cx_total",
-           {{makeStat("another_tag_name_3"), makeStat("another_tag_3-value")}});
-  addGauge("cluster.test_4.upstream_cx_total",
-           {{makeStat("another_tag_name_4"), makeStat("another_tag_4-value")}});
-
-  const std::vector<uint64_t> h1_values = {50, 20, 30, 70, 100, 5000, 200};
-  HistogramWrapper h1_cumulative;
-  h1_cumulative.setHistogramValues(h1_values);
-  Stats::HistogramStatisticsImpl h1_cumulative_statistics(h1_cumulative.getHistogram());
-
-  auto histogram1 =
-      makeHistogram("cluster.test_1.upstream_rq_time", {{makeStat("key1"), makeStat("value1")},
-                                                        {makeStat("key2"), makeStat("value2")}});
-  histogram1->unit_ = Stats::Histogram::Unit::Milliseconds;
-  addHistogram(histogram1);
-
-  Buffer::OwnedImpl response;
-  auto size = PrometheusStatsFormatter::statsAsPrometheus(
-      counters_, gauges_, histograms_, response, false,
-      absl::optional<std::regex>{std::regex("cluster.test_1.upstream_cx_total")});
-  EXPECT_EQ(1UL, size);
-
-  const std::string expected_output =
-      R"EOF(# TYPE envoy_cluster_test_1_upstream_cx_total counter
-envoy_cluster_test_1_upstream_cx_total{a_tag_name="a.tag-value"} 0
-
-)EOF";
-
-  EXPECT_EQ(expected_output, response.toString());
-}
-
 } // namespace Server
 } // namespace Envoy
diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py
index c9683f8cce4d..27bc32dbe926 100755
--- a/tools/code_format/check_format.py
+++ b/tools/code_format/check_format.py
@@ -79,7 +79,8 @@
     "./source/extensions/filters/http/squash/squash_filter.h",
     "./source/extensions/filters/http/squash/squash_filter.cc", "./source/server/http/utils.h",
     "./source/server/http/utils.cc", "./source/server/http/stats_handler.h",
-    "./source/server/http/stats_handler.cc", "./tools/clang_tools/api_booster/main.cc",
+    "./source/server/http/stats_handler.cc", "./source/server/http/prometheus_stats.h",
+    "./source/server/http/prometheus_stats.cc", "./tools/clang_tools/api_booster/main.cc",
"./tools/clang_tools/api_booster/proto_cxx_utils.cc", "./source/common/common/version.cc") # Only one C++ file should instantiate grpc_init