From 370fe9087848862d02f0e5a333fcb4cd37cf5ca0 Mon Sep 17 00:00:00 2001
From: Ashley Xu <139821907+ashleyxuu@users.noreply.github.com>
Date: Fri, 22 Mar 2024 18:06:09 -0700
Subject: [PATCH] docs: add code samples for metrics.{recall_score,
 precision_score, f1_score} (#502)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

…_score}

Thank you for opening a Pull Request! Before submitting your PR, there are a
few things you can do to make sure it goes smoothly:
- [ ] Make sure to open an issue as a
  [bug/issue](https://togithub.com/googleapis/python-bigquery-dataframes/issues/new/choose)
  before writing your code! That way we can discuss the change, evaluate
  designs, and agree on the general idea
- [ ] Ensure the tests and linter pass
- [ ] Code coverage does not decrease (if any source code was changed)
- [ ] Appropriate docs were updated (if necessary)

Fixes # 🦕
---
 .../sklearn/metrics/_classification.py | 48 +++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/third_party/bigframes_vendored/sklearn/metrics/_classification.py b/third_party/bigframes_vendored/sklearn/metrics/_classification.py
index 35c22f4cd0..00bbf8cd60 100644
--- a/third_party/bigframes_vendored/sklearn/metrics/_classification.py
+++ b/third_party/bigframes_vendored/sklearn/metrics/_classification.py
@@ -128,6 +128,22 @@ def recall_score(
 
     The best value is 1 and the worst value is 0.
 
+    **Examples:**
+
+        >>> import bigframes.pandas as bpd
+        >>> import bigframes.ml.metrics
+        >>> bpd.options.display.progress_bar = None
+
+        >>> y_true = bpd.DataFrame([0, 1, 2, 0, 1, 2])
+        >>> y_pred = bpd.DataFrame([0, 2, 1, 0, 0, 1])
+        >>> recall_score = bigframes.ml.metrics.recall_score(y_true, y_pred, average=None)
+        >>> recall_score
+        0    1
+        1    0
+        2    0
+        dtype: int64
+
+
     Args:
         y_true (Series or DataFrame of shape (n_samples,)):
             Ground truth (correct) target values.
@@ -137,6 +153,7 @@
             default='binary'):
             This parameter is required for multiclass/multilabel targets.
             Possible values are 'None', 'micro', 'macro', 'samples', 'weighted', 'binary'.
+            Only average=None is supported.
 
     Returns:
         float (if average is not None) or Series of float of shape \
            (n_unique_labels,): Recall
@@ -160,6 +177,21 @@ def precision_score(
 
     The best value is 1 and the worst value is 0.
 
+    **Examples:**
+
+        >>> import bigframes.pandas as bpd
+        >>> import bigframes.ml.metrics
+        >>> bpd.options.display.progress_bar = None
+
+        >>> y_true = bpd.DataFrame([0, 1, 2, 0, 1, 2])
+        >>> y_pred = bpd.DataFrame([0, 2, 1, 0, 0, 1])
+        >>> precision_score = bigframes.ml.metrics.precision_score(y_true, y_pred, average=None)
+        >>> precision_score
+        0    0.666667
+        1    0.000000
+        2    0.000000
+        dtype: float64
+
     Args:
         y_true: Series or DataFrame of shape (n_samples,)
             Ground truth (correct) target values.
@@ -169,6 +201,7 @@
             default='binary'
             This parameter is required for multiclass/multilabel targets.
             Possible values are 'None', 'micro', 'macro', 'samples', 'weighted', 'binary'.
+            Only average=None is supported.
 
     Returns:
         precision: float (if average is not None) or Series of float of shape \
@@ -195,6 +228,21 @@ def f1_score(
     the F1 score of each class with weighting depending on the ``average`` parameter.
 
+    **Examples:**
+
+        >>> import bigframes.pandas as bpd
+        >>> import bigframes.ml.metrics
+        >>> bpd.options.display.progress_bar = None
+
+        >>> y_true = bpd.DataFrame([0, 1, 2, 0, 1, 2])
+        >>> y_pred = bpd.DataFrame([0, 2, 1, 0, 0, 1])
+        >>> f1_score = bigframes.ml.metrics.f1_score(y_true, y_pred, average=None)
+        >>> f1_score
+        0    0.8
+        1    0.0
+        2    0.0
+        dtype: float64
+
     Args:
         y_true: Series or DataFrame of shape (n_samples,)
            Ground truth (correct) target values.
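
Note (not part of the patch): a quick way to sanity-check the doctest values added above is to run the same labels through scikit-learn, from which these vendored docstrings are derived. The sketch below assumes scikit-learn is installed in your environment; with average=None each function returns one score per class, matching the per-class values shown in the examples.

# Sketch: cross-checking the per-class values from the doctests above.
# Assumes scikit-learn is available; it is not required by the patch itself.
from sklearn.metrics import f1_score, precision_score, recall_score

y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 2, 1, 0, 0, 1]

# With average=None, each call returns one score per class label.
print(recall_score(y_true, y_pred, average=None))     # per-class recall: 1, 0, 0
print(precision_score(y_true, y_pred, average=None))  # per-class precision: 0.6667, 0, 0
print(f1_score(y_true, y_pred, average=None))         # per-class F1: 0.8, 0, 0

# Worked arithmetic for class 0: label 0 is predicted at positions 0, 3, 4
# (true labels 0, 0, 1), so tp=2, fp=1, fn=0. Precision = 2/3, recall = 1,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8, matching the doctest output.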