From 9c2a5e08671ad04a51e3cabc9baa6c2357d4f449 Mon Sep 17 00:00:00 2001
From: akshatvishu
Date: Tue, 17 Oct 2023 10:00:30 +0000
Subject: [PATCH 1/4] feat(losses): add hinge_embedding_loss to ivy
 experimental API.

The tests are failing locally due to an error while using the
@with_supported_device_and_dtypes decorator.
---
 ivy/data_classes/array/experimental/losses.py |  77 ++++++++
 .../container/experimental/losses.py          | 183 ++++++++++++++++++
 .../backends/jax/experimental/losses.py       |  28 ++-
 .../backends/numpy/experimental/losses.py     |  57 +++++-
 .../backends/paddle/experimental/losses.py    |  24 +++
 .../tensorflow/experimental/losses.py         |  27 +++
 .../backends/torch/experimental/losses.py     |  31 ++-
 ivy/functional/ivy/experimental/losses.py     |  92 +++++++++
 .../test_experimental/test_nn/test_losses.py  |  54 ++++++
 9 files changed, 562 insertions(+), 11 deletions(-)

diff --git a/ivy/data_classes/array/experimental/losses.py b/ivy/data_classes/array/experimental/losses.py
index f676338c11c89..c5dc944c990fc 100644
--- a/ivy/data_classes/array/experimental/losses.py
+++ b/ivy/data_classes/array/experimental/losses.py
@@ -365,3 +365,80 @@ def poisson_nll_loss(
             eps=eps,
             reduction=reduction,
         )
+
+    def hinge_embedding_loss(
+        self: Union[ivy.Array, ivy.NativeArray],
+        target: Union[ivy.Array, ivy.NativeArray],
+        *,
+        margin: float = 1.0,
+        reduction: str = "mean",
+    ) -> ivy.Array:
+        r"""Measures loss from input `x` and label `y` with values 1 or -1. It
+        evaluates if two inputs are similar or not, often used for embedding or
+        semi-supervised learning.
+
+        Loss for the `n`-th sample:
+        .. math::
+            l_n = \begin{cases}
+                x_n, & \text{if}\; y_n = 1,\\
+                \max \{0, margin - x_n\}, & \text{if}\; y_n = -1,
+            \end{cases}
+
+        Total loss:
+        .. math::
+            \ell(x, y) = \begin{cases}
+                \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
+                \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
+            \end{cases}
+
+        where :math:`L = \{l_1,\dots,l_N\}^\top`.
+
+        Parameters
+        ----------
+        input
+            Input tensor with dtype float.
+            The shape is [N, \*], where N is batch size and `\*` represents
+            any number of additional dimensions.
+        label
+            Label tensor containing 1 or -1 with dtype float32 or float64.
+            Its shape matches that of the input.
+        margin
+            Sets the hyperparameter margin. Determines the necessary input size
+            for hinge_embedding_loss calculations when label is -1. Inputs smaller
+            than the margin are minimized with hinge_embedding_loss.
+            Default is 1.0.
+        reduction
+            Specifies how to aggregate the loss across the batch. Options are:
+            - ``'none'``: Returns the unreduced loss.
+            - ``'mean'``: Returns the mean loss.
+            - ``'sum'``: Returns the summed loss.
+            Default is ``'mean'``.
+
+        Shape
+        -----
+        - Input: :math:`(*)` where :math:`*` means any number of dimensions. \
+          The sum operation operates over all the elements.
+        - Target: :math:`(*)`, same shape as the input
+        - Output: scalar. If :attr:`reduction` is ``'none'``,
+          then same shape as the input
+
+        Returns
+        -------
+        ret
+            Hinge embedding loss calculated from the input and label,
+            shaped based on the reduction method.
+
+        Examples
+        --------
+        >>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
+        >>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64)
+        >>> loss = poisson_nll_loss(input_tensor, target_tensor, log_input=True)
+        >>> print(loss)
+        ivy.array(16.1978)
+        """
+        return ivy.hinge_embedding_loss(
+            self._data,
+            target,
+            margin=margin,
+            reduction=reduction,
+        )
diff --git a/ivy/data_classes/container/experimental/losses.py b/ivy/data_classes/container/experimental/losses.py
index 799c44adfcb25..bdc8dcce7f852 100644
--- a/ivy/data_classes/container/experimental/losses.py
+++ b/ivy/data_classes/container/experimental/losses.py
@@ -1089,3 +1089,186 @@ def poisson_nll_loss(
             prune_unapplied=prune_unapplied,
             map_sequences=map_sequences,
         )
+
+    @staticmethod
+    def _static_hinge_embedding_loss(
+        input: Union[ivy.Container, ivy.Array, ivy.NativeArray],
+        target: Union[ivy.Container, ivy.Array, ivy.NativeArray],
+        *,
+        margin: Union[float, ivy.Container] = 1.0,
+        reduction: Union[str, ivy.Container] = "mean",
+        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+        to_apply: Union[bool, ivy.Container] = True,
+        prune_unapplied: Union[bool, ivy.Container] = False,
+        map_sequences: Union[bool, ivy.Container] = False,
+    ) -> ivy.Container:
+        r"""ivy.Container static method variant of ivy.hinge_embedding_loss.
+        This method simplywraps the function, and so the docstring for
+        ivy.hinge_embedding_loss also applies to this method with minimal
+        changes.
+
+        Parameters
+        ----------
+        input
+            input array or container containing the input values.
+        target
+            input array or container containing the target labels.
+        margin
+            Sets the hyperparameter margin. Determines the necessary input size
+            for hinge_embedding_loss calculations when label is -1. Inputs smaller
+            than the margin are minimized with hinge_embedding_loss.
+            Default is 1.0.
+        reduction
+            Specifies how to aggregate the loss across the batch. Options are:
+            - ``'none'``: Returns the unreduced loss.
+            - ``'mean'``: Returns the mean loss.
+            - ``'sum'``: Returns the summed loss.
+            Default is ``'mean'``.
+        key_chains
+            The key-chains to apply or not apply the method to. Default is ``None``.
+        to_apply
+            If True, the method will be applied to key_chains, otherwise key_chains
+            will be skipped. Default is ``True``.
+        prune_unapplied
+            Whether to prune key_chains for which the function was not applied.
+            Default is ``False``.
+        map_sequences
+            Whether to also map method to sequences (lists, tuples).
+            Default is ``False``.
+
+        Shape
+        -----
+        - Input: :math:`(*)` where :math:`*` means any number of dimensions. \
+          The sum operation operates over all the elements.
+        - Target: :math:`(*)`, same shape as the input
+        - Output: scalar. If :attr:`reduction` is ``'none'``,
+          then same shape as the input
+
+        Returns
+        -------
+        ret
+            Hinge embedding loss calculated from the input and label,
+            shaped based on the reduction method.
+
+        Examples
+        --------
+        With :class:`ivy.Container` inputs:
+
+        >>> x = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32),
+        ...                   b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32))
+        >>> y = ivy.Container(a=ivy.array([[1, 0, 2]], dtype=ivy.float32),
+        ...                   b=ivy.array([[3, 2, 1]], dtype=ivy.float32))
+        >>> z = ivy.Container._static_poisson_nll_loss(x,y)
+        >>> print(z)
+        {
+            a: ivy.array(1.06446016),
+            b: ivy.array(0.55611551)
+        }
+
+        With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
+
+        >>> x = ivy.array([[1, 0, 2]], dtype=ivy.float32)
+        >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32),
+        ...                   b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32))
+        >>> z = ivy.Container._static_poisson_nll_loss(x, y)
+        >>> print(z)
+        {
+            a: ivy.array(3.30244565),
+            b: ivy.array(3.30244565)
+        }
+        """
+        return ContainerBase.cont_multi_map_in_function(
+            "hinge_embedding_loss",
+            input,
+            target,
+            margin=margin,
+            reduction=reduction,
+            key_chains=key_chains,
+            to_apply=to_apply,
+            prune_unapplied=prune_unapplied,
+            map_sequences=map_sequences,
+        )
+
+    def hinge_embedding_loss(
+        self: Union[ivy.Container, ivy.Array, ivy.NativeArray],
+        target: Union[ivy.Container, ivy.Array, ivy.NativeArray],
+        *,
+        margin: Union[float, ivy.Container] = 1.0,
+        reduction: Union[str, ivy.Container] = "mean",
+        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+        to_apply: Union[bool, ivy.Container] = True,
+        prune_unapplied: Union[bool, ivy.Container] = False,
+        map_sequences: Union[bool, ivy.Container] = False,
+    ) -> ivy.Container:
+        r"""ivy.Container instance method variant of ivy.hinge_embedding_loss.
+        This method simply wraps the function, and so the docstring for
+        ivy.hinge_embedding_loss also applies to this method with minimal
+        changes.
+
+        Parameters
+        ----------
+        input
+            input array or container containing the input values.
+        target
+            input array or container containing the target labels.
+        margin
+            Sets the hyperparameter margin. Determines the necessary input size
+            for hinge_embedding_loss calculations when label is -1. Inputs smaller
+            than the margin are minimized with hinge_embedding_loss.
+            Default is 1.0.
+        reduction
+            Specifies how to aggregate the loss across the batch. Options are:
+            - ``'none'``: Returns the unreduced loss.
+            - ``'mean'``: Returns the mean loss.
+            - ``'sum'``: Returns the summed loss.
+            Default is ``'mean'``.
+        key_chains
+            The key-chains to apply or not apply the method to. Default is ``None``.
+        to_apply
+            If True, the method will be applied to key_chains, otherwise key_chains
+            will be skipped. Default is ``True``.
+        prune_unapplied
+            Whether to prune key_chains for which the function was not applied.
+            Default is ``False``.
+        map_sequences
+            Whether to also map method to sequences (lists, tuples).
+            Default is ``False``.
+
+        Shape
+        -----
+        - Input: :math:`(*)` where :math:`*` means any number of dimensions. \
+          The sum operation operates over all the elements.
+        - Target: :math:`(*)`, same shape as the input
+        - Output: scalar. If :attr:`reduction` is ``'none'``,
+          then same shape as the input
+
+        Returns
+        -------
+        ret
+            Hinge embedding loss calculated from the input and label,
+            shaped based on the reduction method.
+
+
+        Examples
+        --------
+        >>> x = ivy.Container(a=ivy.array([[1, 0, 2]], dtype=ivy.float32),
+        ...                   b=ivy.array([[3, 2, 1]], dtype=ivy.float32))
+        >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32),
+        ...
b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32)) + >>> z = x.poisson_nll_loss(y) + >>> print(z) + { + a: ivy.array(3.30244565), + b: ivy.array(9.06429195) + } + """ + return self._static_hinge_embedding_loss( + self, + target, + margin=margin, + reduction=reduction, + key_chains=key_chains, + to_apply=to_apply, + prune_unapplied=prune_unapplied, + map_sequences=map_sequences, + ) diff --git a/ivy/functional/backends/jax/experimental/losses.py b/ivy/functional/backends/jax/experimental/losses.py index bb7e3e3020d50..e83dea74eb759 100644 --- a/ivy/functional/backends/jax/experimental/losses.py +++ b/ivy/functional/backends/jax/experimental/losses.py @@ -114,7 +114,7 @@ def _validate_poisson_nll_params( @with_supported_device_and_dtypes( { - "0.4.14 and below": { + "0.4.18 and below": { "cpu": ("float16", "float32", "float64"), } }, @@ -153,3 +153,29 @@ def poisson_nll_loss( cond = jnp.logical_and(target_arr >= zeroes, target_arr <= ones) loss = loss + jnp.where(cond, zeroes, striling_approx_term) return _apply_loss_reduction(loss, reduction) + + +@with_supported_device_and_dtypes( + { + "0.4.18 and below": { + "cpu": ("float32", "float64"), + } + }, + backend_version, +) +def hinge_embedding_loss( + input: JaxArray, + target: JaxArray, + *, + margin: float = 1.0, + reduction: str = "mean", +) -> JaxArray: + zero_ = jnp.zeros([1], dtype=input.dtype) + + relu_part = jnp.maximum(margin - input, 0) + + loss = jnp.where(target == 1.0, input, zero_) + jnp.where( + target == -1.0, relu_part, zero_ + ) + + return _apply_loss_reduction(loss, reduction) diff --git a/ivy/functional/backends/numpy/experimental/losses.py b/ivy/functional/backends/numpy/experimental/losses.py index 32e1af4c2577e..c9b0ec1d1ac98 100644 --- a/ivy/functional/backends/numpy/experimental/losses.py +++ b/ivy/functional/backends/numpy/experimental/losses.py @@ -2,13 +2,20 @@ from typing import Optional from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array from ivy.func_wrapper import ( - with_unsupported_dtypes, + with_unsupported_device_and_dtypes, with_supported_device_and_dtypes, ) from . 
import backend_version -@with_unsupported_dtypes({"1.26.0 and below": ("bool",)}, backend_version) +@with_unsupported_device_and_dtypes( + { + "1.26.0 and below": { + "cpu": "bool", + } + }, + backend_version, +) @_scalar_output_to_0d_array def huber_loss( input: np.ndarray, @@ -32,7 +39,14 @@ def huber_loss( # Implementation of smooth_l1_loss in the given format -@with_unsupported_dtypes({"1.26.0 and below": ("bool",)}, backend_version) +@with_unsupported_device_and_dtypes( + { + "1.26.0 and below": { + "cpu": "bool", + } + }, + backend_version, +) @_scalar_output_to_0d_array def smooth_l1_loss( input: np.ndarray, @@ -56,7 +70,14 @@ def smooth_l1_loss( return loss -@with_unsupported_dtypes({"1.26.0 and below": ("bool",)}, backend_version) +@with_unsupported_device_and_dtypes( + { + "1.26.0 and below": { + "cpu": "bool", + } + }, + backend_version, +) @_scalar_output_to_0d_array def soft_margin_loss( input: np.ndarray, @@ -128,7 +149,7 @@ def _validate_poisson_nll_params( @with_supported_device_and_dtypes( { - "1.25.2 and below": { + "1.26.0 and below": { "cpu": ("float16", "float32", "float64"), } }, @@ -167,3 +188,29 @@ def poisson_nll_loss( cond = np.logical_and(target_arr >= zeroes, target_arr <= ones) loss = loss + np.where(cond, zeroes, striling_approx_term) return _apply_loss_reduction(loss, reduction) + + +@with_supported_device_and_dtypes( + { + "1.26.0 and below": { + "cpu": ("float32", "float64"), + } + }, + backend_version, +) +def hinge_embedding_loss( + input: np.ndarray, + target: np.ndarray, + *, + margin: float = 1.0, + reduction: str = "mean", +) -> np.ndarray: + zero_ = np.zeros([1], dtype=input.dtype) + + relu_part = np.maximum(margin - input, 0) + + loss = np.where(target == 1.0, input, zero_) + np.where( + target == -1.0, relu_part, zero_ + ) + + return _apply_loss_reduction(loss, reduction) diff --git a/ivy/functional/backends/paddle/experimental/losses.py b/ivy/functional/backends/paddle/experimental/losses.py index d2ec322ea218e..c068d6d956669 100644 --- a/ivy/functional/backends/paddle/experimental/losses.py +++ b/ivy/functional/backends/paddle/experimental/losses.py @@ -239,3 +239,27 @@ def poisson_nll_loss( cond = paddle.logical_and(target_arr >= zeroes, target_arr <= ones) loss = loss + paddle.where(cond, zeroes, striling_approx_term) return _apply_loss_reduction(loss, reduction) + + +@with_supported_device_and_dtypes( + { + "2.5.1 and below": { + "cpu": ("float32", "float64"), + "gpu": ("float16", "float32", "float64"), + } + }, + backend_version, +) +def hinge_embedding_loss( + input: paddle.Tensor, + target: paddle.Tensor, + *, + margin: float = 1.0, + reduction: str = "mean", +) -> paddle.Tensor: + return paddle.nn.functional.hinge_embedding_loss( + input, + target, + margin=margin, + reduction=reduction, + ) diff --git a/ivy/functional/backends/tensorflow/experimental/losses.py b/ivy/functional/backends/tensorflow/experimental/losses.py index e9140d6ee382e..ab7465f95d27b 100644 --- a/ivy/functional/backends/tensorflow/experimental/losses.py +++ b/ivy/functional/backends/tensorflow/experimental/losses.py @@ -156,3 +156,30 @@ def poisson_nll_loss( cond = tf.math.logical_and(target_tensor >= zeros, target_tensor <= ones) loss = loss + tf.where(cond, zeros, stirling_approx) return _apply_loss_reduction(loss, reduction) + + +@with_supported_device_and_dtypes( + { + "2.14.0 and below": { + "cpu": ("float32", "float64"), + "gpu": ("float32", "float64"), + } + }, + backend_version, +) +def hinge_embedding_loss( + input: tf.Tensor, + target: tf.Tensor, + *, + 
margin: float = 1.0, + reduction: str = "mean", +) -> tf.Tensor: + zero_ = tf.zeros([1], dtype=input.dtype) + + relu_part = tf.math.maximum(margin - input, 0) + + loss = tf.where(tf.equal(target, 1.0), input, zero_) + tf.where( + tf.equal(target, -1.0), relu_part, zero_ + ) + + return _apply_loss_reduction(loss, reduction) diff --git a/ivy/functional/backends/torch/experimental/losses.py b/ivy/functional/backends/torch/experimental/losses.py index adb6d6e76510b..a7a9de80750cb 100644 --- a/ivy/functional/backends/torch/experimental/losses.py +++ b/ivy/functional/backends/torch/experimental/losses.py @@ -11,7 +11,7 @@ @with_unsupported_dtypes( - {"2.1.0 and below": ("unit8", "int8", "int16", "int32", "int64", "bool")}, + {"2.1.0 and below": ("unit8", "int8", "int16", "int32", "int64", "bool")}, backend_version, ) def l1_loss( @@ -59,7 +59,7 @@ def smooth_l1_loss( @with_unsupported_dtypes( - {"2.1.0 and below": ("uint8", "int8", "int16", "int32", "int64", "bool")}, + {"2.1.0 and below": ("uint8", "int8", "int16", "int32", "int64", "bool")}, backend_version, ) def huber_loss( @@ -77,7 +77,7 @@ def huber_loss( @with_unsupported_dtypes( { - "2.1.0 and below": ( + "2.1.0 and below": ( "float16", "uint8", "int8", @@ -104,7 +104,7 @@ def soft_margin_loss( @with_supported_dtypes( - {"2.1.0 and below": ("float",)}, + {"2.1.0 and below": ("float",)}, backend_version, ) def kl_div( @@ -124,7 +124,7 @@ def kl_div( @with_supported_device_and_dtypes( { - "2.14.0 and below": { + "2.1.0 and below": { "cpu": ( "float32", "float64", @@ -152,3 +152,24 @@ def poisson_nll_loss( return torch.nn.functional.poisson_nll_loss( input, target, log_input=log_input, full=full, eps=eps, reduction=reduction ) + + +@with_supported_device_and_dtypes( + { + "2.1.0 and below": { + "cpu": ("float16", "float32", "float64"), + "gpu": ("float16", "float32", "float64"), + } + }, + backend_version, +) +def hinge_embedding_loss( + input: torch.Tensor, + target: torch.Tensor, + *, + margin: float = 1.0, + reduction: str = "mean", +) -> torch.Tensor: + return torch.nn.functional.hinge_embedding_loss( + input, target, margin=margin, reduction=reduction + ) diff --git a/ivy/functional/ivy/experimental/losses.py b/ivy/functional/ivy/experimental/losses.py index 0824093ca069d..be35791b790c4 100644 --- a/ivy/functional/ivy/experimental/losses.py +++ b/ivy/functional/ivy/experimental/losses.py @@ -571,3 +571,95 @@ def poisson_nll_loss( eps=eps, reduction=reduction, ) + + +# @handle_exceptions +# @handle_nestable +# @handle_array_like_without_promotion +# @to_native_arrays_and_back +# @handle_exceptions +# @handle_backend_invalid +# @handle_nestable +# @to_native_arrays_and_back +# @handle_array_function +# @handle_device +@handle_exceptions +@handle_nestable +@handle_array_like_without_promotion +@to_native_arrays_and_back +def hinge_embedding_loss( + input: Union[ivy.Array, ivy.NativeArray], + target: Union[ivy.Array, ivy.NativeArray], + *, + margin: float = 1.0, + reduction: str = "mean", +) -> ivy.Array: + r"""Measures loss from input `x` and label `y` with values 1 or -1. It + evaluates if two inputs are similar or not, often used for embedding or + semi-supervised learning. + + Loss for the `n`-th sample: + .. math:: + l_n = \begin{cases} + x_n, & \text{if}\; y_n = 1,\\ + \max \{0, margin - x_n\}, & \text{if}\; y_n = -1, + \end{cases} + + Total loss: + .. 
math:: + \ell(x, y) = \begin{cases} + \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\ + \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} + \end{cases} + + where :math:`L = \{l_1,\dots,l_N\}^\top`. + + Parameters + ---------- + input + Input tensor with dtype float. + The shape is [N, \*], where N is batch size and `\*` represents + any number of additional dimensions. + label + Label tensor containing 1 or -1 with dtype float32 or float64. + Its shape matches that of the input. + margin + Sets the hyperparameter margin. Determines the necessary input size + for hinge_embedding_loss calculations when label is -1. Inputs smaller + than the margin are minimized with hinge_embedding_loss. + Default is 1.0. + reduction + Specifies how to aggregate the loss across the batch. Options are: + - ``'none'``: Returns the unreduced loss. + - ``'mean'``: Returns the mean loss. + - ``'sum'``: Returns the summed loss. + Default is ``'mean'``. + + Shape + ----- + - Input: :math:`(*)` where :math:`*` means, any number of dimensions. \ + The sum operation operates over all the elements. + - Target: :math:`(*)`, same shape as the input + - Output: scalar. If :attr:`reduction` is ``'none'``, + then same shape as the input + + Returns + ------- + ret + Hinge embedding loss calculated from the input and label, + shaped based on the reduction method. + + Examples + -------- + >>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64) + >>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64) + >>> loss = poisson_nll_loss(input_tensor, target_tensor, log_input=True) + >>> print(loss) + ivy.array(16.1978) + """ + return ivy.current_backend().hinge_embedding_loss( + input, + target, + margin=margin, + reduction=reduction, + ) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py index 2161ee814304b..0b7fb8fc456bf 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py @@ -7,6 +7,60 @@ from ivy_tests.test_ivy.helpers import handle_test +# hinge_embedding_loss +@handle_test( + fn_tree="functional.ivy.experimental.hinge_embedding_loss", + dtype_and_input=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_value=-10, + max_value=10, + allow_inf=False, + min_num_dims=1, + min_dim_size=1, + max_num_dims=5, + ), + dtype_and_target=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_value=-1, + max_value=1, + allow_inf=False, + min_num_dims=1, + min_dim_size=1, + max_num_dims=5, + ), + margin=st.floats(min_value=1, max_value=5), + reduction=st.sampled_from(["none", "sum", "mean"]), + test_with_out=st.just(False), + test_gradients=st.just(False), + ground_truth_backend="torch", +) +def test_hinge_embedding_loss( + dtype_and_input, + dtype_and_target, + margin, + reduction, + test_flags, + backend_fw, + fn_name, + on_device, +): + input_dtype, input = dtype_and_input + target_dtype, target = dtype_and_target + helpers.test_function( + input_dtypes=input_dtype, + test_flags=test_flags, + backend_to_test=backend_fw, + fn_name=fn_name, + on_device=on_device, + input=input[0], + target=target[0], + margin=margin, + reduction=reduction, + rtol_=1e-05, + atol_=1e-05, + ) + + # huber_loss @handle_test( fn_tree="functional.ivy.experimental.huber_loss", From 9b1b6fbf6fb283b136aa002c0ecabc59e294cec9 Mon Sep 17 00:00:00 
2001 From: ivy-branch Date: Tue, 17 Oct 2023 12:50:40 +0000 Subject: [PATCH 2/4] =?UTF-8?q?=F0=9F=A4=96=20Lint=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ivy/data_classes/array/experimental/losses.py | 7 ++++--- .../container/experimental/losses.py | 16 ++++++++-------- ivy/functional/ivy/experimental/losses.py | 6 +++--- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/ivy/data_classes/array/experimental/losses.py b/ivy/data_classes/array/experimental/losses.py index 459a17025a3d3..7070f29fc89ac 100644 --- a/ivy/data_classes/array/experimental/losses.py +++ b/ivy/data_classes/array/experimental/losses.py @@ -380,9 +380,10 @@ def hinge_embedding_loss( margin: float = 1.0, reduction: str = "mean", ) -> ivy.Array: - r"""Measures loss from input `x` and label `y` with values 1 or -1. It - evaluates if two inputs are similar or not, often used for embedding or - semi-supervised learning. + r""" + Measures loss from input `x` and label `y` with values 1 or -1. It evaluates if + two inputs are similar or not, often used for embedding or semi-supervised + learning. Loss for the `n`-th sample: .. math:: diff --git a/ivy/data_classes/container/experimental/losses.py b/ivy/data_classes/container/experimental/losses.py index 061ed3f8c5a6f..92db6b1142fe6 100644 --- a/ivy/data_classes/container/experimental/losses.py +++ b/ivy/data_classes/container/experimental/losses.py @@ -1116,10 +1116,10 @@ def _static_hinge_embedding_loss( prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> ivy.Container: - r"""ivy.Container static method variant of ivy.hinge_embedding_loss. - This method simplywraps the function, and so the docstring for - ivy.hinge_embedding_loss also applies to this method with minimal - changes. + r""" + ivy.Container static method variant of ivy.hinge_embedding_loss. This method + simplywraps the function, and so the docstring for ivy.hinge_embedding_loss also + applies to this method with minimal changes. Parameters ---------- @@ -1214,10 +1214,10 @@ def hinge_embedding_loss( prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> ivy.Container: - r"""ivy.Container instance method variant of ivy.hinge_embedding_loss. - This method simply wraps the function, and so the docstring for - ivy.hinge_embedding_loss also applies to this method with minimal - changes. + r""" + ivy.Container instance method variant of ivy.hinge_embedding_loss. This method + simply wraps the function, and so the docstring for ivy.hinge_embedding_loss + also applies to this method with minimal changes. Parameters ---------- diff --git a/ivy/functional/ivy/experimental/losses.py b/ivy/functional/ivy/experimental/losses.py index ce189e3f7e7e9..9f20c946ed3e9 100644 --- a/ivy/functional/ivy/experimental/losses.py +++ b/ivy/functional/ivy/experimental/losses.py @@ -597,9 +597,9 @@ def hinge_embedding_loss( margin: float = 1.0, reduction: str = "mean", ) -> ivy.Array: - r"""Measures loss from input `x` and label `y` with values 1 or -1. It - evaluates if two inputs are similar or not, often used for embedding or - semi-supervised learning. + r""" + Measures loss from input `x` and label `y` with values 1 or -1. It evaluates if two + inputs are similar or not, often used for embedding or semi-supervised learning. Loss for the `n`-th sample: .. 
math::

From e8564a920ae016fc1c74c08bf91d81f5f3249e95 Mon Sep 17 00:00:00 2001
From: akshatvishu
Date: Thu, 19 Oct 2023 15:22:44 +0000
Subject: [PATCH 3/4] feat: add examples to hinge_embedding_loss docstring

---
 ivy/data_classes/array/experimental/losses.py |  21 +++--
 .../container/experimental/losses.py          |  58 ++++++------
 .../backends/jax/experimental/losses.py       |   7 +-
 .../backends/numpy/experimental/losses.py     |   9 +-
 .../tensorflow/experimental/losses.py         |   6 +-
 ivy/functional/ivy/experimental/losses.py     |  33 +++----
 .../test_experimental/test_nn/test_losses.py  |  92 +++++++++++++------
 7 files changed, 132 insertions(+), 94 deletions(-)

diff --git a/ivy/data_classes/array/experimental/losses.py b/ivy/data_classes/array/experimental/losses.py
index 459a17025a3d3..94c60683bd2e5 100644
--- a/ivy/data_classes/array/experimental/losses.py
+++ b/ivy/data_classes/array/experimental/losses.py
@@ -380,9 +380,10 @@ def hinge_embedding_loss(
         margin: float = 1.0,
         reduction: str = "mean",
     ) -> ivy.Array:
-        r"""Measures loss from input `x` and label `y` with values 1 or -1. It
-        evaluates if two inputs are similar or not, often used for embedding or
-        semi-supervised learning.
+        r"""
+        Measures loss from input `x` and label `y` with values 1 or -1. It evaluates if
+        two inputs are similar or not, often used for embedding or semi-supervised
+        learning.
 
         Loss for the `n`-th sample:
         .. math::
@@ -398,7 +399,7 @@ def hinge_embedding_loss(
             \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}
 
-        where :math:`L = \{l_1,\dots,l_N\}^\top`.
+        where :math:`L = \{l_1,\dots,l_N\}^\top`
 
         Parameters
         ----------
@@ -438,10 +439,14 @@ def hinge_embedding_loss(
         Examples
         --------
         >>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
-        >>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64)
-        >>> loss = poisson_nll_loss(input_tensor, target_tensor, log_input=True)
-        >>> print(loss)
-        ivy.array(16.1978)
+        >>> target_tensor = ivy.array([1, 1, 1, 1], dtype=ivy.float64)
+        >>> input_tensor.hinge_embedding_loss(target_tensor, reduction="sum")
+        ivy.array(10.)
+
+        >>> input_tensor = ivy.array([1, 2, 3], dtype=ivy.float64)
+        >>> target_tensor = ivy.array([1, -1, -1], dtype=ivy.float64)
+        >>> input_tensor.hinge_embedding_loss(target_tensor, margin=2.0)
+        ivy.array(0.33333333)
         """
         return ivy.hinge_embedding_loss(
             self._data,
diff --git a/ivy/data_classes/container/experimental/losses.py b/ivy/data_classes/container/experimental/losses.py
index 061ed3f8c5a6f..7c9e49aaec273 100644
--- a/ivy/data_classes/container/experimental/losses.py
+++ b/ivy/data_classes/container/experimental/losses.py
@@ -1116,10 +1116,10 @@ def _static_hinge_embedding_loss(
         prune_unapplied: Union[bool, ivy.Container] = False,
         map_sequences: Union[bool, ivy.Container] = False,
     ) -> ivy.Container:
-        r"""ivy.Container static method variant of ivy.hinge_embedding_loss.
-        This method simplywraps the function, and so the docstring for
-        ivy.hinge_embedding_loss also applies to this method with minimal
-        changes.
+        r"""
+        ivy.Container static method variant of ivy.hinge_embedding_loss. This method
+        simply wraps the function, and so the docstring for ivy.hinge_embedding_loss
+        also applies to this method with minimal changes.
 
         Parameters
         ----------
@@ -1168,27 +1168,28 @@ def _static_hinge_embedding_loss(
         --------
         With :class:`ivy.Container` inputs:
 
-        >>> x = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32),
-        ...
b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32)) - >>> y = ivy.Container(a=ivy.array([[1, 0, 2]], dtype=ivy.float32), - ... b=ivy.array([[3, 2, 1]], dtype=ivy.float32)) - >>> z = ivy.Container._static_poisson_nll_loss(x,y) - >>> print(z) + >>> x = ivy.Container(a=ivy.array([[1, 0, 2]], dtype=ivy.float32), + ... b=ivy.array([[-1, 1, 1]], dtype=ivy.float32)) + >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32), + ... b=ivy.array([[1, 1, 1]], dtype=ivy.float32)) + >>> z = ivy.Container._hinge_embedding_loss(x, y, reduction="none") + >>> z { - a: ivy.array(1.06446016), - b: ivy.array(0.55611551) + a: ivy.array([[0., 0., 0.]]), + b: ivy.array([[-1., 1., 1.]]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: - >>> x = ivy.array([[1, 0, 2]], dtype=ivy.float32) - >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32), - ... b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32)) - >>> z = ivy.Container._static_poisson_nll_loss(x, y) - >>> print(z) + >>> x = ivy.array([[10, 20, 32]], dtype=ivy.float32) + >>> y = ivy.Container(a=ivy.array([[-1, -1, -1]], dtype=ivy.float32), + ... b=ivy.array([[1, 1, 1]], dtype=ivy.float32)) + >>> z = ivy.Container._static_hinge_embedding_loss(x, y, + ... reduction="sum", margin=2.0) + >>> z { - a: ivy.array(3.30244565), - b: ivy.array(3.30244565) + a: ivy.array(0.), + b: ivy.array(62.) } """ return ContainerBase.cont_multi_map_in_function( @@ -1214,10 +1215,10 @@ def hinge_embedding_loss( prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> ivy.Container: - r"""ivy.Container instance method variant of ivy.hinge_embedding_loss. - This method simply wraps the function, and so the docstring for - ivy.hinge_embedding_loss also applies to this method with minimal - changes. + r""" + ivy.Container instance method variant of ivy.hinge_embedding_loss. This method + simply wraps the function, and so the docstring for ivy.hinge_embedding_loss + also applies to this method with minimal changes. Parameters ---------- @@ -1267,13 +1268,12 @@ def hinge_embedding_loss( -------- >>> x = ivy.Container(a=ivy.array([[1, 0, 2]], dtype=ivy.float32), ... b=ivy.array([[3, 2, 1]], dtype=ivy.float32)) - >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32), - ... b=ivy.array([[0.8, 0.2, 0.2]], dtype=ivy.float32)) - >>> z = x.poisson_nll_loss(y) - >>> print(z) + >>> y = ivy.Container(a=ivy.array([[-1, -1, -1]], dtype=ivy.float32), + ... 
b=ivy.array([[1, 1, 1]], dtype=ivy.float32)) + >>> x.hinge_embedding_loss(y, reduction="none", margin=0.5) { - a: ivy.array(3.30244565), - b: ivy.array(9.06429195) + a: ivy.array([[0., 0.5, 0.]]), + b: ivy.array([[3., 2., 1.]]) } """ return self._static_hinge_embedding_loss( diff --git a/ivy/functional/backends/jax/experimental/losses.py b/ivy/functional/backends/jax/experimental/losses.py index e83dea74eb759..25c01439377ba 100644 --- a/ivy/functional/backends/jax/experimental/losses.py +++ b/ivy/functional/backends/jax/experimental/losses.py @@ -64,11 +64,11 @@ def soft_margin_loss( return loss -def _apply_loss_reduction(loss: JaxArray, reduction: str, axis=None) -> JaxArray: +def _apply_loss_reduction(loss: JaxArray, reduction: str) -> JaxArray: if reduction == "sum": - return jnp.sum(loss, axis=axis) + return jnp.sum(loss) elif reduction == "mean": - return jnp.mean(loss, axis=axis) + return jnp.mean(loss) else: # reduction == "none" return loss @@ -177,5 +177,4 @@ def hinge_embedding_loss( loss = jnp.where(target == 1.0, input, zero_) + jnp.where( target == -1.0, relu_part, zero_ ) - return _apply_loss_reduction(loss, reduction) diff --git a/ivy/functional/backends/numpy/experimental/losses.py b/ivy/functional/backends/numpy/experimental/losses.py index c9b0ec1d1ac98..2a0f61644ab69 100644 --- a/ivy/functional/backends/numpy/experimental/losses.py +++ b/ivy/functional/backends/numpy/experimental/losses.py @@ -96,15 +96,12 @@ def soft_margin_loss( return loss -def _apply_loss_reduction(loss: np.ndarray, reduction: str, axis, out) -> np.ndarray: +def _apply_loss_reduction(loss: np.ndarray, reduction: str) -> np.ndarray: if reduction == "sum": - return np.sum(loss, axis=axis, out=out) + return np.sum(loss) elif reduction == "mean": - return np.mean(loss, axis=axis, out=out) + return np.mean(loss) else: # reduction == "none" - if out is not None: - out[...] = loss - return out return loss diff --git a/ivy/functional/backends/tensorflow/experimental/losses.py b/ivy/functional/backends/tensorflow/experimental/losses.py index ab7465f95d27b..621ee4263d8f9 100644 --- a/ivy/functional/backends/tensorflow/experimental/losses.py +++ b/ivy/functional/backends/tensorflow/experimental/losses.py @@ -68,11 +68,11 @@ def soft_margin_loss( return loss -def _apply_loss_reduction(loss: tf.Tensor, reduction: str, axis) -> tf.Tensor: +def _apply_loss_reduction(loss: tf.Tensor, reduction: str) -> tf.Tensor: if reduction == "sum": - return tf.math.reduce_sum(loss, axis=axis) + return tf.math.reduce_sum(loss) elif reduction == "mean": - return tf.reduce_mean(loss, axis=axis) + return tf.reduce_mean(loss) else: # reduction == "none" return loss diff --git a/ivy/functional/ivy/experimental/losses.py b/ivy/functional/ivy/experimental/losses.py index ce189e3f7e7e9..bbe41582b354a 100644 --- a/ivy/functional/ivy/experimental/losses.py +++ b/ivy/functional/ivy/experimental/losses.py @@ -576,16 +576,6 @@ def poisson_nll_loss( ) -# @handle_exceptions -# @handle_nestable -# @handle_array_like_without_promotion -# @to_native_arrays_and_back -# @handle_exceptions -# @handle_backend_invalid -# @handle_nestable -# @to_native_arrays_and_back -# @handle_array_function -# @handle_device @handle_exceptions @handle_nestable @handle_array_like_without_promotion @@ -597,9 +587,9 @@ def hinge_embedding_loss( margin: float = 1.0, reduction: str = "mean", ) -> ivy.Array: - r"""Measures loss from input `x` and label `y` with values 1 or -1. 
It - evaluates if two inputs are similar or not, often used for embedding or - semi-supervised learning. + r""" + Measures loss from input `x` and label `y` with values 1 or -1. It evaluates if two + inputs are similar or not, often used for embedding or semi-supervised learning. Loss for the `n`-th sample: .. math:: @@ -615,7 +605,7 @@ def hinge_embedding_loss( \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.} \end{cases} - where :math:`L = \{l_1,\dots,l_N\}^\top`. + where :math:`L = \{l_1,\dots,l_N\}^\top` . Parameters ---------- @@ -655,10 +645,17 @@ def hinge_embedding_loss( Examples -------- >>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64) - >>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64) - >>> loss = poisson_nll_loss(input_tensor, target_tensor, log_input=True) - >>> print(loss) - ivy.array(16.1978) + >>> target_tensor = ivy.array([1, 1, 1, 1], dtype=ivy.float64) + >>> loss = ivy.hinge_embedding_loss(input_tensor, target_tensor, reduction="none") + >>> loss + ivy.array([1., 2., 3., 4.]) + + >>> input_tensor = ivy.array([21, 22], dtype=ivy.float32) + >>> target_tensor = ivy.array([-1, 1], dtype=ivy.float32) + >>> loss = ivy.hinge_embedding_loss(input_tensor,target_tensor, + ... margin=2.0, reduction="sum") + >>> loss + ivy.array(22.) """ return ivy.current_backend().hinge_embedding_loss( input, diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py index 0b7fb8fc456bf..8c0028d457e24 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py @@ -7,36 +7,77 @@ from ivy_tests.test_ivy.helpers import handle_test +# --- Helpers --- # +# --------------- # + + +@st.composite +def _hinge_embedding_loss_input( + draw, min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10 +): + # determine the shape for both arrays (input and target) + shape = draw( + st.shared( + helpers.get_shape( + min_num_dims=min_num_dims, + max_num_dims=max_num_dims, + min_dim_size=min_dim_size, + max_dim_size=max_dim_size, + ), + key="shared_shape", + ) + ) + + # Generate an array of -1 and 1 with the given shape (target_array) + def _arrays_of_neg1_and_1(shape): + value_strategy = st.sampled_from([-1, 1]) + prod_shape = int(np.prod(shape)) # Convert np.int64 to int + array_data = draw( + st.lists(value_strategy, min_size=prod_shape, max_size=prod_shape) + ) + return np.asarray(array_data).reshape(shape) + + # input_array + dtype, xx = draw( + helpers.dtype_and_values( + shape=shape, + available_dtypes=helpers.get_dtypes("valid"), + safety_factor_scale="linear", + large_abs_safety_factor=2, + small_abs_safety_factor=2, + min_value=1, + max_value=10, + min_dim_size=1, + min_num_dims=1, + max_num_dims=5, + max_dim_size=5, + ) + ) + + # generate the target array 'yy' containing either 1 or -1 + yy = _arrays_of_neg1_and_1(shape=shape) + + return dtype, xx, yy + + +# --- Main --- # +# ------------ # + + # hinge_embedding_loss @handle_test( fn_tree="functional.ivy.experimental.hinge_embedding_loss", - dtype_and_input=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_value=-10, - max_value=10, - allow_inf=False, - min_num_dims=1, - min_dim_size=1, - max_num_dims=5, - ), - dtype_and_target=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - min_value=-1, - max_value=1, - allow_inf=False, - min_num_dims=1, - 
min_dim_size=1, - max_num_dims=5, - ), + dtype_and_inputs=_hinge_embedding_loss_input(), margin=st.floats(min_value=1, max_value=5), reduction=st.sampled_from(["none", "sum", "mean"]), + test_gradients=st.just( + False + ), # Gradients are failing for "jax" and "paddle" backend. test_with_out=st.just(False), - test_gradients=st.just(False), ground_truth_backend="torch", ) def test_hinge_embedding_loss( - dtype_and_input, - dtype_and_target, + dtype_and_inputs, margin, reduction, test_flags, @@ -44,16 +85,15 @@ def test_hinge_embedding_loss( fn_name, on_device, ): - input_dtype, input = dtype_and_input - target_dtype, target = dtype_and_target + dtype, xx, yy = dtype_and_inputs helpers.test_function( - input_dtypes=input_dtype, + input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, - input=input[0], - target=target[0], + input=xx[0], + target=yy, margin=margin, reduction=reduction, rtol_=1e-05, From 610ee99d4fa0cf7cdbaed42b277439010a336401 Mon Sep 17 00:00:00 2001 From: akshatvishu Date: Thu, 19 Oct 2023 15:56:41 +0000 Subject: [PATCH 4/4] fix: typo at hinge_embedding_loss container docstring --- ivy/data_classes/container/experimental/losses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ivy/data_classes/container/experimental/losses.py b/ivy/data_classes/container/experimental/losses.py index 7c9e49aaec273..cbd27d371dc1d 100644 --- a/ivy/data_classes/container/experimental/losses.py +++ b/ivy/data_classes/container/experimental/losses.py @@ -1172,7 +1172,7 @@ def _static_hinge_embedding_loss( ... b=ivy.array([[-1, 1, 1]], dtype=ivy.float32)) >>> y = ivy.Container(a=ivy.array([[0.6, 0.2, 0.3]], dtype=ivy.float32), ... b=ivy.array([[1, 1, 1]], dtype=ivy.float32)) - >>> z = ivy.Container._hinge_embedding_loss(x, y, reduction="none") + >>> z = ivy.Container._static_hinge_embedding_loss(x, y, reduction="none") >>> z { a: ivy.array([[0., 0., 0.]]),
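
---

Note for reviewers: for a quick sanity check of the semantics the four patches
converge on, the following is a minimal standalone sketch in plain NumPy. It
mirrors the where/maximum formulation used by the jax, numpy, and tensorflow
backend implementations above; the function name is illustrative only and is
not part of the Ivy API. It reproduces the docstring examples from PATCH 3.

import numpy as np


def hinge_embedding_loss_sketch(x, y, margin=1.0, reduction="mean"):
    # Per-element loss: l_n = x_n               where y_n == 1,
    #                   l_n = max(0, margin - x_n) where y_n == -1.
    zero = np.zeros(1, dtype=x.dtype)
    relu_part = np.maximum(margin - x, 0)
    loss = np.where(y == 1.0, x, zero) + np.where(y == -1.0, relu_part, zero)
    # Reduction mirrors the _apply_loss_reduction helpers in the backends.
    if reduction == "sum":
        return np.sum(loss)
    if reduction == "mean":
        return np.mean(loss)
    return loss  # reduction == "none"


# Matches the PATCH 3 docstring examples:
print(hinge_embedding_loss_sketch(
    np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 1.0, 1.0, 1.0]),
    reduction="sum"))  # 10.0
print(hinge_embedding_loss_sketch(
    np.array([1.0, 2.0, 3.0]), np.array([1.0, -1.0, -1.0]),
    margin=2.0))  # 0.33333333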