Add huber_loss tests (#53535)
co63oc authored May 16, 2023
1 parent 50f0acc commit 74b91bc
Showing 6 changed files with 111 additions and 25 deletions.
11 changes: 8 additions & 3 deletions paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu
@@ -18,6 +18,11 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
 
-PD_REGISTER_KERNEL(
-    huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
-}
+PD_REGISTER_KERNEL(huber_loss_grad,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::HuberLossGradKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
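With float16 and bfloat16 added to the registration above, the GPU Huber-loss backward kernel becomes reachable from Python in low precision. A minimal usage sketch, not part of this commit and assuming a CUDA build of Paddle (shapes and values are illustrative only):

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.set_device('gpu')
# float16 inputs now dispatch to the newly registered GPU kernels
x = paddle.to_tensor(np.random.rand(4, 3).astype('float16'), stop_gradient=False)
y = paddle.to_tensor(np.random.rand(4, 3).astype('float16'))
loss = F.smooth_l1_loss(x, y, delta=1.0)  # forward: huber_loss kernel
loss.backward()                           # backward: huber_loss_grad kernel
print(loss.dtype, x.grad.dtype)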
10 changes: 8 additions & 2 deletions paddle/phi/kernels/gpu/huber_loss_kernel.cu
@@ -18,5 +18,11 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
 
-PD_REGISTER_KERNEL(
-    huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
+PD_REGISTER_KERNEL(huber_loss,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::HuberLossKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
15 changes: 8 additions & 7 deletions paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/huber_loss_grad_kernel.h"
@@ -26,14 +27,14 @@ struct HuberLossBackward {
       : sign(sign), delta(delta) {}
 
   HOSTDEVICE T operator()(const T& val) const {
-    T abs_val = std::abs(val);
+    T abs_val = abs(val);
     if (abs_val <= delta) {
       return sign * val;
     } else {
-      if (val > 0) {
+      if (val > static_cast<T>(0)) {
         return sign * delta;
       } else {
-        return -1 * sign * delta;
+        return static_cast<T>(-1) * sign * delta;
       }
     }
   }
@@ -58,16 +59,16 @@ void HuberLossGradKernel(const Context& dev_ctx,
   if (input_grad) {
     dev_ctx.template Alloc<T>(input_grad);
     auto eigen_input_grad = EigenVector<T>::Flatten(*input_grad);
-    eigen_input_grad.device(place) =
-        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, -1.0));
+    eigen_input_grad.device(place) = eigen_residual.unaryExpr(
+        HuberLossBackward<T>(delta_, static_cast<T>(-1.0)));
     eigen_input_grad.device(place) = eigen_out_grad * eigen_input_grad;
   }
 
   if (label_grad) {
     dev_ctx.template Alloc<T>(label_grad);
     auto eigen_label_grad = EigenVector<T>::Flatten(*label_grad);
-    eigen_label_grad.device(place) =
-        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, 1.0));
+    eigen_label_grad.device(place) = eigen_residual.unaryExpr(
+        HuberLossBackward<T>(delta_, static_cast<T>(1.0)));
     eigen_label_grad.device(place) = eigen_out_grad * eigen_label_grad;
   }
 }
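For reference, the functor and kernel above implement the derivative of the Huber loss with respect to the residual r = label - input, scaled by the incoming out_grad via the chain rule (sign = -1 for the input gradient, +1 for the label gradient). A NumPy sketch of the same piecewise rule, an illustration only and not code from this commit:

import numpy as np

def huber_loss_grads(residual, out_grad, delta=1.0):
    # |r| <= delta: quadratic region, base gradient is r
    # |r| >  delta: linear region, base gradient is +/- delta
    r = np.asarray(residual, dtype=np.float64)
    base = np.where(np.abs(r) <= delta, r, delta * np.sign(r))
    dx = out_grad * (-1.0 * base)  # input_grad (sign = -1, since r = y - x)
    dy = out_grad * (+1.0 * base)  # label_grad (sign = +1)
    return dx, dy

dx, dy = huber_loss_grads([-2.0, 0.5], out_grad=np.ones(2))
print(dx, dy)  # [ 1.  -0.5] [-1.   0.5]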
3 changes: 2 additions & 1 deletion paddle/phi/kernels/impl/huber_loss_kernel_impl.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/huber_loss_kernel.h"
@@ -25,7 +26,7 @@ struct HuberLossForward {
   HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {}
 
   HOSTDEVICE T operator()(const T& val) const {
-    T abs_val = std::abs(val);
+    T abs_val = abs(val);
     if (abs_val <= delta) {
       return static_cast<T>(0.5) * val * val;
     } else {
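The forward functor is the standard Huber loss: quadratic for |r| <= delta and linear beyond (the linear branch is truncated in the diff view above). A NumPy sketch of the same function, matching huber_loss_forward used by the test below; an illustration, not code from this commit:

import numpy as np

def huber_loss(residual, delta=1.0):
    r = np.asarray(residual, dtype=np.float64)
    quadratic = 0.5 * r * r                     # |r| <= delta
    linear = delta * (np.abs(r) - 0.5 * delta)  # |r| >  delta
    return np.where(np.abs(r) <= delta, quadratic, linear)

print(huber_loss([-2.0, -0.5, 0.5, 2.0]))  # [1.5   0.125 0.125 1.5  ]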
87 changes: 77 additions & 10 deletions python/paddle/fluid/tests/unittests/test_huber_loss_op.py
@@ -15,9 +15,10 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
+from paddle.fluid import core
 
 
 def huber_loss_forward(val, delta):
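convert_float_to_uint16, imported above, is the test-suite helper that re-encodes float32 numpy data as the uint16 bit pattern Paddle uses for bfloat16 inputs and outputs. A simplified sketch of the idea, using plain truncation where the real helper also rounds; not the actual implementation:

import numpy as np

def float32_to_bfloat16_bits(x):
    # bfloat16 keeps the upper 16 bits of an IEEE float32
    x = np.asarray(x, dtype=np.float32)
    return (x.view(np.uint32) >> 16).astype(np.uint16)

def bfloat16_bits_to_float32(bits):
    return (bits.astype(np.uint32) << 16).view(np.float32)

a = np.array([0.15625, 1.0, 3.14159], dtype=np.float32)
bits = float32_to_bfloat16_bits(a)
print(bits.dtype, bfloat16_bits_to_float32(bits))  # uint16, values truncated to bf16 precision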
@@ -40,20 +41,24 @@ def setUp(self):
         self.python_api = huber_loss_wraper
 
         self.delta = 1.0
+        self.init_dtype()
         self.init_input()
         shape = self.set_shape()
         residual = self.inputs['Y'] - self.inputs['X']
         loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype(
-            'float32'
+            self.dtype
         )
         self.attrs = {'delta': self.delta}
         self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
 
+    def init_dtype(self):
+        self.dtype = np.float32
+
     def init_input(self):
         shape = self.set_shape()
         self.inputs = {
-            'X': np.random.uniform(0, 1.0, shape).astype('float32'),
-            'Y': np.random.uniform(0, 1.0, shape).astype('float32'),
+            'X': np.random.uniform(0, 1.0, shape).astype(self.dtype),
+            'Y': np.random.uniform(0, 1.0, shape).astype(self.dtype),
         }
 
     def set_shape(self):
@@ -66,14 +71,10 @@ def test_check_grad_normal(self):
         self.check_grad(['X', 'Y'], 'Out')
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.008, no_grad_set=set("residual")
-        )
+        self.check_grad(['Y'], 'Out', no_grad_set=set("residual"))
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.008, no_grad_set=set('residual')
-        )
+        self.check_grad(['X'], 'Out', no_grad_set=set('residual'))
 
 
 def TestHuberLossOp1(TestHuberLossOp):
@@ -91,6 +92,72 @@ def set_shape(self):
         return (6, 6, 1)
 
 
+class TestHuberLossFP16Op(TestHuberLossOp):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support bfloat16",
+)
+class TestHuberLossBF16Op(OpTest):
+    def setUp(self):
+        self.op_type = 'huber_loss'
+        self.python_out_sig = ["Out"]
+        self.python_api = huber_loss_wraper
+
+        self.delta = 1.0
+        self.init_dtype()
+        self.init_input()
+        shape = self.set_shape()
+        residual = self.inputs['Y'] - self.inputs['X']
+        loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype(
+            self.np_dtype
+        )
+        self.attrs = {'delta': self.delta}
+        self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
+
+        self.place = core.CUDAPlace(0)
+        self.inputs['X'] = convert_float_to_uint16(self.inputs['X'])
+        self.inputs['Y'] = convert_float_to_uint16(self.inputs['Y'])
+        self.outputs['Residual'] = convert_float_to_uint16(
+            self.outputs['Residual']
+        )
+        self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
+
+    def init_dtype(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+
+    def init_input(self):
+        shape = self.set_shape()
+        self.inputs = {
+            'X': np.random.uniform(0, 1.0, shape).astype(self.np_dtype),
+            'Y': np.random.uniform(0, 1.0, shape).astype(self.np_dtype),
+        }
+
+    def set_shape(self):
+        return (100, 1)
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad_normal(self):
+        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad_with_place(
+            self.place, ['Y'], 'Out', no_grad_set=set("residual")
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad_with_place(
+            self.place, ['X'], 'Out', no_grad_set=set('residual')
+        )
+
+
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()
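To exercise only the new low-precision cases, one hedged option (assuming the script is run from python/paddle/fluid/tests/unittests with a suitable Paddle build installed) is to load the two classes directly:

import unittest

import test_huber_loss_op as huber_tests

loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromTestCase(huber_tests.TestHuberLossFP16Op))
suite.addTests(loader.loadTestsFromTestCase(huber_tests.TestHuberLossBF16Op))
unittest.TextTestRunner(verbosity=2).run(suite)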
10 changes: 8 additions & 2 deletions python/paddle/nn/functional/loss.py
@@ -1084,10 +1084,16 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
         out = _C_ops.huber_loss(input, label, delta)
     else:
         check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], 'smooth_l1_loss'
+            input,
+            'input',
+            ['float16', 'float32', 'float64', 'uint16'],
+            'smooth_l1_loss',
         )
         check_variable_and_dtype(
-            label, 'label', ['float32', 'float64'], 'smooth_l1_loss'
+            label,
+            'label',
+            ['float16', 'float32', 'float64', 'uint16'],
+            'smooth_l1_loss',
         )
         helper = LayerHelper('huber_loss', **locals())
         residual = helper.create_variable_for_type_inference(
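With the dtype whitelist widened above, a float16 smooth_l1_loss program also passes the static-graph check. A hedged sketch, not part of this commit and assuming a CUDA build of Paddle:

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[4, 3], dtype='float16')
    y = paddle.static.data(name='y', shape=[4, 3], dtype='float16')
    loss = F.smooth_l1_loss(x, y, delta=1.0)  # passes check_variable_and_dtype

exe = paddle.static.Executor(paddle.CUDAPlace(0))
exe.run(startup_prog)
(out,) = exe.run(
    main_prog,
    feed={
        'x': np.random.rand(4, 3).astype('float16'),
        'y': np.random.rand(4, 3).astype('float16'),
    },
    fetch_list=[loss],
)
print(out.dtype)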
