From 8e8356cf66bf988fe720fd31f13254e144e760fc Mon Sep 17 00:00:00 2001
From: Sergey Zagoruyko
Date: Sat, 3 Oct 2015 23:19:01 -0400
Subject: [PATCH] use double/float corresponding math functions

---
 generic/AbsCriterion.c       | 2 +-
 generic/DistKLDivCriterion.c | 2 +-
 generic/L1Cost.c             | 2 +-
 generic/LogSigmoid.c         | 4 ++--
 generic/LogSoftMax.c         | 4 ++--
 generic/Sigmoid.c            | 2 +-
 generic/SmoothL1Criterion.c  | 2 +-
 generic/SoftPlus.c           | 4 ++--
 8 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/generic/AbsCriterion.c b/generic/AbsCriterion.c
index 397e9ddd4..164f31fa1 100644
--- a/generic/AbsCriterion.c
+++ b/generic/AbsCriterion.c
@@ -11,7 +11,7 @@ static int nn_(AbsCriterion_updateOutput)(lua_State *L)
 
   sum = 0;
   TH_TENSOR_APPLY2(real, input, real, target,
-                   sum += fabs(*input_data - *target_data);)
+                   sum += TH_ABS(*input_data - *target_data);)
 
   if(sizeAverage)
     sum /= THTensor_(nElement)(input);
diff --git a/generic/DistKLDivCriterion.c b/generic/DistKLDivCriterion.c
index 1e433c238..ab990f939 100644
--- a/generic/DistKLDivCriterion.c
+++ b/generic/DistKLDivCriterion.c
@@ -11,7 +11,7 @@ static int nn_(DistKLDivCriterion_updateOutput)(lua_State *L)
 
   sum = 0;
   TH_TENSOR_APPLY2(real, input, real, target,
-                   sum += *target_data > 0 ? *target_data * (log(*target_data) - *input_data) : 0;)
+                   sum += *target_data > 0 ? *target_data * (TH_LOG(*target_data) - *input_data) : 0;)
 
   if(sizeAverage)
     sum /= THTensor_(nElement)(input);
diff --git a/generic/L1Cost.c b/generic/L1Cost.c
index a450e06e1..9f282c621 100644
--- a/generic/L1Cost.c
+++ b/generic/L1Cost.c
@@ -8,7 +8,7 @@ static int nn_(L1Cost_updateOutput)(lua_State *L)
   accreal sum;
 
   sum = 0;
-  TH_TENSOR_APPLY(real, input, sum += fabs(*input_data););
+  TH_TENSOR_APPLY(real, input, sum += TH_ABS(*input_data););
 
   lua_pushnumber(L, sum);
   lua_setfield(L, 1, "output");
diff --git a/generic/LogSigmoid.c b/generic/LogSigmoid.c
index 9b47a3240..0e1fc7d4c 100644
--- a/generic/LogSigmoid.c
+++ b/generic/LogSigmoid.c
@@ -12,9 +12,9 @@ static int nn_(LogSigmoid_updateOutput)(lua_State *L)
   THTensor_(resizeAs)(buffer, input);
 
   TH_TENSOR_APPLY3(real, output, real, input, real, buffer, \
-                   real z = exp(-*input_data); \
+                   real z = TH_EXP(-*input_data); \
                    *buffer_data = z; \
-                   *output_data = -log(1. + z);)
+                   *output_data = -TH_LOG(1. + z);)
 
   return 1;
 }
diff --git a/generic/LogSoftMax.c b/generic/LogSoftMax.c
index 75b8587d8..429c5ef9d 100644
--- a/generic/LogSoftMax.c
+++ b/generic/LogSoftMax.c
@@ -45,7 +45,7 @@ static int nn_(LogSoftMax_updateOutput)(lua_State *L)
 
     for(d = 0; d < dim; d++)
       logsum += THExpMinusApprox(maxInput-input_data[d]);
-    logsum = maxInput + log(logsum);
+    logsum = maxInput + TH_LOG(logsum);
 
     for(d = 0; d < dim; d++)
       output_data[d] = input_data[d] - logsum;
@@ -96,7 +96,7 @@ static int nn_(LogSoftMax_updateGradInput)(lua_State *L)
       sum += gradOutput_data[d];
 
     for(d = 0; d < dim; d++)
-      gradInput_data[d] = gradOutput_data[d] - exp(output_data[d])*sum;
+      gradInput_data[d] = gradOutput_data[d] - TH_EXP(output_data[d])*sum;
   }
 
   return 1;
diff --git a/generic/Sigmoid.c b/generic/Sigmoid.c
index 057ebc4f5..68f6d0874 100644
--- a/generic/Sigmoid.c
+++ b/generic/Sigmoid.c
@@ -10,7 +10,7 @@ static int nn_(Sigmoid_updateOutput)(lua_State *L)
 
   THTensor_(resizeAs)(output, input);
   TH_TENSOR_APPLY2(real, output, real, input, \
-                   *output_data = 1./(1.+ exp(- *input_data));)
+                   *output_data = 1./(1.+ TH_EXP(- *input_data));)
 
   return 1;
 }
diff --git a/generic/SmoothL1Criterion.c b/generic/SmoothL1Criterion.c
index 51cab0c46..60219d035 100644
--- a/generic/SmoothL1Criterion.c
+++ b/generic/SmoothL1Criterion.c
@@ -11,7 +11,7 @@ static int nn_(SmoothL1Criterion_updateOutput)(lua_State *L)
 
   sum = 0;
   TH_TENSOR_APPLY2(real, input, real, target,
-                   real z = fabs(*input_data - *target_data);
+                   real z = TH_ABS(*input_data - *target_data);
                    sum += z < 1 ? 0.5*z*z : z - 0.5;)
 
   if(sizeAverage)
diff --git a/generic/SoftPlus.c b/generic/SoftPlus.c
index 81f2a7ce1..3ff4f7e7b 100644
--- a/generic/SoftPlus.c
+++ b/generic/SoftPlus.c
@@ -14,7 +14,7 @@ static int nn_(SoftPlus_updateOutput)(lua_State *L)
 
   /* f(x) = 1/beta * log(1 + exp(beta * x)) */
   TH_TENSOR_APPLY2(real, output, real, input, \
-                   *output_data = (*input_data * beta) > threshold ? *input_data : THLog1p(exp(*input_data * beta)) / beta;)
+                   *output_data = (*input_data * beta) > threshold ? *input_data : THLog1p(TH_EXP(*input_data * beta)) / beta;)
 
   return 1;
 }
@@ -35,7 +35,7 @@ static int nn_(SoftPlus_updateGradInput)(lua_State *L)
 
   THTensor_(resizeAs)(gradInput, output);
   TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, \
-                   real z = exp(*output_data * beta); \
+                   real z = TH_EXP(*output_data * beta); \
                    *gradInput_data = (*output_data * beta) > threshold ? *gradOutput_data : *gradOutput_data * (z - 1.)/z;)
   return 1;
 }
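Note: the patch assumes TH_ABS, TH_LOG, and TH_EXP are macros provided by the TH math headers that expand to the precision-matched libm call for the concrete `real` type, so that float tensors no longer go through the double-precision fabs/log/exp. Their definitions are outside this diff; what follows is only a minimal sketch of one way such macros could be wired up, assuming a per-type TH_REAL_IS_FLOAT flag like the one set by TH's generic type-expansion headers.

#include <math.h>

/* Sketch only, not the actual TH definitions: select the single- or
   double-precision libm variant depending on which concrete type `real`
   is currently instantiated as by the generic type expansion. */
#if defined(TH_REAL_IS_FLOAT)
# define TH_ABS(x) fabsf(x)
# define TH_LOG(x) logf(x)
# define TH_EXP(x) expf(x)
#else
# define TH_ABS(x) fabs(x)
# define TH_LOG(x) log(x)
# define TH_EXP(x) exp(x)
#endif

With definitions along these lines, a statement such as sum += TH_ABS(*input_data - *target_data); compiles to fabsf for float tensors and fabs for double tensors, which is the point of the change.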