Commit 63f4f73b authored by Davis King

Added tt::log(), tt::exp(), and tt::log10()

parent dd4850b5
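
For orientation, here is a minimal usage sketch of the new functions (an editor's illustration, not part of the commit). It assumes a dlib build where <dlib/dnn.h> provides resizable_tensor and the tt:: wrappers added below; the input is shifted into [1, 2) so the logarithms are well defined:

    #include <dlib/dnn.h>
    using namespace dlib;

    int main()
    {
        resizable_tensor src(2,3), dest(2,3);
        tt::tensor_rand rnd;
        rnd.fill_uniform(src);                // uniform values in [0, 1)
        tt::affine_transform(src, src, 1, 1); // shift to [1, 2) so log() stays finite
        tt::exp(dest, src);                   // dest = exp(mat(src)), element-wise
        tt::log(dest, src);                   // dest = log(mat(src))
        tt::log10(src, src);                  // in-place calls work, as the tests below verify
        return 0;
    }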
@@ -261,6 +261,57 @@ namespace dlib
        }
    }
// ----------------------------------------------------------------------------------------
    __global__ void _cuda_exp(float* dest, const float* src, size_t n)
    {
        for (auto i : grid_stride_range(0, n))
            dest[i] = ::exp(src[i]);
    }

    void exp (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_ASSERT(dest.size() == src.size());
        launch_kernel(_cuda_exp, max_jobs(src.size()), dest.device(), src.device(), src.size());
    }

// ----------------------------------------------------------------------------------------

    __global__ void _cuda_log(float* dest, const float* src, size_t n)
    {
        for (auto i : grid_stride_range(0, n))
            dest[i] = ::log(src[i]);
    }

    void log (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_ASSERT(dest.size() == src.size());
        launch_kernel(_cuda_log, max_jobs(src.size()), dest.device(), src.device(), src.size());
    }

// ----------------------------------------------------------------------------------------

    __global__ void _cuda_log10(float* dest, const float* src, size_t n)
    {
        for (auto i : grid_stride_range(0, n))
            dest[i] = ::log10(src[i]);
    }

    void log10 (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_ASSERT(dest.size() == src.size());
        launch_kernel(_cuda_log10, max_jobs(src.size()), dest.device(), src.device(), src.size());
    }
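
    // Editor's note (not part of the commit): grid_stride_range(0, n) implements
    // the standard CUDA grid-stride loop, so a single launch_kernel call covers a
    // tensor of any size. A raw-CUDA equivalent of _cuda_exp (hypothetical name
    // _cuda_exp_raw) would look like:
    //
    //     __global__ void _cuda_exp_raw(float* dest, const float* src, size_t n)
    //     {
    //         for (size_t i = blockDim.x*blockIdx.x + threadIdx.x; i < n;
    //              i += (size_t)gridDim.x*blockDim.x)
    //             dest[i] = ::exp(src[i]);
    //     }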
// -----------------------------------------------------------------------------------
    __global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
@@ -1085,6 +1136,7 @@ namespace dlib
            grad.device(), src.device(), gradient_input.device(), grad.size(),
            param.device(), params_grad.device());
    }

// ----------------------------------------------------------------------------------------

    void copy_tensor(
@@ -1117,6 +1169,7 @@ namespace dlib
            src_p += src_sample_size;
        }
    }

// ----------------------------------------------------------------------------------------

}
@@ -142,6 +142,21 @@ namespace dlib
        const tensor& v2
    );

    void exp (
        tensor& dest,
        const tensor& src
    );

    void log (
        tensor& dest,
        const tensor& src
    );

    void log10 (
        tensor& dest,
        const tensor& src
    );

// ------------------------------------------------------------------------------------

    void set_tensor (
@@ -135,6 +135,54 @@ namespace dlib { namespace tt
#endif
    }
// ----------------------------------------------------------------------------------------
    void exp (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_CASSERT(dest.size() == src.size());
#ifdef DLIB_USE_CUDA
        cuda::exp(dest,src);
#else
        dest = exp(mat(src));
#endif
    }

// ----------------------------------------------------------------------------------------

    void log (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_CASSERT(dest.size() == src.size());
#ifdef DLIB_USE_CUDA
        cuda::log(dest,src);
#else
        dest = log(mat(src));
#endif
    }

// ----------------------------------------------------------------------------------------

    void log10 (
        tensor& dest,
        const tensor& src
    )
    {
        DLIB_CASSERT(dest.size() == src.size());
#ifdef DLIB_USE_CUDA
        cuda::log10(dest,src);
#else
        dest = log10(mat(src));
#endif
    }
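
    // Editor's note (not part of the commit): in the CPU branch, mat(src) views
    // the tensor's memory as a dlib matrix, the exp()/log()/log10() matrix
    // expressions apply element-wise, and the assignment writes every element of
    // dest. A hand-rolled equivalent of the exp() fallback, using tensor::host():
    //
    //     const float* s = src.host();
    //     float* d = dest.host();
    //     for (size_t i = 0; i < src.size(); ++i)
    //         d[i] = std::exp(s[i]);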
// ----------------------------------------------------------------------------------------
    void gemm (
@@ -98,6 +98,45 @@ namespace dlib { namespace tt
            out = beta*out + scale_rows(mat(m1) - scale_rows(mat(m2),mat(v1)), mat(v2));
    !*/
// ----------------------------------------------------------------------------------------
    void exp (
        tensor& dest,
        const tensor& src
    );
    /*!
        requires
            - dest.size() == src.size()
        ensures
            - performs: dest = exp(mat(src))
    !*/

// ----------------------------------------------------------------------------------------

    void log (
        tensor& dest,
        const tensor& src
    );
    /*!
        requires
            - dest.size() == src.size()
        ensures
            - performs: dest = log(mat(src))
    !*/

// ----------------------------------------------------------------------------------------

    void log10 (
        tensor& dest,
        const tensor& src
    );
    /*!
        requires
            - dest.size() == src.size()
        ensures
            - performs: dest = log10(mat(src))
    !*/
// ----------------------------------------------------------------------------------------
    void gemm (
@@ -692,6 +692,36 @@ namespace
            cpu::add(2, AA, 3, BB);
            DLIB_TEST_MSG(max(abs(mat(A)-mat(AA) )) < 1e-6, max(abs(mat(A)-mat(AA) )));
        }
        {
            print_spinner();
            resizable_tensor dest1(123,456), dest2(123,456);
            resizable_tensor src1(123,456), src2(123,456);
            tt::tensor_rand rnd;

            rnd.fill_uniform(src1); tt::affine_transform(src1, src1, 1, 2); src2 = src1; // random values in [2, 3)
            dest1 = exp(mat(src1));
            tt::exp(dest2, src2);
            tt::exp(src2, src2); // should work in place
            DLIB_TEST_MSG(max(abs(mat(dest1)-mat(dest2))) < 1e-5, max(abs(mat(dest1)-mat(dest2))));
            DLIB_TEST(max(abs(mat(dest1)-mat(src2))) < 1e-5);

            rnd.fill_uniform(src1); tt::affine_transform(src1, src1, 1, 2); src2 = src1; // random values in [2, 3)
            dest1 = log(mat(src1));
            tt::log(dest2, src2);
            tt::log(src2, src2); // should work in place
            DLIB_TEST(max(abs(mat(dest1)-mat(dest2))) < 1e-5);
            DLIB_TEST(max(abs(mat(dest1)-mat(src2))) < 1e-5);

            rnd.fill_uniform(src1); tt::affine_transform(src1, src1, 1, 2); src2 = src1; // random values in [2, 3)
            dest1 = log10(mat(src1));
            tt::log10(dest2, src2);
            tt::log10(src2, src2); // should work in place
            DLIB_TEST(max(abs(mat(dest1)-mat(dest2))) < 1e-5);
            DLIB_TEST(max(abs(mat(dest1)-mat(src2))) < 1e-5);
        }
    }
// ----------------------------------------------------------------------------------------