Commit a930fe80 authored by Davis King

merged

parents 5d259cd7 c0b7bf9e
@@ -5,10 +5,10 @@ Also, **the issue tracker is not a code writing service, do not ask for someone
 Finally, before you ask a question, check google for a solution, [the dlib FAQ](http://dlib.net/faq.html), or generally consult the dlib documentation. Every single function in dlib is documented in detail. If you post an issue but you obviously haven't read the documentation for the part of dlib you are using then you won't get an answer.
 If you aren't reporting a bug or problem with dlib then delete this template and write whatever you want here.
 <!-- ================================================================ -->
-<!-- ========i============= BUG REPORT TEMPLATE ===================== -->
+<!-- ===================== BUG REPORT TEMPLATE ===================== -->
 <!-- ================================================================ -->
...
@@ -238,6 +238,13 @@ namespace dlib
     };

+    template <typename T>
+    T safe_log(T input, T epsilon = 1e-10)
+    {
+        // Prevent trying to calculate the logarithm of a very small number (let alone zero)
+        return std::log(std::max(input, epsilon));
+    }
+
     template <typename SUBNET>
     using loss_binary_log = add_loss_layer<loss_binary_log_, SUBNET>;
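The reason for the new helper: the loss layers patched below feed softmax outputs directly into the logarithm, and `std::log(0)` evaluates to `-inf`, which then propagates non-finite values into the accumulated loss and the gradients. Here is a minimal standalone sketch of the helper's effect; the `safe_log` body is taken from the patch, while the `main` harness and printed values are only illustrative:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

template <typename T>
T safe_log(T input, T epsilon = 1e-10)
{
    // Clamp the argument away from zero so the logarithm stays finite.
    return std::log(std::max(input, epsilon));
}

int main()
{
    std::printf("std::log(0.0f) -> %f\n", std::log(0.0f)); // -inf: poisons the loss
    std::printf("safe_log(0.0f) -> %f\n", safe_log(0.0f)); // log(1e-10), about -23.03
    std::printf("safe_log(0.5f) -> %f\n", safe_log(0.5f)); // unchanged for normal inputs
}
```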
@@ -317,7 +324,7 @@ namespace dlib
                 const unsigned long idx = i*output_tensor.k()+k;
                 if (k == y)
                 {
-                    loss += scale*-std::log(g[idx]);
+                    loss += scale*-safe_log(g[idx]);
                     g[idx] = scale*(g[idx]-1);
                 }
                 else
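This hunk and the two that follow patch the same per-sample pattern: `g` holds the network's softmax output, the true class `y` contributes `-log(p_y)` to the loss, and the buffer is overwritten in place with the gradient, `scale*(g[idx]-1)` at the true class. Below is a self-contained sketch of that pattern over a plain array; the function name, `probs`, and `num_classes` are illustrative, and the `else` branch writing `scale*p` is the standard softmax cross-entropy gradient, assumed here since the diff's `else` bodies are not shown:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>

// Illustrative stand-in for the per-sample loop in the log-loss layers:
// probs[] holds softmax outputs on entry and the gradient on return.
double log_loss_and_gradient(double* probs, std::size_t num_classes,
                             std::size_t y, double scale)
{
    const double epsilon = 1e-10;
    double loss = 0;
    for (std::size_t k = 0; k < num_classes; ++k)
    {
        if (k == y)
        {
            // The clamp is exactly what safe_log adds: the loss stays
            // finite even when probs[y] underflows to 0.
            loss += scale * -std::log(std::max(probs[k], epsilon));
            probs[k] = scale * (probs[k] - 1);
        }
        else
        {
            // Assumed for illustration: standard softmax cross-entropy
            // gradient for the non-target classes.
            probs[k] = scale * probs[k];
        }
    }
    return loss;
}
```

With `epsilon = 1e-10`, a sample whose true-class probability underflows to zero now contributes at most about `23.03*scale` to the loss instead of infinity.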
@@ -2139,7 +2146,7 @@ namespace dlib
                 const size_t idx = tensor_index(output_tensor, i, k, r, c);
                 if (k == y)
                 {
-                    loss += scale*-std::log(g[idx]);
+                    loss += scale*-safe_log(g[idx]);
                     g[idx] = scale*(g[idx] - 1);
                 }
                 else if (y == label_to_ignore)
@@ -2285,7 +2292,7 @@ namespace dlib
                 const size_t idx = tensor_index(output_tensor, i, k, r, c);
                 if (k == y)
                 {
-                    loss += weight*scale*-std::log(g[idx]);
+                    loss += weight*scale*-safe_log(g[idx]);
                     g[idx] = weight*scale*(g[idx] - 1);
                 }
                 else
...