Commit d4da6c53 authored by Dennis Francis

adapt to dlib indentation style

parent af76e826
@@ -1305,70 +1305,70 @@ namespace dlib
        template <
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const
        {
            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            const tensor& output_tensor = sub.get_output();
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                *iter++ = out_data[i];
            }
        }

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();

            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(input_tensor.num_samples() != 0);
            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
                         grad.k() == 1);

            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
            double loss = 0;
            float* g = grad.host_write_only();
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                const float y = *truth++;
                const float temp1 = y - out_data[i];
                const float temp2 = scale*temp1;
                loss += 0.5*temp2*temp1;
                g[i] = -temp2;
            }
            return loss;
        }

        friend void serialize(const loss_mean_squared_& , std::ostream& out)
        {
@@ -1397,7 +1397,7 @@ namespace dlib
    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

// ----------------------------------------------------------------------------------------
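For reference, the loop in compute_loss_value_and_gradient above evaluates the squared error averaged over the mini-batch, together with its gradient with respect to each network output. Writing $N$ for output_tensor.num_samples() and $\hat{y}_i$ for out_data[i], it computes

$$ L \;=\; \frac{1}{2N}\sum_{i=1}^{N}\bigl(y_i - \hat{y}_i\bigr)^2, \qquad g_i \;=\; \frac{\partial L}{\partial \hat{y}_i} \;=\; -\,\frac{y_i - \hat{y}_i}{N}, $$

so temp2 = scale*temp1 is $(y_i - \hat{y}_i)/N$ and g[i] = -temp2 stores its negation into the gradient tensor.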
...
@@ -584,7 +584,6 @@ namespace dlib
    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;
}

#endif // DLIB_DNn_LOSS_ABSTRACT_H_
...
@@ -1758,11 +1758,7 @@ namespace
            y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
        }

-       using net_type = loss_mean_squared<
-                            fc<
-                                1, input<matrix<double>>
-                            >
-                        >;
+       using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;

        net_type net;
        layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);
        sgd defsolver;

...
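For context, here is a minimal, self-contained sketch of how a net_type like the one in the test above is typically driven through dlib's dnn_trainer. The dataset (a noiseless y = 2x + 1) and every hyperparameter value are illustrative assumptions, not values taken from the test.

// A minimal sketch (assumed usage, not code from this commit) showing how
// the simplified net_type above can be trained with dlib's dnn_trainer.
#include <dlib/dnn.h>
#include <vector>

using namespace dlib;

int main()
{
    // One fully connected neuron over a 1x1 matrix input, MSE loss on top.
    using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;
    net_type net;

    // Toy regression data: y = 2x + 1 on [0, 1) (illustrative, noiseless).
    std::vector<matrix<double>> x;
    std::vector<float> y;
    for (int i = 0; i < 200; ++i)
    {
        matrix<double> xi(1,1);
        xi(0,0) = i/200.0;
        x.push_back(xi);
        y.push_back(static_cast<float>(2.0*xi(0,0) + 1.0));
    }

    // Plain SGD, echoing the "sgd defsolver" in the test hunk above.
    dnn_trainer<net_type> trainer(net, sgd());
    trainer.set_learning_rate(0.01);
    trainer.set_mini_batch_size(50);
    trainer.set_max_num_epochs(1000);
    trainer.train(x, y);

    // net(x[i]) now returns the fitted prediction for sample i.
    return 0;
}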