Commit dcfff1c4 authored by Davis King

Added process() and process_batch() to add_loss_layer. These routines let you
easily pass arguments to any optional parameters of a loss layer's to_label()
routine.  For instance, it makes it more convenient to set loss_mmod_'s
adjust_threshold parameter.
parent 78103a58
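For context, here is a minimal usage sketch of the new single-object routine. It is not part of the commit: the detector type follows dlib's dnn_mmod_ex.cpp example, and the model and image file names are purely illustrative; any loss_mmod based network works the same way.

    #include <dlib/dnn.h>
    #include <dlib/image_io.h>
    #include <vector>

    using namespace dlib;

    // Illustrative MMOD detector type, taken from dlib's dnn_mmod_ex.cpp example.
    template <long N, typename SUBNET> using con5d = con<N,5,5,2,2,SUBNET>;
    template <long N, typename SUBNET> using con5  = con<N,5,5,1,1,SUBNET>;
    template <typename SUBNET> using downsampler = relu<bn_con<con5d<32, relu<bn_con<con5d<32, relu<bn_con<con5d<16,SUBNET>>>>>>>>>;
    template <typename SUBNET> using rcon5 = relu<bn_con<con5<45,SUBNET>>>;
    using net_type = loss_mmod<con<1,9,9,1,1,rcon5<rcon5<rcon5<downsampler<input_rgb_image_pyramid<pyramid_down<6>>>>>>>>;

    int main()
    {
        net_type net;
        deserialize("my_detector.dat") >> net;   // hypothetical previously trained model

        matrix<rgb_pixel> img;
        load_image(img, "test.jpg");             // hypothetical test image

        // New in this commit: extra arguments given to process() are forwarded to
        // the loss layer's to_label().  Here -0.5 becomes loss_mmod_'s
        // adjust_threshold, so the detector returns more (lower scoring) detections.
        std::vector<mmod_rect> dets = net.process(img, -0.5);
    }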
@@ -2291,6 +2291,38 @@ namespace dlib
            return temp_label;
        }

        template <typename ...T>
        const output_label_type& process (const input_type& x, T&& ...args)
        {
            // Like operator()(x), but forwards any extra arguments to the loss
            // layer's to_label() when converting the network output to a label.
            to_tensor(&x,&x+1,temp_tensor);
            subnetwork.forward(temp_tensor);
            const dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
            loss.to_label(temp_tensor, wsub, &temp_label, std::forward<T>(args)...);
            return temp_label;
        }

        template <typename iterable_type, typename ...T>
        std::vector<output_label_type> process_batch (const iterable_type& data, size_t batch_size = 128, T&& ...args)
        {
            std::vector<output_label_type> results(std::distance(data.begin(), data.end()));
            auto o = results.begin();
            auto i = data.begin();
            auto num_remaining = results.size();
            while(num_remaining != 0)
            {
                // Run the inputs through the network in chunks of at most
                // batch_size elements, forwarding args to to_label() each time.
                auto inc = std::min(batch_size, num_remaining);
                to_tensor(i,i+inc,temp_tensor);
                subnetwork.forward(temp_tensor);
                const dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
                loss.to_label(temp_tensor, wsub, o, std::forward<T>(args)...);

                i += inc;
                o += inc;
                num_remaining -= inc;
            }
            return results;
        }

        template <typename iterable_type>
        std::vector<output_label_type> operator() (
            const iterable_type& data,
@@ -834,6 +834,49 @@ namespace dlib
                  output_label_type.
        !*/
        template <typename ...T>
        const output_label_type& process (
            const input_type& x,
            T&& ...args
        );
        /*!
            ensures
                - This function is just like (*this)(x), i.e. it runs a single object, x,
                  through the network and returns the output.  But we additionally pass the
                  given args to loss_details().to_label() as the 4th argument (or more,
                  depending on how many things are in args) when converting the network
                  output to an output_label_type.  This is useful, for instance, with loss
                  layers like loss_mmod_, which has an optional adjust_threshold argument to
                  to_label() that adjusts the detection threshold.  Therefore, for such
                  networks you could call them like: net.process(some_image, -0.5), and the
                  -0.5 would be passed as the adjust_threshold argument of to_label().
        !*/
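To make the "4th argument" wording above concrete, here is a paraphrased sketch of loss_mmod_'s to_label() declaration (see dlib/dnn/loss.h for the authoritative version); the trailing parameter is where a forwarded -0.5 would land:

    // Sketch of loss_mmod_::to_label() (paraphrased).  process()/process_batch()
    // supply the first three arguments themselves; anything extra you pass is
    // forwarded into the trailing parameters, here adjust_threshold.
    template <typename sub_type, typename label_iterator>
    void to_label (
        const tensor& input_tensor,
        const sub_type& sub,
        label_iterator iter,
        double adjust_threshold = 0
    ) const;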
        template <typename iterable_type, typename ...T>
        std::vector<output_label_type> process_batch (
            const iterable_type& data,
            size_t batch_size = 128,
            T&& ...args
        );
        /*!
            requires
                - batch_size > 0
                - data must have a .begin() and .end() that supply iterators over a
                  sequence of input_type elements.  E.g. data could have a type of
                  std::vector<input_type>
            ensures
                - This function is just like (*this)(data,batch_size), i.e. it runs a
                  bunch of objects through the network and returns the outputs.  But we
                  additionally pass the given args to loss_details().to_label() as the 4th
                  argument (or more, depending on how many things are in args) when
                  converting the network output to output_label_types.  This is useful,
                  for instance, with loss layers like loss_mmod_, which has an optional
                  adjust_threshold argument to to_label() that adjusts the detection
                  threshold.  Therefore, for such networks you could call them like:
                  net.process_batch(std::vector<image_type>({some_image, another_image}), 128, -0.5),
                  and the -0.5 would be passed as the adjust_threshold argument of to_label().
        !*/
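Continuing the illustrative detector sketch shown after the commit message, a batched call would look like this (img1 and img2 stand in for already loaded images; the names are hypothetical):

    std::vector<matrix<rgb_pixel>> imgs = {img1, img2};

    // Mini-batches of up to 128 images; -0.5 is again forwarded to loss_mmod_'s
    // adjust_threshold via to_label().
    std::vector<std::vector<mmod_rect>> all_dets = net.process_batch(imgs, 128, -0.5);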
// -------------
        template <typename label_iterator>
......