Commit d1035855 authored by Davis King

Changed the DNN API so that sample_expansion_factor is a runtime variable rather than a compile-time constant. This also removes it from the input layer interface, since the DNN core infers its value at runtime, meaning users who define their own input layers no longer need to specify it.
parent 390c8e90
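
As an illustration of the new behavior (not part of the commit itself), here is a rough standalone sketch of the inference the DNN core now performs after calling an input layer's to_tensor(): the expansion factor is taken to be the ratio of output tensor samples to input objects, mirroring the new DLIB_CASSERT checks added to add_layer::to_tensor() in the diff below. The toy_input and fake_tensor types are hypothetical stand-ins, not dlib types.

#include <cassert>
#include <iterator>
#include <vector>

// Hypothetical stand-in for dlib::resizable_tensor; only num_samples() matters here.
struct fake_tensor { long n = 0; long num_samples() const { return n; } };

// Hypothetical input layer that expands every input object into 2 tensor samples.
// Note that it no longer declares a "const static unsigned int sample_expansion_factor".
struct toy_input
{
    typedef int input_type;
    template <typename forward_iterator>
    void to_tensor (forward_iterator ibegin, forward_iterator iend, fake_tensor& data) const
    {
        data.n = 2*std::distance(ibegin, iend);
    }
};

// Mirrors the inference now done inside add_layer::to_tensor().
template <typename input_layer_type, typename forward_iterator>
unsigned int infer_sample_expansion_factor (
    const input_layer_type& il,
    forward_iterator ibegin,
    forward_iterator iend,
    fake_tensor& data
)
{
    il.to_tensor(ibegin, iend, data);
    const auto num_inputs = std::distance(ibegin, iend);
    // The input layer can't produce fewer output tensors than there are inputs, and
    // the number produced must be an integer multiple of the number of input objects.
    assert(data.num_samples() >= num_inputs);
    assert(data.num_samples()%num_inputs == 0);
    return static_cast<unsigned int>(data.num_samples()/num_inputs);
}

int main()
{
    std::vector<int> objects = {1, 2, 3};
    fake_tensor data;
    toy_input il;
    // 3 input objects become 6 tensor samples, so the inferred factor is 2.
    return infer_sample_expansion_factor(il, objects.begin(), objects.end(), data) == 2 ? 0 : 1;
}
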
......@@ -73,7 +73,6 @@ namespace dlib
!*/
public:
typedef int input_type;
const static unsigned int sample_expansion_factor = 1;
template <typename forward_iterator>
void to_tensor (
......@@ -503,13 +502,15 @@ namespace dlib
subnet_wrapper(const subnet_wrapper&) = delete;
subnet_wrapper& operator=(const subnet_wrapper&) = delete;
subnet_wrapper(T& l_) : l(l_) {}
subnet_wrapper(T& l_, unsigned int sef) : l(l_),_sample_expansion_factor(sef) {}
// Not much here because in this case T is one of the input layer types
// that doesn't have anything in it.
typedef T layer_details_type;
const layer_details_type& layer_details() const { return l; }
unsigned int sample_expansion_factor() const { return _sample_expansion_factor; }
private:
T& l;
unsigned int _sample_expansion_factor;
};
template <typename T>
......@@ -525,7 +526,7 @@ namespace dlib
const static size_t num_layers = T::num_layers;
typedef typename T::layer_details_type layer_details_type;
subnet_wrapper(T& l_) : l(l_),subnetwork(l.subnet()) {}
subnet_wrapper(T& l_, unsigned int = 0) : l(l_),subnetwork(l.subnet(), l.sample_expansion_factor()) {}
const tensor& get_output() const { return l.private_get_output(); }
tensor& get_gradient_input() { return l.private_get_gradient_input(); }
......@@ -534,6 +535,7 @@ namespace dlib
const subnet_wrapper<typename T::subnet_type,false>& subnet() const { return subnetwork; }
subnet_wrapper<typename T::subnet_type,false>& subnet() { return subnetwork; }
unsigned int sample_expansion_factor() const { return l.sample_expansion_factor(); }
private:
T& l;
......@@ -553,7 +555,7 @@ namespace dlib
const static size_t num_layers = T::num_layers;
typedef typename T::layer_details_type layer_details_type;
subnet_wrapper(T& l_) : l(l_),subnetwork(l.subnet()) {}
subnet_wrapper(T& l_, unsigned int = 0) : l(l_),subnetwork(l.subnet(), l.sample_expansion_factor()) {}
const tensor& get_output() const { return l.get_output(); }
tensor& get_gradient_input() { return l.get_gradient_input(); }
......@@ -562,6 +564,7 @@ namespace dlib
const subnet_wrapper<typename T::subnet_type,false>& subnet() const { return subnetwork; }
subnet_wrapper<typename T::subnet_type,false>& subnet() { return subnetwork; }
unsigned int sample_expansion_factor() const { return l.sample_expansion_factor(); }
private:
T& l;
......@@ -588,7 +591,6 @@ namespace dlib
typedef typename subnet_type::input_type input_type;
const static size_t num_layers = subnet_type::num_layers + 1;
const static size_t num_computational_layers = subnet_type::num_computational_layers + 1;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
add_layer(
):
......@@ -882,6 +884,8 @@ namespace dlib
const layer_details_type& layer_details() const { return details; }
layer_details_type& layer_details() { return details; }
unsigned int sample_expansion_factor() const { return subnet().sample_expansion_factor(); }
void clean()
{
x_grad.clear();
......@@ -1004,17 +1008,15 @@ namespace dlib
typedef LAYER_DETAILS layer_details_type;
typedef INPUT_LAYER subnet_type;
typedef typename INPUT_LAYER::input_type input_type;
const static unsigned int sample_expansion_factor = INPUT_LAYER::sample_expansion_factor;
const static size_t num_layers = 2;
const static size_t num_computational_layers = 1;
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_layer(
):
this_layer_setup_called(false),
gradient_input_is_stale(true),
get_output_and_gradient_input_disabled(false)
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(0)
{}
add_layer(const add_layer&) = default;
......@@ -1044,6 +1046,7 @@ namespace dlib
this_layer_setup_called(item.this_layer_setup_called),
gradient_input_is_stale(item.gradient_input_is_stale),
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(item._sample_expansion_factor),
x_grad(item.x_grad),
cached_output(item.cached_output),
grad_final(item.grad_final)
......@@ -1056,7 +1059,8 @@ namespace dlib
details(layer_det),
this_layer_setup_called(false),
gradient_input_is_stale(true),
get_output_and_gradient_input_disabled(false)
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(0)
{}
add_layer(
......@@ -1065,7 +1069,8 @@ namespace dlib
input_layer(il),
this_layer_setup_called(false),
gradient_input_is_stale(true),
get_output_and_gradient_input_disabled(false)
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(0)
{}
add_layer(
......@@ -1074,7 +1079,8 @@ namespace dlib
details(std::move(layer_det)),
this_layer_setup_called(false),
gradient_input_is_stale(true),
get_output_and_gradient_input_disabled(false)
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(0)
{}
add_layer(
......@@ -1085,7 +1091,8 @@ namespace dlib
input_layer(std::move(il)),
this_layer_setup_called(false),
gradient_input_is_stale(true),
get_output_and_gradient_input_disabled(false)
get_output_and_gradient_input_disabled(false),
_sample_expansion_factor(0)
{}
add_layer(
......@@ -1122,7 +1129,12 @@ namespace dlib
{
input_layer.to_tensor(ibegin, iend, data);
// make sure the input layer's to_tensor() function is implemented properly.
DLIB_CASSERT(std::distance(ibegin,iend)*sample_expansion_factor == data.num_samples(),"");
DLIB_CASSERT(data.num_samples() >= std::distance(ibegin,iend),
"The input layer can't produce fewer output tensors than there are inputs.");
DLIB_CASSERT(data.num_samples()%std::distance(ibegin,iend) == 0,
"The number of tensors produced by the input layer must be an integer multiple of the number of input objects.");
_sample_expansion_factor = data.num_samples()/std::distance(ibegin,iend);
data.async_copy_to_device();
}
......@@ -1145,8 +1157,9 @@ namespace dlib
const tensor& forward (const tensor& x)
{
DLIB_CASSERT(x.num_samples()%sample_expansion_factor == 0,"");
subnet_wrapper wsub(x, grad_final);
DLIB_CASSERT(sample_expansion_factor() != 0, "You must call to_tensor() before this function can be used.");
DLIB_CASSERT(x.num_samples()%sample_expansion_factor() == 0,"");
subnet_wrapper wsub(x, grad_final, _sample_expansion_factor);
if (!this_layer_setup_called)
{
details.setup(wsub);
......@@ -1199,7 +1212,7 @@ namespace dlib
grad_final.copy_size(x);
grad_final = 0;
subnet_wrapper wsub(x, grad_final);
subnet_wrapper wsub(x, grad_final, _sample_expansion_factor);
params_grad.copy_size(details.get_layer_params());
impl::call_layer_backward(details, private_get_output(),
gradient_input, wsub, static_cast<tensor&>(params_grad));
......@@ -1233,6 +1246,8 @@ namespace dlib
const layer_details_type& layer_details() const { return details; }
layer_details_type& layer_details() { return details; }
unsigned int sample_expansion_factor() const { return _sample_expansion_factor; }
void clean()
{
x_grad.clear();
......@@ -1245,7 +1260,7 @@ namespace dlib
friend void serialize(const add_layer& item, std::ostream& out)
{
int version = 2;
int version = 3;
serialize(version, out);
serialize(item.input_layer, out);
serialize(item.details, out);
......@@ -1255,13 +1270,14 @@ namespace dlib
serialize(item.x_grad, out);
serialize(item.cached_output, out);
serialize(item.grad_final, out);
serialize(item._sample_expansion_factor, out);
}
friend void deserialize(add_layer& item, std::istream& in)
{
int version = 0;
deserialize(version, in);
if (version != 2)
if (!(2 <= version && version <= 3))
throw serialization_error("Unexpected version found while deserializing dlib::add_layer.");
deserialize(item.input_layer, in);
deserialize(item.details, in);
......@@ -1271,6 +1287,10 @@ namespace dlib
deserialize(item.x_grad, in);
deserialize(item.cached_output, in);
deserialize(item.grad_final, in);
if (version >= 3)
deserialize(item._sample_expansion_factor, in);
else
item._sample_expansion_factor = 1; // all layer types set this to 1 in older dlib versions, so that's what we put here.
}
friend std::ostream& operator<< (std::ostream& out, const add_layer& item)
......@@ -1295,19 +1315,20 @@ namespace dlib
bool this_layer_requires_forward_output(
)
{
subnet_wrapper wsub(grad_final, grad_final);
subnet_wrapper wsub(grad_final, grad_final, _sample_expansion_factor);
return impl::backward_requires_forward_output(details, wsub);
}
class subnet_wrapper
{
public:
subnet_wrapper(const tensor& x_, resizable_tensor& grad_final_) :
x(x_), grad_final(grad_final_) {}
subnet_wrapper(const tensor& x_, resizable_tensor& grad_final_, unsigned int sef) :
x(x_), grad_final(grad_final_), _sample_expansion_factor(sef) {}
subnet_wrapper(const subnet_wrapper&) = delete;
subnet_wrapper& operator=(const subnet_wrapper&) = delete;
unsigned int sample_expansion_factor() const { return _sample_expansion_factor;}
const tensor& get_output() const { return x; }
tensor& get_gradient_input()
{
......@@ -1322,6 +1343,7 @@ namespace dlib
private:
const tensor& x;
resizable_tensor& grad_final;
unsigned int _sample_expansion_factor;
};
void swap(add_layer& item)
......@@ -1334,6 +1356,7 @@ namespace dlib
std::swap(x_grad, item.x_grad);
std::swap(cached_output, item.cached_output);
std::swap(grad_final, item.grad_final);
std::swap(_sample_expansion_factor, item._sample_expansion_factor);
}
subnet_type input_layer;
......@@ -1341,6 +1364,7 @@ namespace dlib
bool this_layer_setup_called;
bool gradient_input_is_stale;
bool get_output_and_gradient_input_disabled;
mutable unsigned int _sample_expansion_factor;
resizable_tensor x_grad;
resizable_tensor cached_output;
resizable_tensor grad_final;
......@@ -1373,10 +1397,7 @@ namespace dlib
typedef int layer_details_type; // not really used anywhere, but required by subnet_wrapper.
const static size_t num_layers = subnet_type::num_layers + 1;
const static size_t num_computational_layers = subnet_type::num_computational_layers;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
const static unsigned long id = ID;
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_tag_layer() {};
add_tag_layer(const add_tag_layer&) = default;
......@@ -1461,6 +1482,8 @@ namespace dlib
const subnet_type& subnet() const { return subnetwork; }
subnet_type& subnet() { return subnetwork; }
unsigned int sample_expansion_factor() const { return subnet().sample_expansion_factor(); }
void clean()
{
subnetwork.clean();
......@@ -1576,7 +1599,6 @@ namespace dlib
const static size_t layers_in_repeated_group = layers_in_each_group*num;
const static size_t num_layers = subnet_type::num_layers + layers_in_repeated_group;
const static unsigned int sample_expansion_factor = SUBNET::sample_expansion_factor;
typedef REPEATED_LAYER<impl::repeat_input_layer> repeated_layer_type;
......@@ -1745,6 +1767,8 @@ namespace dlib
const subnet_type& subnet() const { return subnetwork; }
subnet_type& subnet() { return subnetwork; }
unsigned int sample_expansion_factor() const { return subnet().sample_expansion_factor(); }
void clean()
{
temp_tensor.clear();
......@@ -1842,12 +1866,9 @@ namespace dlib
typedef int layer_details_type; // not really used anywhere, but required by subnet_wrapper.
const static size_t num_computational_layers = 0;
const static size_t num_layers = 2;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
const static unsigned long id = ID;
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_tag_layer():cached_output_ptr(nullptr),gradient_input_is_stale(true) {}
add_tag_layer():cached_output_ptr(nullptr),gradient_input_is_stale(true),_sample_expansion_factor(0) {}
add_tag_layer(const add_tag_layer&) = default;
add_tag_layer& operator=(const add_tag_layer&) = default;
......@@ -1861,7 +1882,8 @@ namespace dlib
cached_output(item.cached_output),
cached_output_ptr(nullptr),
grad_final(item.grad_final),
gradient_input_is_stale(item.gradient_input_is_stale)
gradient_input_is_stale(item.gradient_input_is_stale),
_sample_expansion_factor(0)
{}
template <typename ...T>
......@@ -1870,7 +1892,8 @@ namespace dlib
) :
input_layer(std::move(args)...),
cached_output_ptr(nullptr),
gradient_input_is_stale(true)
gradient_input_is_stale(true),
_sample_expansion_factor(0)
{
}
......@@ -1878,7 +1901,8 @@ namespace dlib
std::tuple<>
) :
cached_output_ptr(nullptr),
gradient_input_is_stale(true)
gradient_input_is_stale(true),
_sample_expansion_factor(0)
{}
template <typename forward_iterator>
......@@ -1889,8 +1913,19 @@ namespace dlib
) const
{
input_layer.to_tensor(ibegin,iend,data);
// make sure the input layer's to_tensor() function is implemented properly.
DLIB_CASSERT(data.num_samples() >= std::distance(ibegin,iend),
"The input layer can't produce fewer output tensors than there are inputs.");
DLIB_CASSERT(data.num_samples()%std::distance(ibegin,iend) == 0,
"The number of tensors produced by the input layer must be an integer multiple of the number of input objects.");
_sample_expansion_factor = data.num_samples()/std::distance(ibegin,iend);
data.async_copy_to_device();
}
unsigned int sample_expansion_factor() const { return _sample_expansion_factor; }
template <typename forward_iterator>
const tensor& operator() (
forward_iterator ibegin,
......@@ -1971,25 +2006,31 @@ namespace dlib
friend void serialize(const add_tag_layer& item, std::ostream& out)
{
int version = 1;
int version = 2;
serialize(version, out);
serialize(item.input_layer, out);
serialize(item.cached_output, out);
serialize(item.grad_final, out);
serialize(item.gradient_input_is_stale, out);
serialize(item._sample_expansion_factor, out);
}
friend void deserialize(add_tag_layer& item, std::istream& in)
{
int version = 0;
deserialize(version, in);
if (version != 1)
if (!(1 <= version && version <= 2))
throw serialization_error("Unexpected version found while deserializing dlib::add_tag_layer.");
deserialize(item.input_layer, in);
deserialize(item.cached_output, in);
deserialize(item.grad_final, in);
deserialize(item.gradient_input_is_stale, in);
item.cached_output_ptr = nullptr;
if (version >= 2)
deserialize(item._sample_expansion_factor, in);
else
item._sample_expansion_factor = 1; // all layer types set this to 1 in older dlib versions, so that's what we put here.
}
friend std::ostream& operator<< (std::ostream& out, const add_tag_layer& item)
......@@ -2049,6 +2090,7 @@ namespace dlib
std::swap(cached_output_ptr, item.cached_output_ptr);
std::swap(grad_final, item.grad_final);
std::swap(gradient_input_is_stale, item.gradient_input_is_stale);
std::swap(_sample_expansion_factor, item._sample_expansion_factor);
}
subnet_type input_layer;
......@@ -2056,6 +2098,7 @@ namespace dlib
tensor* cached_output_ptr;
resizable_tensor grad_final;
bool gradient_input_is_stale;
mutable unsigned int _sample_expansion_factor;
};
template <unsigned long ID, typename U, typename E>
......@@ -2107,13 +2150,10 @@ namespace dlib
const static size_t num_layers = subnet_type::num_layers + 1;
// Note that the loss layer doesn't count as an additional computational layer.
const static size_t num_computational_layers = subnet_type::num_computational_layers;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
typedef typename get_loss_layer_label_type<LOSS_DETAILS>::type label_type;
static_assert(is_nonloss_layer_type<SUBNET>::value,
"SUBNET must be of type add_layer, add_skip_layer, or add_tag_layer.");
static_assert(sample_expansion_factor == LOSS_DETAILS::sample_expansion_factor,
"The loss layer and input layer must agree on the sample_expansion_factor.");
add_loss_layer() {};
......@@ -2168,6 +2208,8 @@ namespace dlib
subnetwork.to_tensor(ibegin,iend,data);
}
unsigned int sample_expansion_factor() const { return subnet().sample_expansion_factor(); }
template <typename output_iterator>
void operator() (
const tensor& x,
......@@ -2580,10 +2622,7 @@ namespace dlib
typedef int layer_details_type; // not really used anywhere, but required by subnet_wrapper.
const static size_t num_layers = subnet_type::num_layers + 1;
const static size_t num_computational_layers = subnet_type::num_computational_layers;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
const static unsigned long id = tag_id<TAG_TYPE>::id;
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_skip_layer() {};
add_skip_layer(const add_skip_layer&) = default;
......@@ -2681,6 +2720,8 @@ namespace dlib
return subnetwork;
}
unsigned int sample_expansion_factor() const { return subnet().sample_expansion_factor(); }
void clean()
{
subnetwork.clean();
......
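
The serialize()/deserialize() changes above follow a simple backward-compatible versioning pattern: bump the version written on save, accept both the old and new versions on load, and default the new field when reading old data. A hedged standalone sketch of that pattern, with plain stream I/O standing in for dlib's serialization routines and toy_layer as a hypothetical type:

#include <istream>
#include <ostream>
#include <sstream>
#include <stdexcept>

// Hypothetical layer state containing only the newly added field.
struct toy_layer
{
    unsigned int sample_expansion_factor = 0;
};

void save (const toy_layer& item, std::ostream& out)
{
    int version = 3;                       // bumped from 2 because a new field was added
    out << version << ' ' << item.sample_expansion_factor << '\n';
}

void load (toy_layer& item, std::istream& in)
{
    int version = 0;
    in >> version;
    if (!(2 <= version && version <= 3))
        throw std::runtime_error("Unexpected version found while deserializing toy_layer.");
    if (version >= 3)
        in >> item.sample_expansion_factor;
    else
        item.sample_expansion_factor = 1;  // older files implicitly used a factor of 1
}

int main()
{
    toy_layer a;
    a.sample_expansion_factor = 2;
    std::stringstream ss;
    save(a, ss);
    toy_layer b;
    load(b, ss);
    return b.sample_expansion_factor == 2 ? 0 : 1;
}
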
......@@ -204,7 +204,6 @@ namespace dlib
typedef LAYER_DETAILS layer_details_type;
typedef SUBNET subnet_type;
typedef typename subnet_type::input_type input_type;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
// num_computational_layers will always give the number of layers in the network
// that transform tensors (i.e. layers defined by something that implements the
// EXAMPLE_COMPUTATIONAL_LAYER_ interface). This is all the layers except for
......@@ -218,6 +217,7 @@ namespace dlib
/*!
ensures
- default constructs all the layers in this network.
- #sample_expansion_factor() == 0
!*/
add_layer(const add_layer&) = default;
......@@ -240,6 +240,7 @@ namespace dlib
each other.
- #layer_details() == layer_details_type(item.layer_details())
- #subnet() == subnet_type(item.subnet())
- #sample_expansion_factor() == item.sample_expansion_factor()
!*/
template <typename ...T, typename LD, typename ...U>
......@@ -251,6 +252,7 @@ namespace dlib
ensures
- #layer_details() == layer_details_type(tuple_head(layer_det))
- #subnet() == subnet_type(tuple_tail(layer_det),args)
- #sample_expansion_factor() == 0
!*/
template <typename ...T>
......@@ -262,6 +264,7 @@ namespace dlib
ensures
- #layer_details() == layer_details_type(layer_det)
- #subnet() == subnet_type(args)
- #sample_expansion_factor() == 0
!*/
template <typename ...T>
......@@ -275,6 +278,7 @@ namespace dlib
args are simply passed on to the sub layers in their entirety.
- #layer_details() == layer_details_type()
- #subnet() == subnet_type(args)
- #sample_expansion_factor() == 0
!*/
template <typename ...T>
......@@ -286,6 +290,7 @@ namespace dlib
ensures
- #layer_details() == layer_det
- #subnet() == subnet_type(args)
- #sample_expansion_factor() == 0
!*/
template <typename forward_iterator>
......@@ -300,15 +305,29 @@ namespace dlib
- std::distance(ibegin,iend) > 0
ensures
- Converts the iterator range into a tensor and stores it into #data.
- #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
- #data.num_samples()%distance(ibegin,iend) == 0.
- #sample_expansion_factor() == #data.num_samples()/distance(ibegin,iend).
- #sample_expansion_factor() > 0
- The data in the ith sample of #data corresponds to the input_type object
*(ibegin+i/sample_expansion_factor).
*(ibegin+i/#sample_expansion_factor()).
- Invokes data.async_copy_to_device() so that the data begins transferring
to the GPU device, if present.
- This function is implemented by calling the to_tensor() routine defined
at the input layer of this network.
!*/
unsigned int sample_expansion_factor (
) const;
/*!
ensures
- When to_tensor() is invoked on this network's input layer it converts N
input objects into M samples, all stored inside a resizable_tensor. It
is always the case that M is some integer multiple of N.
sample_expansion_factor() returns the value of this multiplier. To be
very specific, it is always true that M==I*N where I is some integer.
This integer I is what is returned by sample_expansion_factor().
!*/
const subnet_type& subnet(
) const;
/*!
......@@ -379,7 +398,10 @@ namespace dlib
);
/*!
requires
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
ensures
- Runs x through the network and returns the results. In particular, this
......@@ -574,8 +596,6 @@ namespace dlib
REQUIREMENTS ON LOSS_DETAILS
- Must be a type that implements the EXAMPLE_LOSS_LAYER_ interface defined
in loss_abstract.h
- LOSS_DETAILS::sample_expansion_factor == SUBNET::sample_expansion_factor
i.e. The loss layer and input layer must agree on the sample_expansion_factor.
REQUIREMENTS ON SUBNET
- One of the following must be true:
......@@ -599,7 +619,6 @@ namespace dlib
typedef typename subnet_type::input_type input_type;
const static size_t num_computational_layers = subnet_type::num_computational_layers;
const static size_t num_layers = subnet_type::num_layers + 1;
const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
// If LOSS_DETAILS is an unsupervised loss then label_type==no_label_type.
// Otherwise it is defined as follows:
typedef typename LOSS_DETAILS::label_type label_type;
......@@ -708,15 +727,29 @@ namespace dlib
- std::distance(ibegin,iend) > 0
ensures
- Converts the iterator range into a tensor and stores it into #data.
- #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
- #data.num_samples()%distance(ibegin,iend) == 0.
- #sample_expansion_factor() == #data.num_samples()/distance(ibegin,iend).
- #sample_expansion_factor() > 0
- The data in the ith sample of #data corresponds to the input_type object
*(ibegin+i/sample_expansion_factor).
*(ibegin+i/sample_expansion_factor()).
- Invokes data.async_copy_to_device() so that the data begins transferring
to the GPU device, if present.
- This function is implemented by calling the to_tensor() routine defined
at the input layer of this network.
!*/
unsigned int sample_expansion_factor (
) const;
/*!
ensures
- When to_tensor() is invoked on this network's input layer it converts N
input objects into M samples, all stored inside a resizable_tensor. It
is always the case that M is some integer multiple of N.
sample_expansion_factor() returns the value of this multiplier. To be
very specific, it is always true that M==I*N where I is some integer.
This integer I is what is returned by sample_expansion_factor().
!*/
// -------------
template <typename output_iterator>
......@@ -726,10 +759,13 @@ namespace dlib
);
/*!
requires
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
- obegin == iterator pointing to the start of a range of
x.num_samples()/sample_expansion_factor label_type elements.
x.num_samples()/sample_expansion_factor() label_type elements.
ensures
- runs x through the network and writes the output to the range at obegin.
- loss_details().to_label() is used to write the network output into
......@@ -799,15 +835,18 @@ namespace dlib
);
/*!
requires
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
- lbegin == iterator pointing to the start of a range of
x.num_samples()/sample_expansion_factor label_type elements.
x.num_samples()/sample_expansion_factor() label_type elements.
ensures
- runs x through the network, compares the output to the expected output
pointed to by lbegin, and returns the resulting loss.
- for all valid k:
- the expected label of the kth sample in x is *(lbegin+k/sample_expansion_factor).
- the expected label of the kth sample in x is *(lbegin+k/sample_expansion_factor()).
- This function does not update the network parameters.
!*/
......@@ -839,7 +878,10 @@ namespace dlib
/*!
requires
- LOSS_DETAILS is an unsupervised loss. i.e. label_type==no_label_type.
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
ensures
- runs x through the network and returns the resulting loss.
......@@ -870,10 +912,13 @@ namespace dlib
);
/*!
requires
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
- lbegin == iterator pointing to the start of a range of
x.num_samples()/sample_expansion_factor label_type elements.
x.num_samples()/sample_expansion_factor() label_type elements.
ensures
- runs x through the network, compares the output to the expected output
pointed to by lbegin, and computes parameter and data gradients with
......@@ -881,7 +926,7 @@ namespace dlib
updates get_final_data_gradient() and also, for each layer, the tensor
returned by get_parameter_gradient().
- for all valid k:
- the expected label of the kth sample in x is *(lbegin+k/sample_expansion_factor).
- the expected label of the kth sample in x is *(lbegin+k/sample_expansion_factor()).
- returns compute_loss(x,lbegin)
!*/
......@@ -914,7 +959,10 @@ namespace dlib
/*!
requires
- LOSS_DETAILS is an unsupervised loss. i.e. label_type==no_label_type.
- x.num_samples()%sample_expansion_factor == 0
- sample_expansion_factor() != 0
(i.e. to_tensor() must have been called to set sample_expansion_factor()
to something non-zero.)
- x.num_samples()%sample_expansion_factor() == 0
- x.num_samples() > 0
ensures
- runs x through the network and computes parameter and data gradients with
......@@ -1047,7 +1095,6 @@ namespace dlib
typedef typename SUBNET::input_type input_type;
const static size_t num_computational_layers = (REPEATED_LAYER<SUBNET>::num_computational_layers-SUBNET::num_computational_layers)*num + SUBNET::num_computational_layers;
const static size_t num_layers = (REPEATED_LAYER<SUBNET>::num_layers-SUBNET::num_layers)*num + SUBNET::num_layers;
const static unsigned int sample_expansion_factor = SUBNET::sample_expansion_factor;
typedef REPEATED_LAYER<an_unspecified_input_type> repeated_layer_type;
template <typename T, typename ...U>
......
......@@ -32,7 +32,6 @@ namespace dlib
{
public:
typedef matrix<rgb_pixel> input_type;
const static unsigned int sample_expansion_factor = 1;
input_rgb_image (
) :
......@@ -154,7 +153,6 @@ namespace dlib
static_assert(NR != 0 && NC != 0, "The input image can't be empty.");
typedef matrix<rgb_pixel> input_type;
const static unsigned int sample_expansion_factor = 1;
input_rgb_image_sized (
) :
......@@ -293,7 +291,6 @@ namespace dlib
{
public:
typedef matrix<T,NR,NC,MM,L> input_type;
const static unsigned int sample_expansion_factor = 1;
input() {}
input(const input&) {}
......@@ -387,7 +384,6 @@ namespace dlib
{
public:
typedef array2d<T,MM> input_type;
const static unsigned int sample_expansion_factor = 1;
input() {}
input(const input&) {}
......
......@@ -61,8 +61,6 @@ namespace dlib
allows you to easily convert between related deep neural network types.
!*/
// sample_expansion_factor must be > 0
const static unsigned int sample_expansion_factor;
typedef whatever_type_to_tensor_expects input_type;
template <typename forward_iterator>
......@@ -77,12 +75,13 @@ namespace dlib
- std::distance(ibegin,iend) > 0
ensures
- Converts the iterator range into a tensor and stores it into #data.
- #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
- #data.num_samples()%distance(ibegin,iend) == 0.
Normally you would have #data.num_samples() == distance(ibegin,iend) but
you can also expand the output by some integer factor so long as the loss
you use can deal with it correctly.
- The data in the ith sample of #data corresponds to the input_type object
*(ibegin+i/sample_expansion_factor).
where sample_expansion_factor==#data.num_samples()/distance(ibegin,iend).
!*/
};
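
To make the EXAMPLE_INPUT_LAYER_ contract above concrete, here is a hedged sketch of a user-defined input layer written against the new interface. input_scalar is an illustrative name rather than a dlib class, and the serialization and printing hooks a production layer needs are omitted; the point is simply that no sample_expansion_factor member is declared, since the network infers the factor from data.num_samples()/std::distance(ibegin,iend).

#include <dlib/dnn.h>
#include <iterator>
#include <vector>

// Hypothetical example, not part of dlib: an input layer that turns each float
// into a single 1x1x1 tensor sample.
class input_scalar
{
public:
    typedef float input_type;

    template <typename forward_iterator>
    void to_tensor (
        forward_iterator ibegin,
        forward_iterator iend,
        dlib::resizable_tensor& data
    ) const
    {
        const long n = std::distance(ibegin, iend);
        data.set_size(n, 1, 1, 1);   // one output sample per input object
        float* out = data.host();
        for (auto i = ibegin; i != iend; ++i)
            *out++ = *i;
    }

    // A real input layer would also provide friend serialize()/deserialize(),
    // operator<<, and to_xml(); they are omitted from this sketch.
};

int main()
{
    std::vector<float> x = {0.5f, -1.0f, 2.0f};
    dlib::resizable_tensor data;
    input_scalar().to_tensor(x.begin(), x.end(), data);
    // data.num_samples() == 3 for 3 inputs, so the inferred expansion factor would be 1.
    return data.num_samples() == 3 ? 0 : 1;
}
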
......@@ -120,7 +119,6 @@ namespace dlib
!*/
public:
const static unsigned int sample_expansion_factor = 1;
typedef T input_type;
template <typename forward_iterator>
......@@ -166,7 +164,6 @@ namespace dlib
!*/
public:
typedef matrix<rgb_pixel> input_type;
const static unsigned int sample_expansion_factor = 1;
input_rgb_image (
);
......
......@@ -94,6 +94,25 @@ namespace dlib
implementing the EXAMPLE_COMPUTATIONAL_LAYER_ interface that defines the
layer's behavior.
!*/
unsigned int sample_expansion_factor (
) const;
/*!
ensures
- When to_tensor() is invoked on this network's input layer it converts N
input objects into M samples, all stored inside a resizable_tensor. It
is always the case that M is some integer multiple of N.
sample_expansion_factor() returns the value of this multiplier. To be
very specific, it is always true that M==I*N where I is some integer.
This integer I is what is returned by sample_expansion_factor().
It should be noted that computational layers likely do not care about the
sample expansion factor. It is only really of concern inside a loss
layer where you need to know its value so that tensor samples can be
matched against truth objects. Moreover, in most cases the sample
expansion factor is 1.
!*/
};
// ----------------------------------------------------------------------------------------
......
......@@ -17,7 +17,6 @@ namespace dlib
{
public:
const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
......@@ -30,6 +29,8 @@ namespace dlib
label_iterator iter
) const
{
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
const tensor& output_tensor = sub.get_output();
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 &&
......@@ -56,8 +57,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
......@@ -122,7 +124,6 @@ namespace dlib
{
public:
const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
......@@ -135,6 +136,8 @@ namespace dlib
label_iterator iter
) const
{
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
const tensor& output_tensor = sub.get_output();
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 &&
......@@ -162,8 +165,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
......@@ -236,7 +240,6 @@ namespace dlib
{
public:
const static unsigned int sample_expansion_factor = 1;
typedef unsigned long label_type;
template <
......@@ -250,6 +253,7 @@ namespace dlib
) const
{
const tensor& output_tensor = sub.get_output();
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 ,"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
......@@ -278,8 +282,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
......
......@@ -39,8 +39,6 @@ namespace dlib
public:
// sample_expansion_factor must be > 0
const static unsigned int sample_expansion_factor;
typedef whatever_type_you_use_for_labels label_type;
EXAMPLE_LOSS_LAYER_ (
......@@ -75,15 +73,15 @@ namespace dlib
- input_tensor was given as input to the network sub and the outputs are
now visible in layer<i>(sub).get_output(), for all valid i.
- input_tensor.num_samples() > 0
- input_tensor.num_samples()%sample_expansion_factor == 0.
- input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
- iter == an iterator pointing to the beginning of a range of
input_tensor.num_samples()/sample_expansion_factor elements. Moreover,
input_tensor.num_samples()/sub.sample_expansion_factor() elements. Moreover,
they must be label_type elements.
ensures
- Converts the output of the provided network to label_type objects and
stores the results into the range indicated by iter. In particular, for
all valid i, it will be the case that:
*(iter+i/sample_expansion_factor) is populated based on the output of
*(iter+i/sub.sample_expansion_factor()) is populated based on the output of
sub and corresponds to the ith sample in input_tensor.
!*/
......@@ -103,15 +101,15 @@ namespace dlib
- input_tensor was given as input to the network sub and the outputs are
now visible in layer<i>(sub).get_output(), for all valid i.
- input_tensor.num_samples() > 0
- input_tensor.num_samples()%sample_expansion_factor == 0.
- input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
- for all valid i:
- layer<i>(sub).get_gradient_input() has the same dimensions as
layer<i>(sub).get_output().
- truth == an iterator pointing to the beginning of a range of
input_tensor.num_samples()/sample_expansion_factor elements. Moreover,
input_tensor.num_samples()/sub.sample_expansion_factor() elements. Moreover,
they must be label_type elements.
- for all valid i:
- *(truth+i/sample_expansion_factor) is the label of the ith sample in
- *(truth+i/sub.sample_expansion_factor()) is the label of the ith sample in
input_tensor.
ensures
- This function computes a loss function that describes how well the output
......@@ -168,7 +166,6 @@ namespace dlib
!*/
public:
const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
......@@ -187,6 +184,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
and the output label is the raw score for each classified object. If the score
is > 0 then the classifier is predicting the +1 class, otherwise it is
predicting the -1 class.
......@@ -208,6 +206,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
- all values pointed to by truth are +1 or -1.
!*/
......@@ -234,7 +233,6 @@ namespace dlib
!*/
public:
const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
......@@ -253,6 +251,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
and the output label is the raw score for each classified object. If the score
is > 0 then the classifier is predicting the +1 class, otherwise it is
predicting the -1 class.
......@@ -274,6 +273,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
- all values pointed to by truth are +1 or -1.
!*/
......@@ -305,7 +305,6 @@ namespace dlib
public:
const static unsigned int sample_expansion_factor = 1;
typedef unsigned long label_type;
template <
......@@ -323,6 +322,7 @@ namespace dlib
- sub.get_output().nr() == 1
- sub.get_output().nc() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
and the output label is the predicted class for each classified object. The number
of possible output classes is sub.get_output().k().
!*/
......@@ -342,6 +342,7 @@ namespace dlib
- sub.get_output().nr() == 1
- sub.get_output().nc() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
- sub.sample_expansion_factor() == 1
- all values pointed to by truth are < sub.get_output().k()
!*/
......
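
The to_label()/compute_loss() requirements above boil down to one indexing rule: with N input objects and an expansion factor F reported by sub.sample_expansion_factor(), the network produces M = F*N tensor samples, and the ith sample is matched against the truth label *(truth + i/F). A small standalone sketch of that mapping (illustrative only, no dlib types):

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    const unsigned int F = 2;                       // sub.sample_expansion_factor()
    std::vector<float> truth = {1, -1, 1};          // one label per input object (N == 3)
    const std::size_t num_samples = F*truth.size(); // M == 6 tensor samples

    for (std::size_t i = 0; i < num_samples; ++i)
    {
        // samples 0,1 -> truth[0]; samples 2,3 -> truth[1]; samples 4,5 -> truth[2]
        const float label_for_sample_i = *(truth.begin() + i/F);
        assert(label_for_sample_i == truth[i/F]);
        (void)label_for_sample_i;
    }
    return 0;
}
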