Commit d1035855 authored by Davis King

Changed the DNN API so that sample_expansion_factor is a runtime variable

rather than a compile-time constant.  This also removes it from the input layer
interface, since the DNN core now infers its value at runtime, meaning users who
define their own input layers no longer need to specify it.
parent 390c8e90
(Diffs for two of the changed files are collapsed and not shown.)
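To make the change concrete, here is a minimal sketch of a user-defined input layer written against the new interface. The class name and the float-matrix input type are illustrative, not part of the commit; the point is that the `const static unsigned int sample_expansion_factor = 1;` member the old interface required is simply absent, because the core now infers the factor from how many tensor samples to_tensor() emits per input object.

```cpp
#include <dlib/dnn.h>
#include <iterator>

// Hypothetical input layer under the new API.  Before this commit it would
// also have needed:  const static unsigned int sample_expansion_factor = 1;
class my_input
{
public:
    typedef dlib::matrix<float> input_type;

    template <typename forward_iterator>
    void to_tensor (
        forward_iterator ibegin,
        forward_iterator iend,
        dlib::resizable_tensor& data
    ) const
    {
        // One tensor sample per input object, so the core will infer an
        // expansion factor of 1 for this layer at runtime.
        const long num = std::distance(ibegin, iend);
        data.set_size(num, 1, ibegin->nr(), ibegin->nc());
        float* out = data.host();
        for (auto i = ibegin; i != iend; ++i)
            for (long r = 0; r < i->nr(); ++r)
                for (long c = 0; c < i->nc(); ++c)
                    *out++ = (*i)(r,c);
    }

    friend void serialize(const my_input&, std::ostream& out)
    { dlib::serialize("my_input", out); }
    friend void deserialize(my_input&, std::istream& in)
    { std::string version; dlib::deserialize(version, in); }
};
```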
@@ -32,7 +32,6 @@ namespace dlib
{
public:
typedef matrix<rgb_pixel> input_type;
-const static unsigned int sample_expansion_factor = 1;
input_rgb_image (
) :
@@ -154,7 +153,6 @@ namespace dlib
static_assert(NR != 0 && NC != 0, "The input image can't be empty.");
typedef matrix<rgb_pixel> input_type;
-const static unsigned int sample_expansion_factor = 1;
input_rgb_image_sized (
) :
@@ -293,7 +291,6 @@ namespace dlib
{
public:
typedef matrix<T,NR,NC,MM,L> input_type;
-const static unsigned int sample_expansion_factor = 1;
input() {}
input(const input&) {}
@@ -387,7 +384,6 @@ namespace dlib
{
public:
typedef array2d<T,MM> input_type;
-const static unsigned int sample_expansion_factor = 1;
input() {}
input(const input&) {}
@@ -61,8 +61,6 @@ namespace dlib
allows you to easily convert between related deep neural network types.
!*/
-// sample_expansion_factor must be > 0
-const static unsigned int sample_expansion_factor;
typedef whatever_type_to_tensor_expects input_type;
template <typename forward_iterator>
@@ -77,12 +75,13 @@ namespace dlib
- std::distance(ibegin,iend) > 0
ensures
- Converts the iterator range into a tensor and stores it into #data.
-- #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
+- #data.num_samples()%distance(ibegin,iend) == 0.
Normally you would have #data.num_samples() == distance(ibegin,iend) but
you can also expand the output by some integer factor so long as the loss
you use can deal with it correctly.
- The data in the ith sample of #data corresponds to the input_type object
-*(ibegin+i/sample_expansion_factor).
+*(ibegin+i/sample_expansion_factor) where
+sample_expansion_factor==#data.num_samples()/distance(ibegin,iend).
!*/
};
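The relaxed contract above only requires that the number of emitted samples be an integer multiple of the number of input objects. A hedged sketch of an input layer that exploits this with a factor of 2; the class name and the mirroring scheme are invented for illustration:

```cpp
#include <dlib/dnn.h>
#include <iterator>

// Hypothetical input layer that emits two tensor samples per input image:
// the image itself and its horizontal mirror.  The core will therefore
// infer sample_expansion_factor() == 2 at runtime.
class my_mirrored_input
{
public:
    typedef dlib::matrix<float> input_type;

    template <typename forward_iterator>
    void to_tensor (
        forward_iterator ibegin,
        forward_iterator iend,
        dlib::resizable_tensor& data
    ) const
    {
        const long num = std::distance(ibegin, iend);
        const long nr = ibegin->nr();
        const long nc = ibegin->nc();
        // #data.num_samples() == 2*num, so the documented requirement
        // #data.num_samples()%distance(ibegin,iend) == 0 holds.
        data.set_size(2*num, 1, nr, nc);

        float* out = data.host();
        for (auto i = ibegin; i != iend; ++i)
        {
            for (long r = 0; r < nr; ++r)        // sample 2k: the image
                for (long c = 0; c < nc; ++c)
                    *out++ = (*i)(r,c);
            for (long r = 0; r < nr; ++r)        // sample 2k+1: its mirror
                for (long c = 0; c < nc; ++c)
                    *out++ = (*i)(r, nc-1-c);
        }
    }

    friend void serialize(const my_mirrored_input&, std::ostream& out)
    { dlib::serialize("my_mirrored_input", out); }
    friend void deserialize(my_mirrored_input&, std::istream& in)
    { std::string version; dlib::deserialize(version, in); }
};
```

Note that tensor samples 2k and 2k+1 both correspond to *(ibegin+k), matching the documented mapping *(ibegin + i/sample_expansion_factor).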
@@ -120,7 +119,6 @@ namespace dlib
!*/
public:
-const static unsigned int sample_expansion_factor = 1;
typedef T input_type;
template <typename forward_iterator>
@@ -166,7 +164,6 @@ namespace dlib
!*/
public:
typedef matrix<rgb_pixel> input_type;
-const static unsigned int sample_expansion_factor = 1;
input_rgb_image (
);
@@ -94,6 +94,25 @@ namespace dlib
implementing the EXAMPLE_COMPUTATIONAL_LAYER_ interface that defines the
layer's behavior.
!*/
+unsigned int sample_expansion_factor (
+) const;
+/*!
+ensures
+- When to_tensor() is invoked on this network's input layer it converts N
+input objects into M samples, all stored inside a resizable_tensor. It
+is always the case that M is some integer multiple of N.
+sample_expansion_factor() returns the value of this multiplier. To be
+very specific, it is always true that M==I*N where I is some integer.
+This integer I is what is returned by sample_expansion_factor().
+It should be noted that computational layers likely do not care about the
+sample expansion factor. It is only really of concern inside a loss
+layer where you need to know its value so that tensor samples can be
+matched against truth objects. Moreover, in most cases the sample
+expansion factor is 1.
+!*/
};
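For instance, a loss layer's to_label() could use the runtime factor to route each tensor sample to its label slot. A hypothetical sketch assuming float labels; only get_output(), the tensor accessors, and the new sample_expansion_factor() are taken from the interface documented above:

```cpp
template <typename SUBNET, typename label_iterator>
void to_label (
    const dlib::tensor& input_tensor,
    const SUBNET& sub,
    label_iterator iter
) const
{
    const dlib::tensor& output_tensor = sub.get_output();
    const unsigned int ef = sub.sample_expansion_factor();
    DLIB_CASSERT(input_tensor.num_samples()%ef == 0,"");

    const float* out = output_tensor.host();
    for (long i = 0; i < output_tensor.num_samples(); ++i)
    {
        // Tensor samples k*ef .. k*ef+ef-1 all came from input object k,
        // so they share one output label slot.  A real loss layer would
        // combine them somehow; this sketch simply averages.
        if (i%ef == 0)
            *(iter + i/ef) = 0;
        *(iter + i/ef) += out[i]/ef;
    }
}
```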
// ----------------------------------------------------------------------------------------
@@ -17,7 +17,6 @@ namespace dlib
{
public:
-const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
@@ -30,6 +29,8 @@ namespace dlib
label_iterator iter
) const
{
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
const tensor& output_tensor = sub.get_output();
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 &&
@@ -56,8 +57,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
-DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
+DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
@@ -122,7 +124,6 @@ namespace dlib
{
public:
-const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
@@ -135,6 +136,8 @@ namespace dlib
label_iterator iter
) const
{
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
const tensor& output_tensor = sub.get_output();
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 &&
@@ -162,8 +165,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
-DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
+DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
@@ -236,7 +240,6 @@ namespace dlib
{
public:
-const static unsigned int sample_expansion_factor = 1;
typedef unsigned long label_type;
template <
@@ -250,6 +253,7 @@ namespace dlib
) const
{
const tensor& output_tensor = sub.get_output();
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
output_tensor.nc() == 1 ,"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
@@ -278,8 +282,9 @@ namespace dlib
const tensor& output_tensor = sub.get_output();
tensor& grad = sub.get_gradient_input();
+DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
DLIB_CASSERT(input_tensor.num_samples() != 0,"");
-DLIB_CASSERT(input_tensor.num_samples()%sample_expansion_factor == 0,"");
+DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0,"");
DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples(),"");
DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");
DLIB_CASSERT(output_tensor.nr() == 1 &&
@@ -39,8 +39,6 @@ namespace dlib
public:
-// sample_expansion_factor must be > 0
-const static unsigned int sample_expansion_factor;
typedef whatever_type_you_use_for_labels label_type;
EXAMPLE_LOSS_LAYER_ (
@@ -75,15 +73,15 @@ namespace dlib
- input_tensor was given as input to the network sub and the outputs are
now visible in layer<i>(sub).get_output(), for all valid i.
- input_tensor.num_samples() > 0
-- input_tensor.num_samples()%sample_expansion_factor == 0.
+- input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
- iter == an iterator pointing to the beginning of a range of
-input_tensor.num_samples()/sample_expansion_factor elements. Moreover,
+input_tensor.num_samples()/sub.sample_expansion_factor() elements. Moreover,
they must be label_type elements.
ensures
- Converts the output of the provided network to label_type objects and
stores the results into the range indicated by iter. In particular, for
all valid i, it will be the case that:
-*(iter+i/sample_expansion_factor) is populated based on the output of
+*(iter+i/sub.sample_expansion_factor()) is populated based on the output of
sub and corresponds to the ith sample in input_tensor.
!*/
@@ -103,15 +101,15 @@ namespace dlib
- input_tensor was given as input to the network sub and the outputs are
now visible in layer<i>(sub).get_output(), for all valid i.
- input_tensor.num_samples() > 0
-- input_tensor.num_samples()%sample_expansion_factor == 0.
+- input_tensor.num_samples()%sub.sample_expansion_factor() == 0.
- for all valid i:
- layer<i>(sub).get_gradient_input() has the same dimensions as
layer<i>(sub).get_output().
- truth == an iterator pointing to the beginning of a range of
-input_tensor.num_samples()/sample_expansion_factor elements. Moreover,
+input_tensor.num_samples()/sub.sample_expansion_factor() elements. Moreover,
they must be label_type elements.
- for all valid i:
-- *(truth+i/sample_expansion_factor) is the label of the ith sample in
+- *(truth+i/sub.sample_expansion_factor()) is the label of the ith sample in
input_tensor.
ensures
- This function computes a loss function that describes how well the output
@@ -168,7 +166,6 @@ namespace dlib
!*/
public:
-const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
@@ -187,6 +184,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
and the output label is the raw score for each classified object. If the score
is > 0 then the classifier is predicting the +1 class, otherwise it is
predicting the -1 class.
@@ -208,6 +206,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
- all values pointed to by truth are +1 or -1.
!*/
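A hedged sketch of a compute_loss() satisfying the requirements above, using a hinge loss and its subgradient. This is illustrative, not dlib's actual loss_binary_hinge_ implementation:

```cpp
template <typename const_label_iterator, typename SUBNET>
double compute_loss (
    const dlib::tensor& input_tensor,
    const_label_iterator truth,
    SUBNET& sub
) const
{
    const dlib::tensor& output_tensor = sub.get_output();
    dlib::tensor& grad = sub.get_gradient_input();
    // Per the requirements above, the factor is 1, so tensor samples and
    // truth labels correspond one-to-one.
    DLIB_CASSERT(sub.sample_expansion_factor() == 1,"");
    DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples(),"");

    const float* out = output_tensor.host();
    float* g = grad.host();
    double loss = 0;
    const double scale = 1.0/output_tensor.num_samples();
    for (long i = 0; i < output_tensor.num_samples(); ++i)
    {
        const float y = *(truth + i);     // +1 or -1, as required above
        const double margin = y*out[i];
        if (margin < 1)                   // hinge: max(0, 1 - y*score)
        {
            loss += scale*(1 - margin);
            g[i] = -scale*y;
        }
        else
        {
            g[i] = 0;
        }
    }
    return loss;
}
```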
@@ -234,7 +233,6 @@ namespace dlib
!*/
public:
-const static unsigned int sample_expansion_factor = 1;
typedef float label_type;
template <
@@ -253,6 +251,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
and the output label is the raw score for each classified object. If the score
is > 0 then the classifier is predicting the +1 class, otherwise it is
predicting the -1 class.
@@ -274,6 +273,7 @@ namespace dlib
- sub.get_output().nc() == 1
- sub.get_output().k() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
- all values pointed to by truth are +1 or -1.
!*/
@@ -305,7 +305,6 @@ namespace dlib
public:
-const static unsigned int sample_expansion_factor = 1;
typedef unsigned long label_type;
template <
......@@ -323,6 +322,7 @@ namespace dlib
- sub.get_output().nr() == 1
- sub.get_output().nc() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
and the output label is the predicted class for each classified object. The number
of possible output classes is sub.get_output().k().
!*/
@@ -342,6 +342,7 @@ namespace dlib
- sub.get_output().nr() == 1
- sub.get_output().nc() == 1
- sub.get_output().num_samples() == input_tensor.num_samples()
+- sub.sample_expansion_factor() == 1
- all values pointed to by truth are < sub.get_output().k()
!*/
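Putting it together, the factor is now something you observe at runtime rather than read off a type. A hypothetical end-to-end usage; the network shape is arbitrary and invented for illustration:

```cpp
#include <dlib/dnn.h>
#include <iostream>
#include <vector>

using namespace dlib;

int main()
{
    // Illustrative network over the stock input layer.
    using net_type = loss_multiclass_log<fc<10, relu<fc<32, input<matrix<float>>>>>>;
    net_type net;

    std::vector<matrix<float>> samples(3, matrix<float>(8,8));
    for (auto& s : samples)
        s = 0;

    // Run a forward pass; the core observes that input<matrix<float>>'s
    // to_tensor() emits one tensor sample per input object.
    net(samples);

    // The factor is now a runtime query on the subnetwork instead of a
    // compile-time constant on the input layer type.
    std::cout << net.subnet().sample_expansion_factor() << "\n";  // expect 1
}
```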