Commit f858ada0 authored by Davis King

Made the subnetwork objects passed to layers non-copyable. Also filled out
more of the layers_abstract.h file.
parent f8b1a3de
@@ -107,6 +107,9 @@ namespace dlib
         !*/
     public:
+        sub_net_wrapper(const sub_net_wrapper&) = delete;
+        sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+
         sub_net_wrapper(T& l_) {}
         // Nothing here because in this case T is one of the input layer types
         // that doesn't have anything in it.
@@ -117,6 +120,9 @@ namespace dlib
     {
     public:
+        sub_net_wrapper(const sub_net_wrapper&) = delete;
+        sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+
         typedef T wrapped_type;
         const static size_t num_layers = T::num_layers;
@@ -501,6 +507,9 @@ namespace dlib
             sub_net_wrapper(const tensor& x_, resizable_tensor& grad_final_ignored_) :
                 x(x_), grad_final_ignored(grad_final_ignored_) {}
+            sub_net_wrapper(const sub_net_wrapper&) = delete;
+            sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+
             const tensor& get_output() const { return x; }
             tensor& get_gradient_input()
             {
......
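The change above is easy to reproduce outside of dlib. Below is a minimal sketch of the non-copyable wrapper pattern being applied, using simplified stand-in types rather than dlib's real network and tensor classes: deleting the copy operations means a layer can only ever hold its subnetwork by reference, so it can't accidentally make an expensive (or semantically wrong) copy of the network below it.

    #include <iostream>

    // Sketch only: mirrors the shape of sub_net_wrapper above, but the
    // wrapped type and everything around it are simplified stand-ins.
    template <typename T>
    class sub_net_wrapper
    {
    public:
        sub_net_wrapper(const sub_net_wrapper&) = delete;
        sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;

        sub_net_wrapper(T& l_) : l(l_) {}

        T& get() { return l; }

    private:
        T& l;  // held by reference, never owned or copied
    };

    int main()
    {
        int some_network = 42;  // stand-in for a real network object
        sub_net_wrapper<int> sub(some_network);

        // sub_net_wrapper<int> copy = sub;  // error: copy constructor is deleted
        std::cout << sub.get() << "\n";
        return 0;
    }

Holding the wrapped object by reference and deleting the copy operations turns the "layers only borrow their subnetwork" rule into a compile-time error rather than a convention.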
@@ -23,8 +23,8 @@ namespace dlib
             Note that there is no dlib::EXAMPLE_INPUT_LAYER type.  It is shown here
             purely to document the interface that an input layer object must implement.
             If you are using some kind of image or matrix object as your input_type
-            then you can use the provided dlib::input layer type defined below.
-            Otherwise, you need to define your own custom input layer.
+            then you can use the provided dlib::input layer defined below.  Otherwise,
+            you need to define your own custom input layer.
         !*/
     public:
@@ -33,8 +33,8 @@ namespace dlib
         /*!
             ensures
                 - Default constructs this object.  This function is not required to do
-                  anything in particular but it is required that layer objects be default
-                  constructable.
+                  anything in particular but it must exist, that is, it is required that
+                  layer objects be default constructable.
         !*/

         EXAMPLE_INPUT_LAYER(
@@ -70,10 +70,10 @@ namespace dlib
             ensures
                 - Converts the iterator range into a tensor and stores it into #data.
                 - #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
-                - Normally you would have #data.num_samples() == distance(ibegin,iend) but
+                  Normally you would have #data.num_samples() == distance(ibegin,iend) but
                   you can also expand the output by some integer factor so long as the loss
                   you use can deal with it correctly.
-                - The data in the ith sample in #data corresponds to
+                - The data in the ith sample of #data corresponds to the input_type object
                   *(ibegin+i/sample_expansion_factor).
         !*/
     };
......
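To make the sample_expansion_factor contract concrete, here is a hedged sketch of a hypothetical input layer. The types are toy stand-ins (toy_tensor is not dlib's resizable_tensor, and a factor of 2 is just an assumption for illustration); the point is that to_tensor() produces factor-many rows per input object and that sample i is built from *(ibegin + i/sample_expansion_factor).

    #include <cstddef>
    #include <iterator>
    #include <vector>

    // Stand-in for dlib's resizable_tensor, just enough for the sketch.
    struct toy_tensor
    {
        std::vector<float> vals;
        long num_samples() const { return (long)vals.size(); }
    };

    // Hypothetical input layer (not part of dlib) illustrating the contract:
    // #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
    struct toy_input_layer
    {
        typedef float input_type;
        const static unsigned int sample_expansion_factor = 2;  // assumed value

        // Assumes a random access iterator, matching the *(ibegin+i/...) notation.
        template <typename input_iterator>
        void to_tensor(input_iterator ibegin, input_iterator iend, toy_tensor& data) const
        {
            const long n = std::distance(ibegin, iend);
            data.vals.resize(n*sample_expansion_factor);
            // Sample i comes from *(ibegin + i/sample_expansion_factor).
            for (long i = 0; i < n*sample_expansion_factor; ++i)
                data.vals[i] = *(ibegin + i/sample_expansion_factor);
        }
    };

With a factor of 2, three inputs produce a six-sample tensor, and samples 0 and 1 are both derived from the first input, exactly as the i/sample_expansion_factor indexing implies.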
@@ -16,26 +16,68 @@ namespace dlib
     {
         /*!
             WHAT THIS OBJECT REPRESENTS
-                By "Sub net" we mean the part of the network closer to the input.  Whenever
-                you get a SUB_NET it will always have computed its outputs and they will be
-                available in get_output().
+                This object represents a deep neural network.  In particular, it is
+                the simplified interface through which layer objects interact with their
+                subnetworks.  A layer's two important tasks are to (1) take outputs from its
+                subnetwork and forward propagate them through itself and (2) to backwards
+                propagate an error gradient through itself and onto its subnetwork.
+
+                The idea of a subnetwork is illustrated in the following diagram:
+
+                  +---------------------------------------------------------+
+                  | loss <-- layer1 <-- layer2 <-- ... <-- layern <-- input |
+                  +---------------------------------------------------------+
+                                          ^                            ^
+                                          \___ subnetwork for layer1 __/
+
+                Therefore, by "subnetwork" we mean the part of the network closer to the
+                input.
         !*/

     public:
+        // You aren't allowed to copy subnetworks from inside a layer.
+        SUB_NET(const SUB_NET&) = delete;
+        SUB_NET& operator=(const SUB_NET&) = delete;
+
         const tensor& get_output(
         ) const;
+        /*!
+            ensures
+                - returns the output of this subnetwork.  This is the data that the next
+                  layer in the network will take as input.
+                - have_same_dimensions(#get_gradient_input(), get_output()) == true
+        !*/

         tensor& get_gradient_input(
         );
+        /*!
+            ensures
+                - returns the error gradient for this subnetwork.  That is, this is the
+                  error gradient that this network will use to update itself.  Therefore,
+                  when performing back propagation, layers that sit on top of this
+                  subnetwork write their back propagated error gradients into
+                  get_gradient_input().  Or to put it another way, during back propagation,
+                  layers take the contents of their get_gradient_input() and back propagate
+                  it through themselves and store the results into their subnetwork's
+                  get_gradient_input().
+        !*/

         const NEXT_SUB_NET& sub_net(
         ) const;
+        /*!
+            ensures
+                - returns the subnetwork of *this network.  With respect to the diagram
+                  above, if *this was layer1 then sub_net() would return the network that
+                  begins with layer2.
+        !*/

         NEXT_SUB_NET& sub_net(
         );
+        /*!
+            ensures
+                - returns the subnetwork of *this network.  With respect to the diagram
+                  above, if *this was layer1 then sub_net() would return the network that
+                  begins with layer2.
+        !*/
     };

// ----------------------------------------------------------------------------------------
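To see how a layer is meant to use this interface, here is a short sketch with toy stand-ins for tensor and SUB_NET (dlib's real types are richer). forward() reads sub.get_output(); backward() writes this layer's back propagated gradient into sub.get_gradient_input(), which is the data flow the spec above describes. A ReLU-style layer is used only because its gradient is easy to write down; the exact signatures are assumptions, not dlib's real layer API.

    #include <cstddef>
    #include <vector>

    // Toy stand-ins so the sketch is self-contained.
    struct toy_tensor { std::vector<float> vals; };

    struct toy_sub_net
    {
        toy_sub_net() = default;
        toy_sub_net(const toy_sub_net&) = delete;             // mirrors the interface above
        toy_sub_net& operator=(const toy_sub_net&) = delete;

        const toy_tensor& get_output() const { return out; }
        toy_tensor& get_gradient_input() { return grad; }

        toy_tensor out, grad;
    };

    // Hypothetical ReLU-style layer showing the documented data flow.
    struct toy_relu_layer
    {
        void forward(const toy_sub_net& sub, toy_tensor& output) const
        {
            // Forward propagate the subnetwork's output through this layer.
            output.vals = sub.get_output().vals;
            for (float& x : output.vals)
                if (x < 0) x = 0;
        }

        // Assumes gradient_input has the same size as the subnetwork's output.
        void backward(const toy_tensor& gradient_input, toy_sub_net& sub) const
        {
            // Back propagate this layer's gradient into the subnetwork's
            // get_gradient_input(), as required above.
            const toy_tensor& out = sub.get_output();
            toy_tensor& grad = sub.get_gradient_input();
            grad.vals.resize(out.vals.size());
            for (std::size_t i = 0; i < out.vals.size(); ++i)
                grad.vals[i] = (out.vals[i] > 0) ? gradient_input.vals[i] : 0.0f;
        }
    };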
@@ -64,8 +106,8 @@ namespace dlib
         /*!
             ensures
                 - Default constructs this object.  This function is not required to do
-                  anything in particular but it is required that layer objects be default
-                  constructable.
+                  anything in particular but it must exist, that is, it is required that
+                  layer objects be default constructable.
         !*/

         EXAMPLE_LAYER_(
@@ -107,7 +149,7 @@ namespace dlib
             - SUB_NET implements the SUB_NET interface defined at the top of this file.
             - setup() has been called.
         ensures
-            - Runs the output of the sub-network through this layer and stores the
+            - Runs the output of the subnetwork through this layer and stores the
               output into #output.  In particular, forward() can use any of the outputs
               in sub (e.g. sub.get_output(), sub.sub_net().get_output(), etc.) to
               compute whatever it wants.
......
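The remark that forward() may use outputs from deeper than one level (sub.sub_net().get_output(), etc.) is what makes skip-style layers expressible. A hedged sketch, reusing toy_tensor from the earlier example and assuming a SUB_NET-like type that also provides sub_net(); the function name and shape handling are illustrative only.

    // Hypothetical skip-style forward(): combines the output of the layer
    // directly below with the output one level deeper, which the spec above
    // explicitly permits.
    template <typename SUB_NET>
    void skip_add_forward(const SUB_NET& sub, toy_tensor& output)
    {
        const toy_tensor& shallow = sub.get_output();            // layer directly below
        const toy_tensor& deep    = sub.sub_net().get_output();  // one level deeper
        output.vals.resize(shallow.vals.size());
        for (std::size_t i = 0; i < shallow.vals.size(); ++i)
        {
            // Pad with zeros if the deeper output is smaller (a simplification;
            // a real layer would require matching dimensions).
            const float d = (i < deep.vals.size()) ? deep.vals[i] : 0.0f;
            output.vals[i] = shallow.vals[i] + d;
        }
    }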