Commit fa812881 authored by Davis King

Just removed the _ from sub_net.

parent 31757a21
@@ -23,20 +23,20 @@ namespace dlib
        con_()
        {}

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
        {
            // TODO
        }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
        {
            // TODO
        }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        {
            // TODO
        }
@@ -49,8 +49,8 @@ namespace dlib
        resizable_tensor params;
    };

-    template <typename SUB_NET>
-    using con = add_layer<con_, SUB_NET>;
+    template <typename SUBNET>
+    using con = add_layer<con_, SUBNET>;
// ----------------------------------------------------------------------------------------
@@ -71,8 +71,8 @@ namespace dlib
        unsigned long get_num_outputs (
        ) const { return num_outputs; }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
        {
            num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
            params.set_size(num_inputs, num_outputs);
@@ -82,16 +82,16 @@ namespace dlib
            randomize_parameters(params, num_inputs+num_outputs, rnd);
        }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
        {
            output.set_size(sub.get_output().num_samples(), num_outputs);
            output = mat(sub.get_output())*mat(params);
        }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        {
            // d1*W*p1 + d2*W*p2
            // total gradient = [d1*W; d2*W; d3*W; ...] == D*W
@@ -116,8 +116,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;
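Note for readers skimming the diff: the `fc_` math being renamed above is a plain matrix product, and its gradient comment ("total gradient = [d1*W; d2*W; d3*W; ...] == D*W") is easier to see with concrete shapes. A hedged sketch using only `dlib::matrix` (the names `X`, `W`, `D` are illustrative, not from this commit):

```cpp
// Sketch of the fc_ forward/backward algebra with explicit shapes.
// X: subnetwork output (num_samples x num_inputs), W: params
// (num_inputs x num_outputs), D: gradient_input (num_samples x num_outputs).
#include <dlib/matrix.h>
#include <iostream>

int main()
{
    using namespace dlib;
    matrix<double> X = randm(4, 3);          // 4 samples, 3 inputs each
    matrix<double> W = randm(3, 2);          // maps 3 inputs to 2 outputs
    matrix<double> Y = X*W;                  // forward(): mat(sub.get_output())*mat(params)

    matrix<double> D = randm(4, 2);          // upstream gradient w.r.t. Y
    matrix<double> params_grad = trans(X)*D; // gradient w.r.t. W (3x2, same shape as W)
    matrix<double> data_grad   = D*trans(W); // gradient w.r.t. X: each sample's d_i pushed
                                             // back through W, stacked row by row

    std::cout << params_grad.nr() << "x" << params_grad.nc() << "\n"; // 3x2
    std::cout << data_grad.nr()   << "x" << data_grad.nc()   << "\n"; // 4x3
}
```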
// ----------------------------------------------------------------------------------------
@@ -128,20 +128,20 @@ namespace dlib
        {
        }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
        {
        }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
        {
            output.copy_size(sub.get_output());
            output = lowerbound(mat(sub.get_output()), 0);
        }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        {
            const float* grad = gradient_input.host();
            const float* in = sub.get_output().host();
@@ -163,8 +163,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;
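The `relu_::backward()` body renamed in the hunk above works on raw host pointers; the elementwise rule it implements is simply "pass the gradient through wherever the layer's input was positive". A standalone sketch of that rule (a hypothetical helper, not the commit's code):

```cpp
// Elementwise ReLU gradient rule, mirroring the host-pointer loop in
// relu_::backward(): layer_input plays the role of sub.get_output().
#include <vector>
#include <cstddef>

void relu_backward_sketch(
    const std::vector<float>& gradient_input,
    const std::vector<float>& layer_input,   // the subnetwork's output, i.e. this layer's input
    std::vector<float>& gradient_output      // what gets accumulated into sub.get_gradient_input()
)
{
    gradient_output.resize(gradient_input.size());
    for (std::size_t i = 0; i < gradient_input.size(); ++i)
        gradient_output[i] = (layer_input[i] > 0) ? gradient_input[i] : 0;
}
```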
// ----------------------------------------------------------------------------------------
@@ -176,8 +176,8 @@ namespace dlib
        }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
        {
            num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
            params.set_size(1, num_inputs);
@@ -189,8 +189,8 @@ namespace dlib
            randomize_parameters(params, num_inputs+num_outputs, rnd);
        }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
        {
            DLIB_CASSERT( sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == params.size(), "");
            DLIB_CASSERT( sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == num_inputs, "");
@@ -208,8 +208,8 @@ namespace dlib
            }
        }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        {
            params_grad += sum_rows(pointwise_multiply(mat(sub.get_output()),mat(gradient_input)));
@@ -230,8 +230,8 @@ namespace dlib
        dlib::rand rnd;
    };

-    template <typename SUB_NET>
-    using multiply = add_layer<multiply_, SUB_NET>;
+    template <typename SUBNET>
+    using multiply = add_layer<multiply_, SUBNET>;
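`multiply_` learns one weight per input element and scales each sample elementwise; the `params_grad` line in the hunk above is the matching gradient. A hedged sketch of both directions using only `dlib::matrix` (the names `X`, `w`, `D` are illustrative):

```cpp
// multiply_ semantics in miniature: forward scales each column of X by its
// learned weight; the parameter gradient sums input*gradient over samples,
// i.e. sum_rows(pointwise_multiply(mat(sub.get_output()), mat(gradient_input))).
#include <dlib/matrix.h>

int main()
{
    using namespace dlib;
    matrix<double> X = randm(4, 3);  // 4 samples, 3 elements each
    matrix<double> w = randm(1, 3);  // one learned weight per element
    matrix<double> Y(4, 3);
    for (long r = 0; r < X.nr(); ++r)         // forward: elementwise scaling
        for (long c = 0; c < X.nc(); ++c)
            Y(r,c) = X(r,c)*w(0,c);

    matrix<double> D = randm(4, 3);  // upstream gradient w.r.t. Y
    matrix<double> params_grad = sum_rows(pointwise_multiply(X, D)); // 1x3, same shape as w
    return 0;
}
```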
// ----------------------------------------------------------------------------------------
@@ -12,7 +12,7 @@ namespace dlib

// ----------------------------------------------------------------------------------------

-    class SUB_NET
+    class SUBNET
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
@@ -35,8 +35,8 @@ namespace dlib
    public:
        // You aren't allowed to copy subnetworks from inside a layer.
-        SUB_NET(const SUB_NET&) = delete;
-        SUB_NET& operator=(const SUB_NET&) = delete;
+        SUBNET(const SUBNET&) = delete;
+        SUBNET& operator=(const SUBNET&) = delete;

        const tensor& get_output(
        ) const;
@@ -61,21 +61,21 @@ namespace dlib
                  get_gradient_input().
        !*/

-        const NEXT_SUB_NET& sub_net(
+        const NEXT_SUBNET& subnet(
        ) const;
        /*!
            ensures
                - returns the subnetwork of *this network. With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                  begins with layer2.
        !*/

-        NEXT_SUB_NET& sub_net(
+        NEXT_SUBNET& subnet(
        );
        /*!
            ensures
                - returns the subnetwork of *this network. With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                  begins with layer2.
        !*/
    };
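Since this rename touches the interface every layer is written against, a small mock may help fix the shape of the contract in mind. Everything below is hypothetical illustration (with `std::vector<float>` standing in for dlib tensors): `get_output()`/`get_gradient_input()` at each level, and the renamed `subnet()` to reach the level below.

```cpp
#include <vector>

// Deepest mock "network": provides an output and a gradient accumulator,
// with no subnet() below it.
struct bottom_net
{
    std::vector<float> out{1, 2, 3};
    std::vector<float> grad{0, 0, 0};
    const std::vector<float>& get_output() const { return out; }
    std::vector<float>& get_gradient_input() { return grad; }
};

// Wraps bottom_net the way layer1 wraps layer2 in the documented diagram.
struct top_net
{
    bottom_net next;
    std::vector<float> out{4, 5, 6};
    std::vector<float> grad{0, 0, 0};
    const std::vector<float>& get_output() const { return out; }
    std::vector<float>& get_gradient_input() { return grad; }
    const bottom_net& subnet() const { return next; }  // the renamed accessor
    bottom_net& subnet() { return next; }
};

// A layer's forward() may read sub.get_output() as well as deeper outputs
// such as sub.subnet().get_output().
template <typename SUBNET>
float sum_two_levels(const SUBNET& sub)
{
    float s = 0;
    for (float v : sub.get_output()) s += v;
    for (float v : sub.subnet().get_output()) s += v;
    return s;
}

int main()
{
    top_net n;
    return static_cast<int>(sum_two_levels(n)); // 21
}
```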
@@ -126,45 +126,45 @@ namespace dlib
            allows you to easily convert between related deep neural network types.
        !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
        void setup (
-            const SUB_NET& sub
+            const SUBNET& sub
        );
        /*!
            requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
            ensures
                - performs any necessary initial memory allocations and/or sets parameters
                  to their initial values prior to learning. Therefore, calling setup
                  destroys any previously learned parameters.
        !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
        void forward(
-            const SUB_NET& sub,
+            const SUBNET& sub,
            resizable_tensor& output
        );
        /*!
            requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                - setup() has been called.
            ensures
                - Runs the output of the subnetwork through this layer and stores the
                  output into #output. In particular, forward() can use any of the outputs
-                  in sub (e.g. sub.get_output(), sub.sub_net().get_output(), etc.) to
+                  in sub (e.g. sub.get_output(), sub.subnet().get_output(), etc.) to
                  compute whatever it wants.
                - #output.num_samples() == sub.get_output().num_samples()
        !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
        void backward(
            const tensor& gradient_input,
-            SUB_NET& sub,
+            SUBNET& sub,
            tensor& params_grad
        );
        /*!
            requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                - setup() has been called.
                - gradient_input has the same dimensions as the output of forward(sub,output).
                - have_same_dimensions(sub.get_gradient_input(), sub.get_output()) == true
@@ -183,7 +183,7 @@ namespace dlib
                - for all valid I:
                    - DATA_GRADIENT_I == gradient of f(sub,get_layer_params()) with
                      respect to layer<I>(sub).get_output() (recall that forward() can
-                      draw inputs from the immediate sub layer, sub.sub_net(), or
+                      draw inputs from the immediate sub layer, sub.subnet(), or
                      any earlier layer. So you must consider the gradients with
                      respect to all inputs drawn from sub)
                Finally, backward() adds these gradients into the output by performing:
@@ -211,8 +211,8 @@ namespace dlib
    // For each layer you define, always define an add_layer template so that layers can be
    // easily composed. Moreover, the convention is that the layer class ends with an _
    // while the add_layer template has the same name but without the trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUBNET>;
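To make the documented contract concrete, here is a hedged sketch of the simplest possible layer written against it: an identity layer. This is a hypothetical example, not part of the commit; the header include is assumed (the dnn headers were still in flux at this point), and the tensor arithmetic is modeled on the `copy_size`/`mat()` patterns visible in layers.h above.

```cpp
#include <dlib/dnn.h>  // assumed consolidated header; on this in-progress branch the
                       // core/tensor headers may need to be included individually
using namespace dlib;

class identity_
{
public:
    template <typename SUBNET>
    void setup (const SUBNET& /*sub*/)
    {
        // nothing to allocate: this layer has no parameters
    }

    template <typename SUBNET>
    void forward(const SUBNET& sub, resizable_tensor& output)
    {
        output.copy_size(sub.get_output());
        output = mat(sub.get_output());        // pass the input straight through
    }

    template <typename SUBNET>
    void backward(const tensor& gradient_input, SUBNET& sub, tensor& /*params_grad*/)
    {
        // d(output)/d(input) is the identity, so per the contract we just add
        // the incoming gradient into the subnetwork's gradient accumulator.
        sub.get_gradient_input() += mat(gradient_input);
    }

    const tensor& get_layer_params() const { return params; }
    tensor& get_layer_params() { return params; }

private:
    resizable_tensor params;  // stays empty
};

// Follow the documented convention: trailing _ on the class, alias without it.
template <typename SUBNET>
using identity = add_layer<identity_, SUBNET>;
```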
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
@@ -254,9 +254,9 @@ namespace dlib
                - The rest of the dimensions of T will be 1.
        !*/

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
        const tensor& get_layer_params() const;
        tensor& get_layer_params();
        /*!
@@ -265,8 +265,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;
// ----------------------------------------------------------------------------------------
@@ -277,9 +277,9 @@ namespace dlib
        relu_(
        );

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
        const tensor& get_layer_params() const;
        tensor& get_layer_params();
        /*!
@@ -288,8 +288,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;
// ----------------------------------------------------------------------------------------
@@ -43,12 +43,12 @@ namespace dlib
        template <
            typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
            >
        double compute_loss (
            const tensor& input_tensor,
            const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
@@ -83,8 +83,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;
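The loss layer renamed here is the standard binary hinge loss. For reference, a self-contained sketch of the per-sample math (a hypothetical helper, not the commit's code): loss = max(0, 1 - y*f), with subgradient -y whenever the margin is violated.

```cpp
#include <algorithm>

// Per-sample binary hinge loss: y is the +1/-1 label, f is the network's
// scalar output for that sample. Returns the loss and d(loss)/df.
struct hinge_result { double loss; double grad; };

hinge_result binary_hinge(double f, double y)
{
    const double margin = y*f;
    return { std::max(0.0, 1.0 - margin),
             (margin < 1) ? -y : 0.0 };
}
```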
// ----------------------------------------------------------------------------------------
@@ -95,11 +95,11 @@ namespace dlib
        const static unsigned int sample_expansion_factor = 1;

        template <
-            typename SUB_NET
+            typename SUBNET
            >
        double compute_loss (
            const tensor& input_tensor,
-            SUB_NET& sub
+            SUBNET& sub
        ) const
        {
            return 0;
@@ -107,8 +107,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using loss_no_label = add_loss_layer<loss_no_label_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_no_label = add_loss_layer<loss_no_label_, SUBNET>;
// ----------------------------------------------------------------------------------------
@@ -54,7 +54,7 @@ namespace dlib
        ) const;
        /*!
            requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                  layers_abstract.h.
                - sub.get_output().num_samples()%sample_expansion_factor == 0
                - All outputs in each layer of sub have the same number of samples. That
@@ -73,16 +73,16 @@ namespace dlib
        template <
            typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
            >
        double compute_loss (
            const tensor& input_tensor,
            const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
        ) const;
        /*!
            requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                  layers_abstract.h.
                - input_tensor was given as input to the network sub and the outputs are
                  now visible in layer<i>(sub).get_output(), for all valid i.
@@ -114,8 +114,8 @@ namespace dlib
    // For each loss layer you define, always define an add_loss_layer template so that
    // layers can be easily composed. Moreover, the convention is that the layer class
    // ends with an _ while the add_loss_layer template has the same name but without the
    // trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUBNET>;
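As with EXAMPLE_LAYER above, a hedged sketch may help: a hypothetical mean-squared-error loss written to the documented compute_loss contract. `loss_mse_` is an invented name, the host-pointer style copies the loss.h code earlier in this diff, and compilation against the in-progress headers is assumed.

```cpp
class loss_mse_
{
public:
    const static unsigned int sample_expansion_factor = 1;
    typedef float label_type;  // assumed: one scalar truth value per sample

    template <typename const_label_iterator, typename SUBNET>
    double compute_loss (
        const tensor& /*input_tensor*/,
        const_label_iterator truth,
        SUBNET& sub
    ) const
    {
        const tensor& output_tensor = sub.get_output();
        tensor& grad = sub.get_gradient_input();
        const float* out = output_tensor.host();
        float* g = grad.host();
        double loss = 0;
        const double scale = 1.0/output_tensor.num_samples();
        for (long i = 0; i < output_tensor.num_samples(); ++i)
        {
            const float err = out[i] - *truth++;
            loss += scale*err*err;                   // average squared error
            g[i] = static_cast<float>(2*scale*err);  // write d(loss)/d(out[i])
        }
        return loss;
    }
};

template <typename SUBNET>
using loss_mse = add_loss_layer<loss_mse_, SUBNET>;
```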
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
@@ -151,12 +151,12 @@ namespace dlib
        template <
            typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
            >
        double compute_loss (
            const tensor& input_tensor,
            const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
        ) const;
        /*!
            This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
@@ -169,8 +169,8 @@ namespace dlib
    };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;
// ----------------------------------------------------------------------------------------