Commit 6168781a authored by Lucas Clemente Vella, committed by Davis E. King

Adding specific parameters interface on fc_ layer (#213)

parent 48df23a0
@@ -1086,6 +1086,30 @@ namespace dlib
            tt::gemm(1,sub.get_gradient_input(), 1,gradient_input,false, w,true);
        }
        alias_tensor_instance get_weights()
        {
            return weights(params, 0);
        }

        alias_tensor_const_instance get_weights() const
        {
            return weights(params, 0);
        }

        alias_tensor_instance get_biases()
        {
            static_assert(bias_mode == FC_HAS_BIAS, "This fc_ layer doesn't have a bias vector "
                "to be retrieved, as per template parameter 'bias_mode'.");
            return biases(params, weights.size());
        }

        alias_tensor_const_instance get_biases() const
        {
            static_assert(bias_mode == FC_HAS_BIAS, "This fc_ layer doesn't have a bias vector "
                "to be retrieved, as per template parameter 'bias_mode'.");
            return biases(params, weights.size());
        }
        const tensor& get_layer_params() const { return params; }
        tensor& get_layer_params() { return params; }
...
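For context, a minimal usage sketch of the new accessors follows. It is not part of the commit: the network definition and input sizes are hypothetical, and it assumes dlib's DNN API with the default bias-enabled fc<> layer.

    #include <dlib/dnn.h>
    #include <iostream>

    int main()
    {
        using namespace dlib;

        // A tiny net: one fully connected layer with 10 outputs
        // (fc<> uses FC_HAS_BIAS by default).
        using net_type = loss_multiclass_log<fc<10, input<matrix<float>>>>;
        net_type net;

        // Run one forward pass so the layer allocates its parameters.
        matrix<float> x(4, 4);
        x = 0;
        net(x);

        // layer<0>(net) is the fc layer; layer_details() exposes the fc_ object.
        auto& fc_details = layer<0>(net).layer_details();

        // The accessors return aliases into get_layer_params(), not copies.
        auto w = fc_details.get_weights();
        auto b = fc_details.get_biases();

        // Here w is 16x10 (4*4 input elements by 10 outputs) and b is 1x10.
        std::cout << "weights: " << w.num_samples() << "x" << w.k() << "\n";
        std::cout << "biases:  " << b.num_samples() << "x" << b.k() << "\n";
    }

Because the returned objects alias the parameter tensor, writes through get_weights() or get_biases() modify the layer's trainable parameters in place.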
@@ -539,6 +539,62 @@ namespace dlib
                - #get_bias_weight_decay_multiplier() == val
        !*/
        alias_tensor_const_instance get_weights(
        ) const;
        /*!
            ensures
                - returns an alias of get_layer_params(), containing the weights matrix of
                  the fully connected layer.
                - #get_weights().num_samples() is the number of elements in each input
                  sample, i.e. the sublayer's output's k()*nr()*nc().
                - #get_weights().k() == #get_num_outputs()
                - if get_bias_mode() == FC_HAS_BIAS:
                    - #get_layer_params().size() == (#get_weights().size() + #get_biases().size())
                - else:
                    - #get_layer_params().size() == #get_weights().size()
        !*/
        alias_tensor_instance get_weights(
        );
        /*!
            ensures
                - returns an alias of get_layer_params(), containing the weights matrix of
                  the fully connected layer.
                - #get_weights().num_samples() is the number of elements in each input
                  sample, i.e. the sublayer's output's k()*nr()*nc().
                - #get_weights().k() == #get_num_outputs()
                - if get_bias_mode() == FC_HAS_BIAS:
                    - #get_layer_params().size() == (#get_weights().size() + #get_biases().size())
                - else:
                    - #get_layer_params().size() == #get_weights().size()
        !*/
        alias_tensor_const_instance get_biases(
        ) const;
        /*!
            requires
                - get_bias_mode() == FC_HAS_BIAS
            ensures
                - returns an alias of get_layer_params(), containing the bias vector of
                  the fully connected layer.
                - #get_biases().num_samples() == 1
                - #get_biases().k() == #get_num_outputs()
                - #get_layer_params().size() == (#get_weights().size() + #get_biases().size())
        !*/
        alias_tensor_instance get_biases(
        );
        /*!
            requires
                - get_bias_mode() == FC_HAS_BIAS
            ensures
                - returns an alias of get_layer_params(), containing the bias vector of
                  the fully connected layer.
                - #get_biases().num_samples() == 1
                - #get_biases().k() == #get_num_outputs()
                - #get_layer_params().size() == (#get_weights().size() + #get_biases().size())
        !*/
        template <typename SUBNET> void setup (const SUBNET& sub);
        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
...
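To make the documented size contract concrete, here is a short sketch continuing the hypothetical net from the sketch above:

    // For a bias-enabled fc_ layer the parameter tensor is exactly the
    // weights followed by the biases.
    auto& d = layer<0>(net).layer_details();
    const tensor& p = d.get_layer_params();
    DLIB_CASSERT(p.size() == d.get_weights().size() + d.get_biases().size(),
        "params holds the weights followed by the biases");

    // With an fc_no_bias<> layer, the get_biases() call above would not
    // compile: the static_assert on bias_mode rejects it at compile time.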