Commit 6578c1b5 authored by Davis King's avatar Davis King

Added net_to_xml()

parent e2d4b3c4
......@@ -12,6 +12,7 @@
#include "dnn/trainer.h"
#include "dnn/cpu_dlib.h"
#include "dnn/tensor_tools.h"
#include "dnn/utilities.h"
#endif // DLIB_DNn_
......
......@@ -125,6 +125,11 @@ namespace dlib
return out;
}
// Emit this input layer as a single self-closing XML element, recording the
// average RGB values this layer subtracts from input images.
friend void to_xml(const input_rgb_image& item, std::ostream& out)
{
    out << "<input_rgb_image r='" << item.avg_red
        << "' g='" << item.avg_green
        << "' b='" << item.avg_blue << "'/>";
}
private:
float avg_red;
float avg_green;
......@@ -213,6 +218,10 @@ namespace dlib
return out;
}
// Print this input layer as XML.  The layer carries no state, so a bare
// self-closing element is all that is needed.
friend void to_xml(const input& /*item*/, std::ostream& out)
{
    out << "<input/>";
}
};
// ----------------------------------------------------------------------------------------
......@@ -295,6 +304,10 @@ namespace dlib
return out;
}
// Print this input layer as XML.  The layer carries no state, so a bare
// self-closing element is all that is needed.
friend void to_xml(const input& /*item*/, std::ostream& out)
{
    out << "<input/>";
}
};
// ----------------------------------------------------------------------------------------
......
......@@ -91,6 +91,12 @@ namespace dlib
print a string describing this layer.
!*/
void to_xml(const EXAMPLE_INPUT_LAYER& item, std::ostream& out);
/*!
This function is optional.  However, it is required if you want to print your
networks with net_to_xml(), in which case to_xml() must write this layer to
the given output stream as XML.
!*/
void serialize(const EXAMPLE_INPUT_LAYER& item, std::ostream& out);
void deserialize(EXAMPLE_INPUT_LAYER& item, std::istream& in);
/*!
......
......@@ -262,6 +262,23 @@ namespace dlib
return out;
}
// Serialize this convolution layer as XML: its filter geometry, strides,
// padding, and solver multipliers as attributes, followed by the filter
// parameter tensor as the element's text content.
friend void to_xml(const con_& item, std::ostream& out)
{
    out << "<con"
        << " num_filters='" << _num_filters << "'"
        << " nr='" << _nr << "'"
        << " nc='" << _nc << "'"
        << " stride_y='" << _stride_y << "'"
        << " stride_x='" << _stride_x << "'"
        << " padding_y='" << item.padding_y_ << "'"
        << " padding_x='" << item.padding_x_ << "'"
        << " learning_rate_mult='" << item.learning_rate_multiplier << "'"
        << " weight_decay_mult='" << item.weight_decay_multiplier << "'"
        << " bias_learning_rate_mult='" << item.bias_learning_rate_multiplier << "'"
        << " bias_weight_decay_mult='" << item.bias_weight_decay_multiplier << "'>\n"
        << mat(item.params)
        << "</con>";
}
private:
......@@ -443,6 +460,18 @@ namespace dlib
return out;
}
// Write this max pooling layer as a self-closing XML element describing its
// window geometry, strides, and padding.  It has no learned parameters.
friend void to_xml(const max_pool_& item, std::ostream& out)
{
    out << "<max_pool nr='" << _nr
        << "' nc='" << _nc
        << "' stride_y='" << _stride_y
        << "' stride_x='" << _stride_x
        << "' padding_y='" << item.padding_y_
        << "' padding_x='" << item.padding_x_
        << "'/>\n";
}
private:
......@@ -619,6 +648,18 @@ namespace dlib
<< ")";
return out;
}
// Write this average pooling layer as a self-closing XML element describing
// its window geometry, strides, and padding.  It has no learned parameters.
friend void to_xml(const avg_pool_& item, std::ostream& out)
{
    out << "<avg_pool nr='" << _nr
        << "' nc='" << _nc
        << "' stride_y='" << _stride_y
        << "' stride_x='" << _stride_x
        << "' padding_y='" << item.padding_y_
        << "' padding_x='" << item.padding_x_
        << "'/>\n";
}
private:
tt::pooling ap;
......@@ -850,6 +891,28 @@ namespace dlib
return out;
}
// Serialize this batch normalization layer as XML.  The element name records
// whether the layer runs in convolutional (bn_con) or fully connected (bn_fc)
// mode; the learned parameters follow as the element's text content.
friend void to_xml(const bn_& item, std::ostream& out)
{
    const char* const name = (mode==CONV_MODE) ? "bn_con" : "bn_fc";
    out << "<" << name
        << " eps='" << item.eps << "'"
        << " learning_rate_mult='" << item.learning_rate_multiplier << "'"
        << " weight_decay_mult='" << item.weight_decay_multiplier << "'"
        << " bias_learning_rate_mult='" << item.bias_learning_rate_multiplier << "'"
        << " bias_weight_decay_mult='" << item.bias_weight_decay_multiplier << "'"
        << ">\n";
    out << mat(item.params);
    out << "</" << name << ">\n";
}
private:
friend class affine_;
......@@ -1053,6 +1116,32 @@ namespace dlib
return out;
}
// Serialize this fully connected layer as XML.  The element name records
// whether the layer has bias terms (fc vs fc_no_bias); the bias solver
// multipliers are emitted only when biases are present.  The weight matrix
// follows as the element's text content.
friend void to_xml(const fc_& item, std::ostream& out)
{
    const char* const name = (bias_mode==FC_HAS_BIAS) ? "fc" : "fc_no_bias";
    out << "<" << name
        << " num_outputs='" << item.num_outputs << "'"
        << " learning_rate_mult='" << item.learning_rate_multiplier << "'"
        << " weight_decay_mult='" << item.weight_decay_multiplier << "'";
    if (bias_mode==FC_HAS_BIAS)
    {
        out << " bias_learning_rate_mult='" << item.bias_learning_rate_multiplier << "'"
            << " bias_weight_decay_mult='" << item.bias_weight_decay_multiplier << "'";
    }
    out << ">\n";
    out << mat(item.params);
    out << "</" << name << ">\n";
}
private:
unsigned long num_outputs;
......@@ -1167,6 +1256,13 @@ namespace dlib
return out;
}
// Write this dropout layer as a self-closing XML element carrying its
// drop rate.  The random mask is transient state and is not serialized.
friend void to_xml(const dropout_& item, std::ostream& out)
{
    out << "<dropout drop_rate='" << item.drop_rate << "'/>\n";
}
private:
float drop_rate;
resizable_tensor mask;
......@@ -1257,6 +1353,12 @@ namespace dlib
return out;
}
// Write this multiply layer as a self-closing XML element carrying the
// constant it scales its input by.
friend void to_xml(const multiply_& item, std::ostream& out)
{
    out << "<multiply val='" << item.val << "'/>\n";
}
private:
float val;
resizable_tensor params; // unused
......@@ -1418,6 +1520,18 @@ namespace dlib
return out;
}
// Serialize this affine layer as XML.  The mode attribute records whether it
// applies per-channel (conv) or per-element (fc) transforms; the learned
// parameters follow as the element's text content.
friend void to_xml(const affine_& item, std::ostream& out)
{
    const char* const mode_name = (item.mode==CONV_MODE) ? "conv" : "fc";
    out << "<affine mode='" << mode_name << "'>\n";
    out << mat(item.params);
    out << "</affine>\n";
}
private:
resizable_tensor params, empty_params;
alias_tensor gamma, beta;
......@@ -1489,6 +1603,10 @@ namespace dlib
return out;
}
// Write this layer as a self-closing XML element naming the tag layer whose
// output is added to the previous layer's output.
friend void to_xml(const add_prev_& /*item*/, std::ostream& out)
{
    out << "<add_prev tag='" << id << "'/>\n";
}
private:
resizable_tensor params;
......@@ -1573,6 +1691,10 @@ namespace dlib
return out;
}
// Write this ReLU layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const relu_&, std::ostream& out)
{
    out << "<relu/>\n";
}
private:
resizable_tensor params;
......@@ -1652,6 +1774,13 @@ namespace dlib
return out;
}
// Serialize this PReLU layer as XML: the initial value used for its learned
// slope parameter as an attribute, then the parameter itself as content.
friend void to_xml(const prelu_& item, std::ostream& out)
{
    out << "<prelu initial_param_value='" << item.initial_param_value << "'>\n"
        << mat(item.params)
        << "</prelu>\n";
}
private:
resizable_tensor params;
float initial_param_value;
......@@ -1711,6 +1840,11 @@ namespace dlib
return out;
}
// Write this sigmoid layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const sig_&, std::ostream& out)
{
    out << "<sig/>\n";
}
private:
resizable_tensor params;
......@@ -1771,6 +1905,11 @@ namespace dlib
return out;
}
// Write this tanh layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const htan_&, std::ostream& out)
{
    out << "<htan/>\n";
}
private:
resizable_tensor params;
......@@ -1831,6 +1970,11 @@ namespace dlib
return out;
}
// Write this softmax layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const softmax_&, std::ostream& out)
{
    out << "<softmax/>\n";
}
private:
resizable_tensor params;
};
......@@ -1847,6 +1991,11 @@ namespace dlib
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
constexpr static size_t tag_count() {return 1;}
// Base case of the tag-listing recursion: print the final (single) tag id
// with no trailing separator.
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id;
}
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{
......@@ -1870,6 +2019,11 @@ namespace dlib
struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
// Recursive case of the tag-listing: print this tag's id followed by a
// comma, then recurse on the remaining tag types.
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << ",";
concat_helper_impl<TAG_TYPES...>::list_tags(out);
}
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
......@@ -1901,6 +2055,8 @@ namespace dlib
>
class concat_
{
// Print the comma separated list of tag ids this layer concatenates.
static void list_tags(std::ostream& out) { impl::concat_helper_impl<TAG_TYPES...>::list_tags(out);};
public:
constexpr static size_t tag_count() {return impl::concat_helper_impl<TAG_TYPES...>::tag_count();};
......@@ -1952,12 +2108,19 @@ namespace dlib
// Print a human readable description of this layer, listing the tag ids it
// concatenates, e.g. "concat\t (1,2,3)".
// NOTE: the previous version printed tag_count() instead of the tag list;
// the stale statements left behind by that change (which caused the header
// to be emitted twice) have been removed.
friend std::ostream& operator<<(std::ostream& out, const concat_& item)
{
    out << "concat\t (";
    list_tags(out);
    out << ")";
    return out;
}
// Write this concat layer as a self-closing XML element whose tags attribute
// is the comma separated list of concatenated tag ids.
friend void to_xml(const concat_& /*item*/, std::ostream& out)
{
    out << "<concat tags='";
    list_tags(out);
    out << "'/>\n";
}
private:
resizable_tensor params; // unused
};
......
......@@ -324,6 +324,12 @@ namespace dlib
print a string describing this layer.
!*/
void to_xml(const EXAMPLE_COMPUTATIONAL_LAYER_& item, std::ostream& out);
/*!
This function is optional.  However, it is required if you want to print your
networks with net_to_xml(), in which case to_xml() must write this layer to
the given output stream as XML.
!*/
void serialize(const EXAMPLE_COMPUTATIONAL_LAYER_& item, std::ostream& out);
void deserialize(EXAMPLE_COMPUTATIONAL_LAYER_& item, std::istream& in);
/*!
......
......@@ -106,6 +106,11 @@ namespace dlib
return out;
}
// Write this loss layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const loss_binary_hinge_&, std::ostream& out)
{
    out << "<loss_binary_hinge/>";
}
};
template <typename SUBNET>
......@@ -215,6 +220,11 @@ namespace dlib
return out;
}
// Write this loss layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const loss_binary_log_&, std::ostream& out)
{
    out << "<loss_binary_log/>";
}
};
template <typename SUBNET>
......@@ -325,6 +335,10 @@ namespace dlib
return out;
}
// Write this loss layer as XML.  It is stateless, so a bare element suffices.
friend void to_xml(const loss_multiclass_log_&, std::ostream& out)
{
    out << "<loss_multiclass_log/>";
}
};
......
......@@ -132,6 +132,12 @@ namespace dlib
print a string describing this layer.
!*/
void to_xml(const EXAMPLE_LOSS_LAYER_& item, std::ostream& out);
/*!
This function is optional.  However, it is required if you want to print your
networks with net_to_xml(), in which case to_xml() must write this layer to
the given output stream as XML.
!*/
void serialize(const EXAMPLE_LOSS_LAYER_& item, std::ostream& out);
void deserialize(EXAMPLE_LOSS_LAYER_& item, std::istream& in);
/*!
......
// Copyright (C) 2016 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#ifndef DLIB_DNn_UTILITIES_H_
#define DLIB_DNn_UTILITIES_H_
#include "core.h"
#include "utilities_abstract.h"
namespace dlib
{
// ----------------------------------------------------------------------------------------
namespace impl
{
// Visitor used by net_to_xml().  visit_layers() calls operator() once per
// layer; overload resolution picks the overload matching each layer's kind
// (input, loss, computational, tag, or skip) and wraps the layer's own
// to_xml() output in a <layer> element recording its index and kind.
class visitor_net_to_xml
{
public:
// Holds a reference to the stream; the stream must outlive the visitor.
visitor_net_to_xml(std::ostream& out_) : out(out_) {}
// Fallback overload: anything not matched below is the network's input layer.
template<typename input_layer_type>
void operator()(size_t idx, const input_layer_type& l)
{
out << "<layer idx='"<<idx<<"' type='input'>\n";
to_xml(l,out);
out << "</layer>\n";
}
// Loss layers: delegate to the to_xml() of the contained loss details object.
template <typename T, typename U>
void operator()(size_t idx, const add_loss_layer<T,U>& l)
{
out << "<layer idx='"<<idx<<"' type='loss'>\n";
to_xml(l.loss_details(),out);
out << "</layer>\n";
}
// Computational layers: delegate to the to_xml() of the layer details object.
template <typename T, typename U, typename E>
void operator()(size_t idx, const add_layer<T,U,E>& l)
{
out << "<layer idx='"<<idx<<"' type='comp'>\n";
to_xml(l.layer_details(),out);
out << "</layer>\n";
}
// Tag layers carry no state beyond their compile-time id.
template <unsigned long ID, typename U, typename E>
void operator()(size_t idx, const add_tag_layer<ID,U,E>& l)
{
out << "<layer idx='"<<idx<<"' type='tag' id='"<<ID<<"'/>\n";
}
// Skip layers record the id of the tag layer they jump to.
template <template<typename> class T, typename U>
void operator()(size_t idx, const add_skip_layer<T,U>& l)
{
out << "<layer idx='"<<idx<<"' type='skip' id='"<<(tag_id<T>::id)<<"'/>\n";
}
private:
std::ostream& out;
};
}
// Prints the given network as an XML document to out: a <net> root element
// containing one <layer> child per layer, produced by visiting each layer
// with visitor_net_to_xml.  Requires every layer type in the network to
// provide a to_xml() overload.
template <typename net_type>
void net_to_xml (
const net_type& net,
std::ostream& out
)
{
out << "<net>\n";
visit_layers(net, impl::visitor_net_to_xml(out));
out << "</net>\n";
}
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_DNn_UTILITIES_H_
// Copyright (C) 2016 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#undef DLIB_DNn_UTILITIES_ABSTRACT_H_
#ifdef DLIB_DNn_UTILITIES_ABSTRACT_H_
#include "core_abstract.h"
namespace dlib
{
// ----------------------------------------------------------------------------------------
template <typename net_type>
void net_to_xml (
const net_type& net,
std::ostream& out
);
/*!
requires
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or
add_tag_layer.
- All layers in the net must provide to_xml() functions.
ensures
- Prints the given neural network object as an XML document to the given output
stream.
!*/
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_DNn_UTILITIES_ABSTRACT_H_
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment