Commit cbb69de2 authored by Fm

Visual studio now compiles dnn_mnist_advanced, inception and dtest

parent 943a07cb
......@@ -78,6 +78,8 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu
message(STATUS "Enabling SSE2 instructions")
add_definitions(-DDLIB_HAVE_SSE2)
endif()
# DNN module produces long type names for NN definitions - disable this warning for MSVC
# (C4503 is MSVC's "decorated name length exceeded, name was truncated" warning;
# it is harmless but extremely noisy with deeply nested template network types,
# so it is suppressed for every MSVC build via /wd4503)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4503")
endif()
......
......@@ -1985,68 +1985,85 @@ namespace dlib
// ----------------------------------------------------------------------------------------
namespace impl{
// helper classes for layer concat processing
// NOTE(review): the next nine lines appear to be the removed ("old") side of
// a diff rendered without +/- markers: an empty variadic primary template and
// the opening of its single-tag specialization, truncated mid-body. Verify
// against the actual source file before relying on this span.
template <template<typename> class... TAG_TYPES>
struct concat_helper_impl {
};
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
// single tag: recursion base case
constexpr static size_t tag_count() {return 1;}
static void list_tags(std::ostream& out)
{
// last (only) tag id, printed with no trailing comma
out << tag_id<TAG_TYPE>::id;
// NOTE(review): the commented-out block below is an alternative no-op
// base case (tag_count() == 0, all operations empty); its own comment
// says it is intended only for MSVC. It is kept here disabled,
// presumably for reference.
// // helper classes for layer concat processing
// template <template<typename> class... TAG_TYPES>
// struct concat_helper_impl {
// // this specialization will be used only by MSVC
// constexpr static size_t tag_count() {return 0;}
// static void list_tags(std::ostream& out)
// {
// }
// template<typename SUBNET>
// static void resize_out(resizable_tensor&, const SUBNET&, long)
// {
// }
// template<typename SUBNET>
// static void concat(tensor&, const SUBNET&, size_t)
// {
// }
// template<typename SUBNET>
// static void split(const tensor&, SUBNET&, size_t)
// {
// }
// };
// Recursion step of the concat helper: operates on the first tag of the
// list and forwards the remaining pack to a smaller instantiation. The
// single-tag partial specialization terminates the recursion, so this
// primary template is only ever instantiated with two or more tags.
template <template<typename> class FIRST_TAG, template<typename> class... REST_TAGS>
struct concat_helper_impl{
    // total number of tags: this one plus whatever remains in the pack
    constexpr static size_t tag_count() {return 1 + concat_helper_impl<REST_TAGS...>::tag_count();}

    // stream the tag ids as a comma-separated list
    static void list_tags(std::ostream& out)
    {
        out << tag_id<FIRST_TAG>::id;
        if (tag_count() > 1)
            out << ",";
        concat_helper_impl<REST_TAGS...>::list_tags(out);
    }

    // size `out` like this tag's activation, widening the channel (k)
    // dimension by the k accumulated from the tags processed so far
    template<typename SUBNET>
    static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
    {
        const auto& first = layer<FIRST_TAG>(sub).get_output();
        out.set_size(first.num_samples(), first.k() + sum_k, first.nr(), first.nc());
        concat_helper_impl<REST_TAGS...>::resize_out(out, sub, sum_k + first.k());
    }

    // copy this tag's output into the channel slice of `out` starting at
    // k_offset, then let the rest of the pack fill the following slices
    template<typename SUBNET>
    static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
    {
        const auto& first = layer<FIRST_TAG>(sub).get_output();
        tt::copy_tensor(out, k_offset, first, 0, first.k());
        concat_helper_impl<REST_TAGS...>::concat(out, sub, k_offset + first.k());
    }

    // inverse of concat: route the matching channel slice of `input` back
    // into this tag's gradient input, then split the remainder
    template<typename SUBNET>
    static void split(const tensor& input, SUBNET& sub, size_t k_offset)
    {
        auto& grad = layer<FIRST_TAG>(sub).get_gradient_input();
        tt::copy_tensor(grad, 0, input, k_offset, grad.k());
        concat_helper_impl<REST_TAGS...>::split(input, sub, k_offset + grad.k());
    }
};
// NOTE(review): this span interleaves removed and added diff lines without
// +/- markers and is NOT well-formed C++ as pasted: the first two lines open
// a <TAG_TYPE, TAG_TYPES...> specialization that is never closed, and the
// single-parameter specialization below references TAG_TYPES..., a pack that
// does not exist in its parameter list. Reconstruct from the actual commit
// before editing.
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
// recursion step: one tag plus however many remain in the pack
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
// single-tag specialization: terminates the recursion
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
constexpr static size_t tag_count() {return 1;}
static void list_tags(std::ostream& out)
{
// NOTE(review): the next two statements look like the removed variadic
// version (id, comma, recurse)...
out << tag_id<TAG_TYPE>::id << ",";
concat_helper_impl<TAG_TYPES...>::list_tags(out);
// ...and this one like its single-tag replacement (no trailing comma)
out << tag_id<TAG_TYPE>::id;
}
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{
// size the output like this tag's activation, with the accumulated k
auto& t = layer<TAG_TYPE>(sub).get_output();
// NOTE(review): recursive call (old side) and set_size (new side) both
// present here; only one belongs in the real single-tag base case
concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
}
template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{
// copy this tag's output into the channel slice starting at k_offset
auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
}
template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{
// route the matching channel slice of the gradient back to this tag
auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
}
};
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment