Commit cbb69de2 authored by Fm

Visual Studio now compiles dnn_mnist_advanced, inception and dtest

parent 943a07cb
...@@ -78,6 +78,8 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu ...@@ -78,6 +78,8 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu
message(STATUS "Enabling SSE2 instructions") message(STATUS "Enabling SSE2 instructions")
add_definitions(-DDLIB_HAVE_SSE2) add_definitions(-DDLIB_HAVE_SSE2)
endif() endif()
# The DNN module's template-heavy network definitions expand into extremely long
# decorated type names, which triggers MSVC warning C4503 ("decorated name length
# exceeded, name was truncated").  The truncation is harmless, so silence the warning.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4503")
endif() endif()
......
...@@ -1985,68 +1985,85 @@ namespace dlib ...@@ -1985,68 +1985,85 @@ namespace dlib
// ---------------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------------
namespace impl{ namespace impl{
// helper classes for layer concat processing // // helper classes for layer concat processing
template <template<typename> class... TAG_TYPES> // template <template<typename> class... TAG_TYPES>
struct concat_helper_impl { // struct concat_helper_impl {
}; // // this specialization will be used only by MSVC
template <template<typename> class TAG_TYPE> // constexpr static size_t tag_count() {return 0;}
struct concat_helper_impl<TAG_TYPE>{ // static void list_tags(std::ostream& out)
constexpr static size_t tag_count() {return 1;} // {
static void list_tags(std::ostream& out) // }
{ // template<typename SUBNET>
out << tag_id<TAG_TYPE>::id; // static void resize_out(resizable_tensor&, const SUBNET&, long)
// {
// }
// template<typename SUBNET>
// static void concat(tensor&, const SUBNET&, size_t)
// {
// }
// template<typename SUBNET>
// static void split(const tensor&, SUBNET&, size_t)
// {
// }
// };
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct concat_helper_impl{
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
concat_helper_impl<TAG_TYPES...>::list_tags(out);
} }
template<typename SUBNET> template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k) static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{ {
auto& t = layer<TAG_TYPE>(sub).get_output(); auto& t = layer<TAG_TYPE>(sub).get_output();
out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc()); concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
} }
template<typename SUBNET> template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset) static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{ {
auto& t = layer<TAG_TYPE>(sub).get_output(); auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k()); tt::copy_tensor(out, k_offset, t, 0, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
} }
template<typename SUBNET> template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset) static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{ {
auto& t = layer<TAG_TYPE>(sub).get_gradient_input(); auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k()); tt::copy_tensor(t, 0, input, k_offset, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
} }
}; };
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES> template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{ struct concat_helper_impl<TAG_TYPE>{
constexpr static size_t tag_count() {return 1;}
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
static void list_tags(std::ostream& out) static void list_tags(std::ostream& out)
{ {
out << tag_id<TAG_TYPE>::id << ","; out << tag_id<TAG_TYPE>::id;
concat_helper_impl<TAG_TYPES...>::list_tags(out);
} }
template<typename SUBNET> template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k) static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{ {
auto& t = layer<TAG_TYPE>(sub).get_output(); auto& t = layer<TAG_TYPE>(sub).get_output();
concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k()); out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
} }
template<typename SUBNET> template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset) static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{ {
auto& t = layer<TAG_TYPE>(sub).get_output(); auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k()); tt::copy_tensor(out, k_offset, t, 0, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
} }
template<typename SUBNET> template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset) static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{ {
auto& t = layer<TAG_TYPE>(sub).get_gradient_input(); auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k()); tt::copy_tensor(t, 0, input, k_offset, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
} }
}; };
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment