Commit ea9cba7e authored by Davis E. King, committed by GitHub

Merge pull request #139 from e-fominov/dnn_vs2015_up3

DNN Visual Studio 2015 Update3 support
parents 8e6d8ae0 cc387727
dlib/CMakeLists.txt
@@ -78,6 +78,12 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visual Studio
         message(STATUS "Enabling SSE2 instructions")
         add_definitions(-DDLIB_HAVE_SSE2)
     endif()
+
+    # By default, Visual Studio does not support .obj files with more than 65k sections.
+    # Code generated by file_to_code_ex and code using the DNN module can exceed that
+    # limit. This flag enables more than 65k sections, but produces .obj files that
+    # will not be readable by VS 2005.
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
 endif()
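Without /bigobj, MSVC builds that instantiate the DNN templates typically stop with fatal error C1128, "number of sections exceeded object file format limit: compile with /bigobj".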
......
dlib/dnn.h
@@ -3,6 +3,12 @@
 #ifndef DLIB_DNn_
 #define DLIB_DNn_
 
+// The DNN module uses template-based network declarations that lead to very
+// long type names. Visual Studio will produce warning C4503 in such cases.
+#ifdef _MSC_VER
+#   pragma warning( disable: 4503 )
+#endif
+
 #include "dnn/tensor.h"
 #include "dnn/input.h"
 #include "dnn/layers.h"
......
dlib/dnn/core.h
@@ -208,6 +208,8 @@ namespace dlib
     };
 
     template <typename T> struct alwaysbool { typedef bool type; };
+    // one more structure for the VS 2015 UP3 support workaround
+    template <typename T> struct alwaysbool2 { typedef bool type; };
 
     resizable_tensor& rt();
@@ -254,7 +256,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
     {
         return false;
     }
@@ -263,7 +265,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),sub,rt()))>::type
     {
         return false;
     }
@@ -272,7 +274,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
@@ -281,7 +283,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
@@ -290,7 +292,7 @@ namespace dlib
     constexpr auto is_inplace_layer(
         layer_type& layer,
         const SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.forward(sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.forward(sub,rt()))>::type
    {
        return false;
    }
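These overloads use expression SFINAE: each is viable only when the decltype expression in its return type compiles, and the helper struct maps whatever that expression yields to bool. The sketch below shows the pattern in isolation (simplified names, not dlib's code); the duplicated helper alwaysbool2 is redundant in standard C++ and exists only because VS2015 Update 3 reportedly mishandles several such overloads routed through a single helper template.

```cpp
#include <iostream>

// Both helpers map any well-formed type to bool. The second is the
// workaround: it keeps the two overloads below from sharing one helper.
template <typename T> struct alwaysbool  { typedef bool type; };
template <typename T> struct alwaysbool2 { typedef bool type; };

struct in_place  { void forward_inplace() {} };
struct out_place { void forward() {} };

// Viable only if x.forward_inplace() is a well-formed expression.
template <typename T>
constexpr auto is_inplace(T& x) -> typename alwaysbool<decltype(x.forward_inplace())>::type
{ return true; }

// Viable only if x.forward() is a well-formed expression.
template <typename T>
constexpr auto is_inplace(T& x) -> typename alwaysbool2<decltype(x.forward())>::type
{ return false; }

int main()
{
    in_place a;
    out_place b;
    std::cout << is_inplace(a) << " " << is_inplace(b) << "\n"; // prints: 1 0
}
```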
@@ -1363,7 +1365,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
 
-        add_tag_layer() = default;
+        add_tag_layer() {};
         add_tag_layer(const add_tag_layer&) = default;
         add_tag_layer(add_tag_layer&&) = default;
         add_tag_layer& operator=(add_tag_layer&&) = default;
@@ -2552,7 +2554,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
 
-        add_skip_layer() = default;
+        add_skip_layer() {};
         add_skip_layer(const add_skip_layer&) = default;
         add_skip_layer(add_skip_layer&&) = default;
         add_skip_layer& operator=(add_skip_layer&&) = default;
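Replacing = default with an empty body is behavior-preserving for these classes, but it is not a pure no-op in general: a user-provided constructor makes the type non-trivial. A small sketch of the one observable difference (my own example, not part of the commit):

```cpp
#include <type_traits>

struct defaulted  { defaulted() = default; };  // compiler-generated
struct empty_body { empty_body() {} };         // user-provided

// Both are default constructible...
static_assert(std::is_default_constructible<defaulted>::value,  "");
static_assert(std::is_default_constructible<empty_body>::value, "");

// ...but only the defaulted one stays trivial. That distinction should not
// matter for add_tag_layer/add_skip_layer, which hold non-trivial
// subnetwork members anyway, so the workaround is safe here.
static_assert( std::is_trivially_default_constructible<defaulted>::value,  "");
static_assert(!std::is_trivially_default_constructible<empty_body>::value, "");

int main() {}
```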
......
dlib/dnn/layers.h
@@ -2025,69 +2025,66 @@ namespace dlib
     using softmax = add_layer<softmax_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
 
-    namespace impl{
-        // helper classes for layer concat processing
-        template <template<typename> class... TAG_TYPES>
-        struct concat_helper_impl {
-        };
-
-        template <template<typename> class TAG_TYPE>
-        struct concat_helper_impl<TAG_TYPE>{
-            constexpr static size_t tag_count() {return 1;}
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id;
-            }
-
-            template<typename SUBNET>
-            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
-            }
-            template<typename SUBNET>
-            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                tt::copy_tensor(out, k_offset, t, 0, t.k());
-            }
-            template<typename SUBNET>
-            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
-                tt::copy_tensor(t, 0, input, k_offset, t.k());
-            }
-        };
-        template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
-        struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
-            constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id << ",";
-                concat_helper_impl<TAG_TYPES...>::list_tags(out);
-            }
-
-            template<typename SUBNET>
-            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
-            }
-            template<typename SUBNET>
-            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                tt::copy_tensor(out, k_offset, t, 0, t.k());
-                k_offset += t.k();
-                concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
-            }
-            template<typename SUBNET>
-            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
-                tt::copy_tensor(t, 0, input, k_offset, t.k());
-                k_offset += t.k();
-                concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
-            }
-        };
-    }
+    namespace impl
+    {
+        template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
+        struct concat_helper_impl{
+
+            constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
+                concat_helper_impl<TAG_TYPES...>::list_tags(out);
+            }
+
+            template<typename SUBNET>
+            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
+            }
+            template<typename SUBNET>
+            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                tt::copy_tensor(out, k_offset, t, 0, t.k());
+                k_offset += t.k();
+                concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
+            }
+            template<typename SUBNET>
+            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
+                tt::copy_tensor(t, 0, input, k_offset, t.k());
+                k_offset += t.k();
+                concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
+            }
+        };
+        template <template<typename> class TAG_TYPE>
+        struct concat_helper_impl<TAG_TYPE>{
+            constexpr static size_t tag_count() {return 1;}
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id;
+            }
+
+            template<typename SUBNET>
+            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
+            }
+            template<typename SUBNET>
+            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                tt::copy_tensor(out, k_offset, t, 0, t.k());
+            }
+            template<typename SUBNET>
+            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
+                tt::copy_tensor(t, 0, input, k_offset, t.k());
+            }
+        };
+    }
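The restructuring keeps the same recursion but changes which declaration is the primary template: instead of an empty variadic primary plus two partial specializations, the variadic recursive case is now the primary template and the single-tag case is the only specialization, a shape VS2015 Update 3 accepts. Here is the same pattern in a standalone toy, with ordinary type parameters standing in for dlib's template-template tag parameters:

```cpp
#include <cstddef>
#include <iostream>

// Toy tag types; dlib's real tags are class templates, which is why the
// original code uses template-template parameters.
template <int ID> struct tag { static const int id = ID; };

// Primary template: peel off the first tag and recurse on the rest.
template <typename TAG, typename... TAGS>
struct list_helper
{
    constexpr static std::size_t tag_count() { return 1 + list_helper<TAGS...>::tag_count(); }
    static void list_tags(std::ostream& out)
    {
        out << TAG::id << ",";
        list_helper<TAGS...>::list_tags(out);
    }
};

// Specialization for exactly one tag: terminates the recursion.
template <typename TAG>
struct list_helper<TAG>
{
    constexpr static std::size_t tag_count() { return 1; }
    static void list_tags(std::ostream& out) { out << TAG::id; }
};

int main()
{
    std::cout << list_helper<tag<1>, tag<2>, tag<3>>::tag_count() << "\n"; // 3
    list_helper<tag<1>, tag<2>, tag<3>>::list_tags(std::cout);             // 1,2,3
    std::cout << "\n";
}
```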
......
dlib/use_cpp_11.cmake
@@ -61,7 +61,6 @@ else()
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_delegating_constructors;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_thread_local;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_constexpr;" AND
-       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_decltype_incomplete_return_types;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_auto_type;")
       set(COMPILER_CAN_DO_CPP_11 1)
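For reference, the dropped flag corresponds to the C++11 change that lets decltype name the result of a call whose return type is incomplete. MSVC does not list cxx_decltype_incomplete_return_types in CMAKE_CXX_COMPILE_FEATURES, so requiring it left COMPILER_CAN_DO_CPP_11 unset on Visual Studio even when the compiler handles the construct. A minimal sketch of what the feature covers:

```cpp
// decltype applied to a call expression whose return type is incomplete.
// The call is never evaluated, so no definition of the type is needed.
struct incomplete;        // declared, never defined
incomplete make();        // legal: a declaration may name an incomplete return type

typedef decltype(make()) result_type;  // result_type is "incomplete"

int main() {}
```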
......
examples/dnn_introduction2_ex.cpp
@@ -10,7 +10,6 @@
         - Accessing and configuring layers in a network
 */
-#include <dlib/dnn.h>
 #include <iostream>
 #include <dlib/data_io.h>
......