Commit ea9cba7e authored Jun 22, 2016 by Davis E. King, committed by GitHub on Jun 22, 2016
Merge pull request #139 from e-fominov/dnn_vs2015_up3
DNN Visual Studio 2015 Update3 support
Parents: 8e6d8ae0, cc387727
Showing 6 changed files with 41 additions and 32 deletions
dlib/cmake                            +6   -0
dlib/dnn.h                            +6   -0
dlib/dnn/core.h                       +9   -7
dlib/dnn/layers.h                     +20  -23
dlib/use_cpp_11.cmake                 +0   -1
examples/dnn_mnist_advanced_ex.cpp    +0   -1
dlib/cmake
...
...
@@ -78,6 +78,12 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visual Studio
        message(STATUS "Enabling SSE2 instructions")
        add_definitions(-DDLIB_HAVE_SSE2)
     endif()
+
+    # By default Visual Studio does not support .obj files with more than 65k sections
+    # Code generated by file_to_code_ex and code using DNN module can have them
+    # this flag enables > 65k sections, but produces .obj files that will not be readable by
+    # VS 2005
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
 endif()
...
...
dlib/dnn.h
...
...
@@ -3,6 +3,12 @@
 #ifndef DLIB_DNn_
 #define DLIB_DNn_

+// DNN module uses template-based network declaration that leads to very long
+// type names. Visual Studio will produce Warning C4503 in such cases
+#ifdef _MSC_VER
+#   pragma warning( disable: 4503 )
+#endif
+
 #include "dnn/tensor.h"
 #include "dnn/input.h"
 #include "dnn/layers.h"
...
...
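MSVC's warning C4503 ("decorated name length exceeded") fires when a symbol's mangled name grows past the compiler's internal limit, which dlib's declarative, deeply nested network types do easily. Below is a minimal sketch of the effect; the layer templates here are illustrative stand-ins, not dlib's actual types.

// Illustrative sketch only -- these template names are stand-ins, not dlib's.
// Deeply nested declarations like dlib's network types give MSVC very long
// decorated (mangled) names, triggering warning C4503 unless it is disabled
// as in the hunk above.
#ifdef _MSC_VER
#   pragma warning( disable: 4503 )
#endif

#include <iostream>
#include <typeinfo>

template <typename SUBNET> struct relu { SUBNET subnet; };
template <typename SUBNET> struct fc   { SUBNET subnet; };
struct input {};

int main()
{
    // Every extra wrapper lengthens the mangled name of the final type.
    using net_type = relu<fc<relu<fc<relu<fc<input>>>>>>;
    std::cout << typeid(net_type).name() << "\n";
}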
dlib/dnn/core.h
...
...
@@ -208,6 +208,8 @@ namespace dlib
};
template
<
typename
T
>
struct
alwaysbool
{
typedef
bool
type
;
};
// one more structure for VS 2015 UP3 support workaround
template
<
typename
T
>
struct
alwaysbool2
{
typedef
bool
type
;
};
resizable_tensor
&
rt
();
...
...
@@ -254,7 +256,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
     {
         return false;
     }
...
...
@@ -263,7 +265,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),sub,rt()))>::type
     {
         return false;
     }
...
...
@@ -272,7 +274,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
...
...
@@ -281,7 +283,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
...
...
@@ -290,7 +292,7 @@ namespace dlib
     constexpr auto is_inplace_layer(
         layer_type& layer,
         const SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.forward(sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.forward(sub,rt()))>::type
     {
         return false;
     }
...
...
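All five hunks above make the same one-line change: the trailing return type of each detection overload switches from alwaysbool to the newly added, otherwise identical alwaysbool2. The pattern is expression SFINAE: each overload participates in overload resolution only if the decltype() expression inside its return type compiles, and giving some overloads a second wrapper type keeps their return types textually distinct, which (judging by the commit title) works around an overload-handling bug in Visual Studio 2015 Update 3. A self-contained sketch of the idiom follows, with simplified stand-in layer types rather than dlib's real ones.

// Sketch of the detection idiom, not dlib's actual code. The int/long second
// parameter ranks the overloads; decltype() in the return type removes an
// overload from consideration when the member call does not compile.
#include <iostream>

template <typename T> struct alwaysbool  { typedef bool type; };
template <typename T> struct alwaysbool2 { typedef bool type; };

struct inplace_layer    { void backward_inplace(int) {} };
struct noninplace_layer { void backward(int, int) {} };

template <typename layer_type>
constexpr auto has_inplace_backward(layer_type& layer, int)
    -> typename alwaysbool<decltype(layer.backward_inplace(0))>::type
{ return true; }

template <typename layer_type>
constexpr auto has_inplace_backward(layer_type& layer, long)
    -> typename alwaysbool2<decltype(layer.backward(0,0))>::type
{ return false; }

int main()
{
    inplace_layer a;
    noninplace_layer b;
    std::cout << std::boolalpha
              << has_inplace_backward(a, 0) << "\n"   // prints: true
              << has_inplace_backward(b, 0) << "\n";  // prints: false
}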
@@ -1363,7 +1365,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");

-        add_tag_layer() = default;
+        add_tag_layer() {};
         add_tag_layer(const add_tag_layer&) = default;
         add_tag_layer(add_tag_layer&&) = default;
         add_tag_layer& operator=(add_tag_layer&&) = default;
...
...
@@ -2552,7 +2554,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");

-        add_skip_layer() = default;
+        add_skip_layer() {};
         add_skip_layer(const add_skip_layer&) = default;
         add_skip_layer(add_skip_layer&&) = default;
         add_skip_layer& operator=(add_skip_layer&&) = default;
...
...
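Both hunks replace a defaulted default constructor with an empty user-provided one, presumably because VS 2015 Update 3 mishandled "= default" on these heavily templated classes. The two spellings are close but not equivalent; in particular, a user-provided body makes the type non-trivially default-constructible. A small sketch of the difference:

// Sketch (not dlib code): "= default" vs. "{}" for a default constructor.
#include <iostream>
#include <type_traits>

struct defaulted     { defaulted() = default; };
struct user_provided { user_provided() {} };

int main()
{
    std::cout << std::boolalpha
              << std::is_trivially_default_constructible<defaulted>::value     << "\n"  // true
              << std::is_trivially_default_constructible<user_provided>::value << "\n"; // false
}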
dlib/dnn/layers.h
...
...
@@ -2025,69 +2025,66 @@ namespace dlib
     using softmax = add_layer<softmax_,SUBNET>;

// ----------------------------------------------------------------------------------------

     namespace impl
     {
         // helper classes for layer concat processing
-        template <template<typename> class... TAG_TYPES>
-        struct concat_helper_impl{
-        };
-
-        template <template<typename> class TAG_TYPE>
-        struct concat_helper_impl<TAG_TYPE>{
-            constexpr static size_t tag_count() {return 1;}
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id;
-            }
+        template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
+        struct concat_helper_impl{
+            constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
+                concat_helper_impl<TAG_TYPES...>::list_tags(out);
+            }

             template<typename SUBNET>
             static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
             {
                 auto& t = layer<TAG_TYPE>(sub).get_output();
-                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
+                concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
             }
             template<typename SUBNET>
             static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
             {
                 auto& t = layer<TAG_TYPE>(sub).get_output();
                 tt::copy_tensor(out, k_offset, t, 0, t.k());
+                k_offset += t.k();
+                concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
             }
             template<typename SUBNET>
             static void split(const tensor& input, SUBNET& sub, size_t k_offset)
             {
                 auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
                 tt::copy_tensor(t, 0, input, k_offset, t.k());
+                k_offset += t.k();
+                concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
             }
         };
-        template <template<typename> class TAG_TYPE,
-                  template<typename> class... TAG_TYPES>
-        struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
-            constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
-
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id << ",";
-                concat_helper_impl<TAG_TYPES...>::list_tags(out);
-            }
-
-            template<typename SUBNET>
-            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
-            }
-            template<typename SUBNET>
-            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                tt::copy_tensor(out, k_offset, t, 0, t.k());
-                k_offset += t.k();
-                concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
-            }
-            template<typename SUBNET>
-            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
-                tt::copy_tensor(t, 0, input, k_offset, t.k());
-                k_offset += t.k();
-                concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
-            }
-        };
+        template <template<typename> class TAG_TYPE>
+        struct concat_helper_impl<TAG_TYPE>{
+            constexpr static size_t tag_count() {return 1;}
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id;
+            }
+
+            template<typename SUBNET>
+            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
+            }
+            template<typename SUBNET>
+            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                tt::copy_tensor(out, k_offset, t, 0, t.k());
+            }
+            template<typename SUBNET>
+            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
+                tt::copy_tensor(t, 0, input, k_offset, t.k());
+            }
+        };
     }
...
...
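The layers.h change reshuffles concat_helper_impl rather than altering its behavior: previously the primary variadic template was an empty struct and both the single-tag base case and the multi-tag recursive case were partial specializations; now the recursive case is the primary template and only the single-tag base case is specialized, a shape VS 2015 Update 3 appears to handle more reliably. A minimal sketch of the restructured recursion, with illustrative names rather than dlib's:

// Illustrative names (counter, tag1, ...), not dlib's. The recursive case
// lives in the primary template; only the one-argument base case is a
// partial specialization.
#include <cstddef>
#include <iostream>

template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct counter
{
    constexpr static std::size_t tag_count() { return 1 + counter<TAG_TYPES...>::tag_count(); }
};

template <template<typename> class TAG_TYPE>
struct counter<TAG_TYPE>
{
    constexpr static std::size_t tag_count() { return 1; }
};

template <typename T> struct tag1 {};
template <typename T> struct tag2 {};
template <typename T> struct tag3 {};

int main()
{
    std::cout << counter<tag1, tag2, tag3>::tag_count() << "\n"; // prints: 3
}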
dlib/use_cpp_11.cmake
...
...
@@ -61,7 +61,6 @@ else()
     ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_delegating_constructors;" AND
     ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_thread_local;" AND
     ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_constexpr;" AND
-    ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_decltype_incomplete_return_types;" AND
     ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_auto_type;"
     )
     set(COMPILER_CAN_DO_CPP_11 1)
...
...
examples/dnn_mnist_advanced_ex.cpp
...
...
@@ -10,7 +10,6 @@
     - Accessing and configuring layers in a network
 */

 #include <dlib/dnn.h>
 #include <iostream>
 #include <dlib/data_io.h>
...
...