钟尚武 / dlib · Commits · f9bb4f47

Commit f9bb4f47, authored Jun 26, 2017 by Davis King

    Merge branch 'master' of git://github.com/OranjeeGeneral/dlib into OranjeeGeneral-master

Parents: cbd187fb ecb7095e

Showing 8 changed files with 276 additions and 44 deletions (+276 -44):

    dlib/dnn/cpu_dlib.cpp          +7    -18
    dlib/dnn/cpu_dlib.h            +19   -4
    dlib/dnn/cudnn_dlibapi.cpp     +1    -7
    dlib/dnn/cudnn_dlibapi.h       +4    -7
    dlib/dnn/layers.h              +0    -0
    dlib/dnn/layers_abstract.h     +211  -0
    dlib/dnn/tensor_tools.h        +12   -6
    dlib/test/dnn.cpp              +22   -2
dlib/dnn/cpu_dlib.cpp

@@ -1739,43 +1739,32 @@ namespace dlib
            }
        }

        void tensor_conv::operator() (
            resizable_tensor& output,
            const tensor& data,
-            const tensor& filters,
-            int stride_y,
-            int stride_x,
-            int padding_y,
-            int padding_x
+            const tensor& filters
        )
        {
            DLIB_CASSERT(is_same_object(output,data) == false);
            DLIB_CASSERT(is_same_object(output,filters) == false);
            DLIB_CASSERT(filters.k() == data.k());
-            DLIB_CASSERT(stride_y > 0 && stride_x > 0);
-            DLIB_CASSERT(0 <= padding_y && padding_y < filters.nr());
-            DLIB_CASSERT(0 <= padding_x && padding_x < filters.nc());
-            DLIB_CASSERT(filters.nr() <= data.nr() + 2*padding_y,
+            DLIB_CASSERT(filters.nr() <= data.nr() + 2*last_padding_y,
                "Filter windows must be small enough to fit into the padded image.");
-            DLIB_CASSERT(filters.nc() <= data.nc() + 2*padding_x,
+            DLIB_CASSERT(filters.nc() <= data.nc() + 2*last_padding_x,
                "Filter windows must be small enough to fit into the padded image.");

            output.set_size(data.num_samples(),
                filters.num_samples(),
-                1+(data.nr()+2*padding_y-filters.nr())/stride_y,
-                1+(data.nc()+2*padding_x-filters.nc())/stride_x);
+                1+(data.nr()+2*last_padding_y-filters.nr())/last_stride_y,
+                1+(data.nc()+2*last_padding_x-filters.nc())/last_stride_x);

            matrix<float> temp;
            for (long n = 0; n < data.num_samples(); ++n)
            {
-                img2col(temp, data, n, filters.nr(), filters.nc(), stride_y, stride_x, padding_y, padding_x);
+                img2col(temp, data, n, filters.nr(), filters.nc(), last_stride_y, last_stride_x, last_padding_y, last_padding_x);

                output.set_sample(n, mat(filters)*trans(temp));
            }
-
-            last_stride_y = stride_y;
-            last_stride_x = stride_x;
-            last_padding_y = padding_y;
-            last_padding_x = padding_x;
        }

    // ------------------------------------------------------------------------------------

dlib/dnn/cpu_dlib.h

@@ -368,14 +368,29 @@ namespace dlib
        void clear(
        ) {}

-        void operator() (
-            resizable_tensor& output,
-            const tensor& data,
-            const tensor& filters,
+        void setup(
+            const tensor& data,    /* not used but required for interface */
+            const tensor& filters, /* not used but required for interface */
            int stride_y,
            int stride_x,
            int padding_y,
            int padding_x
+        )
+        {
+            (void)data; /* silence compiler */
+            DLIB_CASSERT(stride_y > 0 && stride_x > 0);
+            DLIB_CASSERT(0 <= padding_y && padding_y < filters.nr());
+            DLIB_CASSERT(0 <= padding_x && padding_x < filters.nc());
+            last_stride_y = stride_y;
+            last_stride_x = stride_x;
+            last_padding_y = padding_y;
+            last_padding_x = padding_x;
+        }
+
+        void operator() (
+            resizable_tensor& output,
+            const tensor& data,
+            const tensor& filters
        );

        void get_gradient_for_data (

dlib/dnn/cudnn_dlibapi.cpp

@@ -953,11 +953,7 @@ namespace dlib
        void tensor_conv::operator() (
            resizable_tensor& output,
            const tensor& data,
-            const tensor& filters,
-            int stride_y,
-            int stride_x,
-            int padding_y,
-            int padding_x
+            const tensor& filters
        )
        {
            DLIB_CASSERT(is_same_object(output,data) == false);

@@ -978,8 +974,6 @@ namespace dlib
            );

-            setup(data,filters,stride_y,stride_x,padding_y,padding_x);
-
            output.set_size(out_num_samples, out_k, out_nr, out_nc);

            DLIB_ASSERT(output.num_samples() == data.num_samples(),
                out_num_samples << " " << data.num_samples());

dlib/dnn/cudnn_dlibapi.h

@@ -205,11 +205,7 @@ namespace dlib
        void operator() (
            resizable_tensor& output,
            const tensor& data,
-            const tensor& filters,
-            int stride_y,
-            int stride_x,
-            int padding_y,
-            int padding_x
+            const tensor& filters
        );
        /*!
            requires

@@ -270,8 +266,6 @@ namespace dlib
                  and assigns this gradient to filters_gradient.
        !*/

-    private:
-
        void setup(
            const tensor& data,
            const tensor& filters,

@@ -280,6 +274,9 @@ namespace dlib
            int padding_y,
            int padding_x
        );
+
+    private:
+
        /*!
            requires
                - filters.k() == data.k()

dlib/dnn/layers.h
This diff is collapsed.
dlib/dnn/layers_abstract.h

@@ -840,6 +840,217 @@ namespace dlib
        >
    using con = add_layer<con_<num_filters,nr,nc,stride_y,stride_x>, SUBNET>;

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    template <
        long _num_filters,
        long _nr,
        long _nc,
        int _stride_y,
        int _stride_x,
        int _padding_y = _stride_y!=1? 0 : _nr/2,
        int _padding_x = _stride_x!=1? 0 : _nc/2
        >
    class cont_
    {
        /*!
            REQUIREMENTS ON TEMPLATE ARGUMENTS
                All of them must be > 0.
                Also, we require that:
                    - 0 <= _padding_y && _padding_y < _nr
                    - 0 <= _padding_x && _padding_x < _nc

            WHAT THIS OBJECT REPRESENTS
                This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface
                defined above.  In particular, it defines a transposed convolution layer
                that takes an input tensor (nominally representing an image) and
                transpose convolves (deconvolves) it with a set of filters and then
                outputs the results.  This is basically a convolutional layer with the
                forward and backward passes reversed.

                The dimensions of the tensors output by this layer are as follows (letting
                IN be the input tensor and OUT the output tensor):
                    - OUT.num_samples() == IN.num_samples()
                    - OUT.k()  == num_filters()
                    - OUT.nr() == stride_y*(IN.nr()-1) + nr - 2*padding_y
                    - OUT.nc() == stride_x*(IN.nc()-1) + nc - 2*padding_x
        !*/

    public:
        cont_(
        );
        /*!
            ensures
                - #num_filters() == _num_filters
                - #nr() == _nr
                - #nc() == _nc
                - #stride_y() == _stride_y
                - #stride_x() == _stride_x
                - #padding_y() == _padding_y
                - #padding_x() == _padding_x
                - #get_learning_rate_multiplier() == 1
                - #get_weight_decay_multiplier() == 1
                - #get_bias_learning_rate_multiplier() == 1
                - #get_bias_weight_decay_multiplier() == 0
        !*/

        long num_filters(
        ) const;
        /*!
            ensures
                - returns the number of filters contained in this layer.  The k dimension
                  of the output tensors produced by this layer will be equal to the number
                  of filters.
        !*/

        long nr(
        ) const;
        /*!
            ensures
                - returns the number of rows in the filters in this layer.
        !*/

        long nc(
        ) const;
        /*!
            ensures
                - returns the number of columns in the filters in this layer.
        !*/

        long stride_y(
        ) const;
        /*!
            ensures
                - returns the vertical stride used when convolving the filters over an
                  image.  That is, each filter will be moved stride_y() pixels down at a
                  time when it moves over the image.
        !*/

        long stride_x(
        ) const;
        /*!
            ensures
                - returns the horizontal stride used when convolving the filters over an
                  image.  That is, each filter will be moved stride_x() pixels right at a
                  time when it moves over the image.
        !*/

        long padding_y(
        ) const;
        /*!
            ensures
                - returns the number of pixels of zero padding added to the top and bottom
                  sides of the image.
        !*/

        long padding_x(
        ) const;
        /*!
            ensures
                - returns the number of pixels of zero padding added to the left and right
                  sides of the image.
        !*/

        double get_learning_rate_multiplier(
        ) const;
        /*!
            ensures
                - returns a multiplier number.  The interpretation is that this object is
                  requesting that the learning rate used to optimize its parameters be
                  multiplied by get_learning_rate_multiplier().
        !*/

        double get_weight_decay_multiplier(
        ) const;
        /*!
            ensures
                - returns a multiplier number.  The interpretation is that this object is
                  requesting that the weight decay used to optimize its parameters be
                  multiplied by get_weight_decay_multiplier().
        !*/

        void set_learning_rate_multiplier(
            double val
        );
        /*!
            requires
                - val >= 0
            ensures
                - #get_learning_rate_multiplier() == val
        !*/

        void set_weight_decay_multiplier(
            double val
        );
        /*!
            requires
                - val >= 0
            ensures
                - #get_weight_decay_multiplier() == val
        !*/

        double get_bias_learning_rate_multiplier(
        ) const;
        /*!
            ensures
                - returns a multiplier number.  The interpretation is that this object is
                  requesting that the learning rate used to optimize its bias parameters be
                  multiplied by get_learning_rate_multiplier()*get_bias_learning_rate_multiplier().
        !*/

        double get_bias_weight_decay_multiplier(
        ) const;
        /*!
            ensures
                - returns a multiplier number.  The interpretation is that this object is
                  requesting that the weight decay used to optimize its bias parameters be
                  multiplied by get_weight_decay_multiplier()*get_bias_weight_decay_multiplier().
        !*/

        void set_bias_learning_rate_multiplier(
            double val
        );
        /*!
            requires
                - val >= 0
            ensures
                - #get_bias_learning_rate_multiplier() == val
        !*/

        void set_bias_weight_decay_multiplier(
            double val
        );
        /*!
            requires
                - val >= 0
            ensures
                - #get_bias_weight_decay_multiplier() == val
        !*/

        template <typename SUBNET> void setup (const SUBNET& sub);
        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
        point map_input_to_output(point p) const;
        point map_output_to_input(point p) const;
        const tensor& get_layer_params() const;
        tensor& get_layer_params();
        /*!
            These functions are implemented as described in the EXAMPLE_COMPUTATIONAL_LAYER_ interface.
        !*/
    };

    template <
        long num_filters,
        long nr,
        long nc,
        int stride_y,
        int stride_x,
        typename SUBNET
        >
    using cont = add_layer<cont_<num_filters,nr,nc,stride_y,stride_x>, SUBNET>;

// ----------------------------------------------------------------------------------------

    class dropout_

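The output-size formulas documented for cont_ above can be sanity-checked in a few lines. The following is a minimal sketch, not part of this commit: it assumes dlib built from this revision, and the upsampler alias, the 16x16 input, and the cont<1,4,4,2,2,...> layer choice are all illustrative.

    // Sketch only: verify OUT.nr()/OUT.nc() for the new transposed convolution layer.
    #include <iostream>
    #include <dlib/dnn.h>
    using namespace dlib;

    int main()
    {
        // One 4x4 filter with stride 2; padding defaults to 0 because the stride is not 1.
        using upsampler = cont<1,4,4,2,2, input<matrix<float>>>;
        upsampler net;

        matrix<float> img(16,16);
        img = 0;

        resizable_tensor x;
        net.to_tensor(&img, &img+1, x);      // pack the single image into an input tensor
        const tensor& out = net.forward(x);  // run the (untrained) transposed convolution

        // Per the docs above: OUT.nr() == stride_y*(IN.nr()-1) + nr - 2*padding_y
        //                              == 2*(16-1) + 4 - 0 == 34 (and likewise 34 columns).
        std::cout << out.nr() << " x " << out.nc() << std::endl;  // expect 34 x 34
    }
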
dlib/dnn/tensor_tools.h

@@ -879,12 +879,8 @@ namespace dlib { namespace tt
        void operator() (
            resizable_tensor& output,
            const tensor& data,
-            const tensor& filters,
-            int stride_y,
-            int stride_x,
-            int padding_y,
-            int padding_x
-        ) { impl(output, data, filters, stride_y, stride_x, padding_y, padding_x); }
+            const tensor& filters
+        ) { impl(output, data, filters); }
        /*!
            requires
                - stride_y > 0

@@ -947,6 +943,16 @@ namespace dlib { namespace tt
               this gradient to filters_gradient.
        !*/

+        void setup(
+            const tensor& data,
+            const tensor& filters,
+            int stride_y,
+            int stride_x,
+            int padding_y,
+            int padding_x
+        ) { impl.setup(data, filters, stride_y, stride_x, padding_y, padding_x); }
+
    private:
#ifdef DLIB_USE_CUDA
        cuda::tensor_conv impl;

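Taken together with the cpu and cuDNN changes above, the tensor_tools wrapper now records stride and padding once through setup(), and the convolution call itself only takes output, data, and filters. A hedged sketch of the new calling convention, mirroring what the updated test below does (the run_conv helper and the stride/padding values are illustrative, not part of the commit):

    #include <dlib/dnn.h>
    using namespace dlib;

    // Old style (removed in this commit):
    //     conv(output, data, filters, stride_y, stride_x, padding_y, padding_x);
    // New style: configure once with setup(), then convolve with tensors only.
    void run_conv(
        tt::tensor_conv& conv,
        resizable_tensor& output,
        const tensor& data,
        const tensor& filters
    )
    {
        const int stride_y = 1, stride_x = 1, padding_y = 0, padding_x = 0;
        conv.setup(data, filters, stride_y, stride_x, padding_y, padding_x);
        conv(output, data, filters);  // uses the parameters recorded by setup()
    }
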
dlib/test/dnn.cpp

@@ -805,8 +805,10 @@ namespace
                padding_y = (filters.nr()-data.nr()+1)/2;
            if (!(filters.nc() <= data.nc() + 2*padding_x))
                padding_x = (filters.nc()-data.nc()+1)/2;

-            conv1(output1, data, filters, stride_y, stride_x, padding_y, padding_x);
-            conv2(output2, data, filters, stride_y, stride_x, padding_y, padding_x);
+            conv1.setup(data, filters, stride_y, stride_x, padding_y, padding_x);
+            conv1(output1, data, filters);
+            conv2.setup(data, filters, stride_y, stride_x, padding_y, padding_x);
+            conv2(output2, data, filters);

            dlog << LINFO << "forward error: " << max(abs(mat(output1)-mat(output2)));
            DLIB_TEST_MSG(max(abs(mat(output1)-mat(output2))) < 1e-3, max(abs(mat(output1)-mat(output2)))
                << "\n\t padding_y: " << padding_y

@@ -1473,6 +1475,24 @@ namespace
            auto res = test_layer(l);
            DLIB_TEST_MSG(res, res);
        }
+        {
+            print_spinner();
+            cont_<3,3,3,2,2> l;
+            auto res = test_layer(l);
+            DLIB_TEST_MSG(res, res);
+        }
+        {
+            print_spinner();
+            cont_<3,3,3,1,1> l;
+            auto res = test_layer(l);
+            DLIB_TEST_MSG(res, res);
+        }
+        {
+            print_spinner();
+            cont_<3,2,2,2,2> l;
+            auto res = test_layer(l);
+            DLIB_TEST_MSG(res, res);
+        }
        {
            print_spinner();
            con_<3,2,2,2,2> l;