Commit 9043c741 authored Aug 14, 2017 by Davis King
Added extract_ layer
parent 525cfc71
Showing 3 changed files with 201 additions and 4 deletions

dlib/dnn/layers.h            +116  -0
dlib/dnn/layers_abstract.h    +67  -4
dlib/test/dnn.cpp             +18  -0
dlib/dnn/layers.h
@@ -2883,6 +2883,122 @@ namespace dlib

    template <typename SUBNET>
    using l2normalize = add_layer<l2normalize_, SUBNET>;

// ----------------------------------------------------------------------------------------

    template <
        long _offset,
        long _k,
        long _nr,
        long _nc
        >
    class extract_
    {
        static_assert(_offset >= 0, "The offset must be >= 0.");
        static_assert(_k > 0,  "The number of channels must be > 0.");
        static_assert(_nr > 0, "The number of rows must be > 0.");
        static_assert(_nc > 0, "The number of columns must be > 0.");
    public:
        extract_(
        )
        {
        }

        template <typename SUBNET>
        void setup (const SUBNET& sub)
        {
            DLIB_CASSERT(sub.get_output().size() >= sub.get_output().num_samples()*(_offset+_k*_nr*_nc),
                "The tensor we are trying to extract from the input tensor is too big to fit into the input tensor.");

            aout = alias_tensor(sub.get_output().num_samples(), _k*_nr*_nc);
            ain  = alias_tensor(sub.get_output().num_samples(), sub.get_output().size()/sub.get_output().num_samples());
        }

        template <typename SUBNET>
        void forward(const SUBNET& sub, resizable_tensor& output)
        {
            output.set_size(sub.get_output().num_samples(), _k, _nr, _nc);

            auto out = aout(output,0);
            auto in  = ain(sub.get_output(),0);
            tt::copy_tensor(false, out, 0, in, _offset, _k*_nr*_nc);
        }

        template <typename SUBNET>
        void backward(const tensor& gradient_input, SUBNET& sub, tensor& /*params_grad*/)
        {
            auto out = ain(sub.get_gradient_input(),0);
            auto in  = aout(gradient_input,0);
            tt::copy_tensor(true, out, _offset, in, 0, _k*_nr*_nc);
        }

        const tensor& get_layer_params() const { return params; }
        tensor& get_layer_params() { return params; }

        friend void serialize(const extract_& item, std::ostream& out)
        {
            serialize("extract_", out);
            serialize(_offset, out);
            serialize(_k, out);
            serialize(_nr, out);
            serialize(_nc, out);
        }

        friend void deserialize(extract_& item, std::istream& in)
        {
            std::string version;
            deserialize(version, in);
            if (version != "extract_")
                throw serialization_error("Unexpected version '"+version+"' found while deserializing dlib::extract_.");

            long offset;
            long k;
            long nr;
            long nc;
            deserialize(offset, in);
            deserialize(k, in);
            deserialize(nr, in);
            deserialize(nc, in);

            if (offset != _offset) throw serialization_error("Wrong offset found while deserializing dlib::extract_");
            if (k != _k)   throw serialization_error("Wrong k found while deserializing dlib::extract_");
            if (nr != _nr) throw serialization_error("Wrong nr found while deserializing dlib::extract_");
            if (nc != _nc) throw serialization_error("Wrong nc found while deserializing dlib::extract_");
        }

        friend std::ostream& operator<<(std::ostream& out, const extract_& item)
        {
            out << "extract\t ("
                << "offset="<<_offset
                << ", k="<<_k
                << ", nr="<<_nr
                << ", nc="<<_nc
                << ")";
            return out;
        }

        friend void to_xml(const extract_& item, std::ostream& out)
        {
            out << "<extract";
            out << " offset='"<<_offset<<"'";
            out << " k='"<<_k<<"'";
            out << " nr='"<<_nr<<"'";
            out << " nc='"<<_nc<<"'";
            out << "/>\n";
        }
    private:
        alias_tensor aout, ain;

        resizable_tensor params; // unused
    };

    template <
        long offset,
        long k,
        long nr,
        long nc,
        typename SUBNET
        >
    using extract = add_layer<extract_<offset,k,nr,nc>, SUBNET>;

// ----------------------------------------------------------------------------------------

}
dlib/dnn/layers_abstract.h
@@ -877,8 +877,6 @@ namespace dlib
        >
    using con = add_layer<con_<num_filters,nr,nc,stride_y,stride_x>, SUBNET>;

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    template <
@@ -2333,11 +2331,11 @@ namespace dlib
            This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface
            defined above.  It takes tensors as input and L2 normalizes them.  In particular,
            it has the following properties:
-               - The output tensors from this layer have the same dimenstions as the
+               - The output tensors from this layer have the same dimensions as the
                  input tensors.
                - If you think of each input tensor as a set of tensor::num_samples()
                  vectors, then the output tensor contains the same vectors except they
-                 have been length normlized so that their L2 norms are all 1.  I.e.
+                 have been length normalized so that their L2 norms are all 1.  I.e.
                  for each vector v we will have ||v||==1.
    !*/
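(Reader's note, not part of the commit: a minimal sketch of how l2normalize is typically used, with illustrative layer sizes. It caps an embedding network so every output vector has unit L2 norm, matching the property described above.)

    #include <dlib/dnn.h>
    using namespace dlib;

    // Sketch only: 256 and 128 are illustrative sizes, not taken from this commit.
    // Every sample produced by embedder_type is a 128-D vector with ||v|| == 1.
    using embedder_type = l2normalize<fc_no_bias<128,relu<fc<256,input<matrix<float>>>>>>;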
@@ -2372,6 +2370,71 @@ namespace dlib
        !*/
    };

// ----------------------------------------------------------------------------------------

    template <
        long _offset,
        long _k,
        long _nr,
        long _nc
        >
    class extract_
    {
        /*!
            REQUIREMENTS ON TEMPLATE ARGUMENTS
                - 0 <= _offset
                - 0 < _k
                - 0 < _nr
                - 0 < _nc

            WHAT THIS OBJECT REPRESENTS
                This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface
                defined above.  In particular, the output of this layer is simply a copy of
                the input tensor.  However, you can configure the extract layer to output
                only some subset of the input tensor and also to reshape it.  Therefore,
                the dimensions of the tensors output by this layer are as follows (letting
                IN be the input tensor and OUT the output tensor):
                    - OUT.num_samples() == IN.num_samples()
                    - OUT.k()  == _k
                    - OUT.nr() == _nr
                    - OUT.nc() == _nc

                So the output will always have the same number of samples as the input, but
                within each sample (the k,nr,nc part) we will copy only a subset of the
                values.  Moreover, the _offset parameter controls which part of each sample
                we take.  To be very precise, we will have:
                    - let IN_SIZE  = IN.k()*IN.nr()*IN.nc()
                    - let OUT_SIZE = _k*_nr*_nc
                    - for i in range[0,IN.num_samples()) and j in range[0,OUT_SIZE):
                        - OUT.host()[i*OUT_SIZE+j] == IN.host()[i*IN_SIZE+_offset+j]

                Finally, all this means that the input tensor to this layer must have a big
                enough size to accommodate taking a _k*_nr*_nc slice from each of its
                samples.
        !*/

    public:

        template <typename SUBNET> void setup (const SUBNET& sub);
        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
        const tensor& get_layer_params() const;
        tensor& get_layer_params();
        /*!
            These functions are implemented as described in the
            EXAMPLE_COMPUTATIONAL_LAYER_ interface.
        !*/
    };

    template <
        long offset,
        long k,
        long nr,
        long nc,
        typename SUBNET
        >
    using extract = add_layer<extract_<offset,k,nr,nc>, SUBNET>;

// ----------------------------------------------------------------------------------------

}
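(Reader's note, not part of the commit: the abstract above pins down the slice semantics exactly. The sketch below, with purely illustrative layer sizes, shows one way the new extract alias could be used: reshaping the flat output of a fully connected layer back into a spatial tensor before further convolution.)

    #include <dlib/dnn.h>
    using namespace dlib;

    // Sketch only: the 192 / 3x8x8 / 16-filter sizes are illustrative assumptions.
    // fc<192,...> yields a 192x1x1 tensor per sample; extract<0,3,8,8,...> copies
    // all 192 values (offset 0) and presents them as a 3x8x8 tensor.
    template <typename SUBNET>
    using reshape_to_3x8x8 = extract<0,3,8,8,SUBNET>;

    using net_type = loss_multiclass_log<
                         fc<10,
                         relu<con<16,3,3,1,1,
                         reshape_to_3x8x8<
                         fc<192,
                         input<matrix<float>>
                         >>>>>>;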
dlib/test/dnn.cpp
@@ -1509,6 +1509,24 @@ namespace

    void test_layers()
    {
        {
            print_spinner();
            extract_<0,2,2,2> l;
            auto res = test_layer(l);
            DLIB_TEST_MSG(res, res);
        }
        {
            print_spinner();
            extract_<3,2,1,2> l;
            auto res = test_layer(l);
            DLIB_TEST_MSG(res, res);
        }
        {
            print_spinner();
            extract_<0,2,1,2> l;
            auto res = test_layer(l);
            DLIB_TEST_MSG(res, res);
        }
        {
            print_spinner();
            upsample_<1,1> l;