Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in
Toggle navigation
D
dlib
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
钟尚武
dlib
Commits
368d6d19
Commit
368d6d19
authored
Jan 04, 2016
by
Davis King
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Added CPU version of pooling layer code.
parent
2639a523
Show whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
304 additions
and
103 deletions
+304
-103
cpu_dlib.cpp
dlib/dnn/cpu_dlib.cpp
+219
-0
cpu_dlib.h
dlib/dnn/cpu_dlib.h
+53
-0
tensor_tools.cpp
dlib/dnn/tensor_tools.cpp
+0
-95
tensor_tools.h
dlib/dnn/tensor_tools.h
+32
-8
No files found.
dlib/dnn/cpu_dlib.cpp
View file @
368d6d19
...
@@ -1023,6 +1023,225 @@ namespace dlib
...
@@ -1023,6 +1023,225 @@ namespace dlib
}
}
// ------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------
pooling::pooling()
{
    // Start out unconfigured.  setup_max_pooling() or setup_avg_pooling()
    // must be called before operator() may be used.
    window_height  = 0;
    window_width   = 0;
    stride_y       = 0;
    stride_x       = 0;
    do_max_pooling = true;
}
void pooling::clear()
{
    // Reset the geometry parameters back to the unconfigured state.
    // NOTE(review): do_max_pooling is deliberately left untouched here —
    // only the window/stride settings are cleared.
    window_height = window_width = 0;
    stride_y = stride_x = 0;
}
void pooling::setup_max_pooling(
    int window_height_,
    int window_width_,
    int stride_y_,
    int stride_x_
)
{
    // Every geometry parameter must be strictly positive.
    DLIB_CASSERT(window_width_ > 0, "");
    DLIB_CASSERT(window_height_ > 0, "");
    DLIB_CASSERT(stride_y_ > 0, "");
    DLIB_CASSERT(stride_x_ > 0, "");

    // Record the window geometry and switch into max pooling mode.
    window_height  = window_height_;
    window_width   = window_width_;
    stride_y       = stride_y_;
    stride_x       = stride_x_;
    do_max_pooling = true;
}
void pooling::setup_avg_pooling(
    int window_height_,
    int window_width_,
    int stride_y_,
    int stride_x_
)
{
    // Every geometry parameter must be strictly positive.
    DLIB_CASSERT(window_width_ > 0, "");
    DLIB_CASSERT(window_height_ > 0, "");
    DLIB_CASSERT(stride_y_ > 0, "");
    DLIB_CASSERT(stride_x_ > 0, "");

    // Record the window geometry and switch into average pooling mode.
    window_height  = window_height_;
    window_width   = window_width_;
    stride_y       = stride_y_;
    stride_x       = stride_x_;
    do_max_pooling = false;
}
// Performs spatial pooling over each image plane of src, writing the
// result into dest (which is resized appropriately).  Uses max pooling or
// average pooling depending on how the object was configured.
void pooling::operator() (
    resizable_tensor& dest,
    const tensor& src
)
{
    // setup_max_pooling() or setup_avg_pooling() must have been called first.
    DLIB_CASSERT(window_width > 0,"");
    DLIB_CASSERT(window_height > 0,"");
    DLIB_CASSERT(stride_y > 0,"");
    DLIB_CASSERT(stride_x > 0,"");

    // One output row/column per stride step.  Note the operator precedence
    // here: window_height%2 (0 or 1, the window's center-parity) is what
    // gets subtracted, not the whole window size, because the windows are
    // centered on the sampled points (see centered_rect() below).
    dest.set_size(
         src.num_samples(),
         src.k(),
         1+(src.nr()-window_height%2)/stride_y,
         1+(src.nc()-window_width%2)/stride_x
        );

    if (src.size() == 0)
    {
        dest = 0;
        return;
    }

    auto d = dest.host();
    // FIX: removed an unused `auto s = src.host();` local — it was never
    // read (src is accessed through image_plane() instead).

    if (does_max_pooling())
    {
        for (long n = 0; n < dest.num_samples(); ++n)
        {
            for (long k = 0; k < dest.k(); ++k)
            {
                auto simg = image_plane(src,n,k);
                // Pointer to the start of output plane (n,k).
                auto dimg = d + (n*dest.k() + k)*dest.nr()*dest.nc();

                for (long r = 0; r < dest.nr(); ++r)
                {
                    for (long c = 0; c < dest.nc(); ++c)
                    {
                        // Window centered on the source pixel this output
                        // element samples; subm_clipped() truncates windows
                        // that hang off the edge of the image.
                        auto win = centered_rect(c*stride_x,
                                                 r*stride_y,
                                                 window_width,
                                                 window_height);
                        dimg[r*dest.nc()+c] = max(subm_clipped(simg,win));
                    }
                }
            }
        }
    }
    else
    {
        for (long n = 0; n < dest.num_samples(); ++n)
        {
            for (long k = 0; k < dest.k(); ++k)
            {
                auto simg = image_plane(src,n,k);
                auto dimg = d + (n*dest.k() + k)*dest.nr()*dest.nc();

                for (long r = 0; r < dest.nr(); ++r)
                {
                    for (long c = 0; c < dest.nc(); ++c)
                    {
                        // Average over the (edge-clipped) window, so border
                        // outputs are the mean of the in-image pixels only.
                        auto win = centered_rect(c*stride_x,
                                                 r*stride_y,
                                                 window_width,
                                                 window_height);
                        dimg[r*dest.nc()+c] = mean(subm_clipped(simg,win));
                    }
                }
            }
        }
    }
}
// Back-propagates the pooling operation: given the gradient of the loss
// with respect to dest (gradient_input), ACCUMULATES the gradient with
// respect to src into grad (note the += below — the caller owns the
// initialization of grad).
//  - dest/src must be the tensors from the corresponding operator() call.
void pooling::get_gradient(
    const tensor& gradient_input,
    const tensor& dest,
    const tensor& src,
    tensor& grad
)
{
    DLIB_CASSERT(have_same_dimensions(gradient_input,dest),"");
    DLIB_CASSERT(have_same_dimensions(src,grad),"");
    // FIX(consistency): require the layer to be configured, mirroring the
    // checks done by operator().  Calling this before setup_*_pooling()
    // was previously unchecked.
    DLIB_CASSERT(window_width > 0,"");
    DLIB_CASSERT(window_height > 0,"");
    DLIB_CASSERT(stride_y > 0,"");
    DLIB_CASSERT(stride_x > 0,"");

    if (src.size() == 0)
    {
        return;
    }

    auto gi = gradient_input.host();
    auto g  = grad.host();
    // FIX: removed an unused `auto s = src.host();` local — it was never
    // read (src is accessed through image_plane() instead).

    if (does_max_pooling())
    {
        for (long n = 0; n < dest.num_samples(); ++n)
        {
            for (long k = 0; k < dest.k(); ++k)
            {
                auto simg = image_plane(src,n,k);
                auto gimg  = g  + (n*grad.k() + k)*grad.nr()*grad.nc();
                auto giimg = gi + (n*dest.k() + k)*dest.nr()*dest.nc();
                auto imgbox = get_rect(simg);

                for (long r = 0; r < dest.nr(); ++r)
                {
                    for (long c = 0; c < dest.nc(); ++c)
                    {
                        // Clip the window to the image so subm() below is
                        // always in bounds.
                        auto win = centered_rect(c*stride_x,
                                                 r*stride_y,
                                                 window_width,
                                                 window_height).intersect(imgbox);
                        // For max pooling the entire gradient flows to the
                        // argmax location inside the window.
                        auto p = max_point(subm(simg,win)) + win.tl_corner();
                        gimg[p.y()*grad.nc()+p.x()] += giimg[r*dest.nc()+c];
                    }
                }
            }
        }
    }
    else
    {
        for (long n = 0; n < dest.num_samples(); ++n)
        {
            for (long k = 0; k < dest.k(); ++k)
            {
                auto simg = image_plane(src,n,k);
                auto gimg  = g  + (n*grad.k() + k)*grad.nr()*grad.nc();
                auto giimg = gi + (n*dest.k() + k)*dest.nr()*dest.nc();
                auto imgbox = get_rect(simg);

                for (long r = 0; r < dest.nr(); ++r)
                {
                    for (long c = 0; c < dest.nc(); ++c)
                    {
                        auto win = centered_rect(c*stride_x,
                                                 r*stride_y,
                                                 window_width,
                                                 window_height).intersect(imgbox);
                        // For average pooling the gradient is spread evenly
                        // over the clipped window — dividing by the clipped
                        // area matches the forward pass, which averaged over
                        // the in-image pixels only.
                        const float delta = giimg[r*dest.nc()+c]/win.area();
                        for (long y = win.top(); y <= win.bottom(); ++y)
                        {
                            for (long x = win.left(); x <= win.right(); ++x)
                            {
                                gimg[y*grad.nc()+x] += delta;
                            }
                        }
                    }
                }
            }
        }
    }
}
// ------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------
}
}
}
}
...
...
dlib/dnn/cpu_dlib.h
View file @
368d6d19
...
@@ -213,6 +213,59 @@ namespace dlib
...
@@ -213,6 +213,59 @@ namespace dlib
const
tensor
&
gradient_input
const
tensor
&
gradient_input
);
);
// -----------------------------------------------------------------------------------
class pooling
{
    /*!
        WHAT THIS OBJECT REPRESENTS
            A tool for performing spatial pooling over the image planes of
            a tensor on the CPU.  It can be configured to do either max or
            average pooling; one of the setup_*_pooling() routines must be
            called before operator() is used.
    !*/
public:
    // Not copyable: this object is configuration state, not a value type.
    pooling(const pooling&) = delete;
    pooling& operator=(const pooling&) = delete;

    pooling(
    );

    // Resets the window/stride configuration.
    void clear(
    );

    // Configures operator() to do max pooling with the given geometry.
    // All arguments must be > 0.
    void setup_max_pooling(
        int window_height,
        int window_width,
        int stride_y,
        int stride_x
    );

    // Configures operator() to do average pooling with the given geometry.
    // All arguments must be > 0.
    void setup_avg_pooling(
        int window_height,
        int window_width,
        int stride_y,
        int stride_x
    );

    // Returns true when configured for max pooling, false for average.
    bool does_max_pooling(
    ) const { return do_max_pooling; }

    // Pools src into dest, resizing dest as needed.
    void operator() (
        resizable_tensor& dest,
        const tensor& src
    );

    // Back-propagates gradient_input (gradient w.r.t. dest) into grad
    // (gradient w.r.t. src).
    void get_gradient(
        const tensor& gradient_input,
        const tensor& dest,
        const tensor& src,
        tensor& grad
    );

private:
    int window_height;    // pooling window size, rows
    int window_width;     // pooling window size, columns
    int stride_y;         // vertical step between window centers
    int stride_x;         // horizontal step between window centers
    bool do_max_pooling;  // true = max pooling, false = average pooling
};
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
}
}
...
...
dlib/dnn/tensor_tools.cpp
View file @
368d6d19
...
@@ -448,101 +448,6 @@ namespace dlib { namespace tt
...
@@ -448,101 +448,6 @@ namespace dlib { namespace tt
#endif
#endif
}
}
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// Default constructor — no configuration happens here; the underlying
// implementation object is constructed with its own defaults.
pooling::pooling(
)
{
}
// Forwards clear() to the CUDA implementation when available.
void pooling::clear(
)
{
#ifdef DLIB_USE_CUDA
    impl.clear();
#else
    // TODO: no CPU implementation exists yet — calling this without
    // DLIB_USE_CUDA is a hard error.
    DLIB_CASSERT(false,"");
#endif
}
// Forwards max-pooling configuration to the CUDA implementation when
// available.
void pooling::setup_max_pooling(
    int window_height,
    int window_width,
    int stride_y,
    int stride_x
)
{
#ifdef DLIB_USE_CUDA
    impl.setup_max_pooling(window_height, window_width, stride_y, stride_x);
#else
    // TODO: no CPU implementation exists yet.
    DLIB_CASSERT(false,"");
#endif
}
// Forwards average-pooling configuration to the CUDA implementation when
// available.
void pooling::setup_avg_pooling(
    int window_height,
    int window_width,
    int stride_y,
    int stride_x
)
{
#ifdef DLIB_USE_CUDA
    impl.setup_avg_pooling(window_height, window_width, stride_y, stride_x);
#else
    // TODO: no CPU implementation exists yet.
    DLIB_CASSERT(false,"");
#endif
}
// Returns whether the object is configured for max pooling, by querying
// the CUDA implementation when available.
bool pooling::does_max_pooling(
) const
{
#ifdef DLIB_USE_CUDA
    return impl.does_max_pooling();
#else
    // TODO: no CPU implementation exists yet.
    DLIB_CASSERT(false,"");
    // FIX: the assert above throws, but without a return statement this
    // non-void function flowed off its end — undefined behavior and a
    // compiler warning.  This return is unreachable but required.
    return false;
#endif
}
// Forwards the pooling operation to the CUDA implementation when
// available.
void pooling::operator() (
    resizable_tensor& dest,
    const tensor& src
)
{
#ifdef DLIB_USE_CUDA
    impl(dest, src);
#else
    // TODO: no CPU implementation exists yet.
    DLIB_CASSERT(false,"");
#endif
}
// Forwards the pooling gradient computation to the CUDA implementation
// when available.
void pooling::get_gradient(
    const tensor& gradient_input,
    const tensor& dest,
    const tensor& src,
    tensor& grad
)
{
#ifdef DLIB_USE_CUDA
    impl.get_gradient(gradient_input, dest, src, grad);
#else
    // TODO: no CPU implementation exists yet.
    DLIB_CASSERT(false,"");
#endif
}
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------
...
...
dlib/dnn/tensor_tools.h
View file @
368d6d19
...
@@ -595,6 +595,9 @@ namespace dlib { namespace tt
...
@@ -595,6 +595,9 @@ namespace dlib { namespace tt
class
pooling
class
pooling
{
{
/*!
/*!
WHAT THIS OBJECT REPRESENTS
The pooling object is a tool for performing spatial pooling over a tensor.
It can be configured to do either max or average pooling.
!*/
!*/
public
:
public
:
...
@@ -602,35 +605,56 @@ namespace dlib { namespace tt
...
@@ -602,35 +605,56 @@ namespace dlib { namespace tt
// Not copy-assignable.  (The side-by-side diff showed this declaration
// twice — old and new lines are identical; consolidated to one.)
pooling& operator=(const pooling&) = delete;
pooling
(
pooling
(
);
)
=
default
;
void
clear
(
void
clear
(
)
;
)
{
impl
.
clear
();
}
void
setup_max_pooling
(
void
setup_max_pooling
(
int
window_height
,
int
window_height
,
int
window_width
,
int
window_width
,
int
stride_y
,
int
stride_y
,
int
stride_x
int
stride_x
);
)
{
impl
.
setup_max_pooling
(
window_height
,
window_width
,
stride_y
,
stride_x
);
}
/*!
requires
- window_height > 0
- window_width > 0
- stride_y > 0
- stride_x > 0
ensures
- When you call operator() it will do max pooling with the given
parameters.
!*/
void
setup_avg_pooling
(
void
setup_avg_pooling
(
int
window_height
,
int
window_height
,
int
window_width
,
int
window_width
,
int
stride_y
,
int
stride_y
,
int
stride_x
int
stride_x
);
)
{
impl
.
setup_avg_pooling
(
window_height
,
window_width
,
stride_y
,
stride_x
);
}
/*!
requires
- window_height > 0
- window_width > 0
- stride_y > 0
- stride_x > 0
ensures
- When you call operator() it will do average pooling with the given
parameters.
!*/
bool
does_max_pooling
(
bool
does_max_pooling
(
)
const
;
)
const
{
return
impl
.
does_max_pooling
();
}
void
operator
()
(
void
operator
()
(
resizable_tensor
&
dest
,
resizable_tensor
&
dest
,
const
tensor
&
src
const
tensor
&
src
)
;
)
{
impl
(
dest
,
src
);
}
/*!
/*!
requires
requires
- is_same_object(dest,src) == false
- is_same_object(dest,src) == false
- either setup_max_pooling() or setup_avg_pooling() has been called.
ensures
ensures
- #dest.num_samples() == src.num_samples()
- #dest.num_samples() == src.num_samples()
- #dest.k() == src.k()
- #dest.k() == src.k()
...
@@ -656,7 +680,7 @@ namespace dlib { namespace tt
...
@@ -656,7 +680,7 @@ namespace dlib { namespace tt
const
tensor
&
dest
,
const
tensor
&
dest
,
const
tensor
&
src
,
const
tensor
&
src
,
tensor
&
grad
tensor
&
grad
)
;
)
{
impl
.
get_gradient
(
gradient_input
,
dest
,
src
,
grad
);
}
/*!
/*!
requires
requires
- have_same_dimensions(gradient_input,dest) == true
- have_same_dimensions(gradient_input,dest) == true
...
@@ -676,7 +700,7 @@ namespace dlib { namespace tt
...
@@ -676,7 +700,7 @@ namespace dlib { namespace tt
#ifdef DLIB_USE_CUDA
#ifdef DLIB_USE_CUDA
cuda
::
pooling
impl
;
cuda
::
pooling
impl
;
#else
#else
// TODO
cpu
::
pooling
impl
;
#endif
#endif
};
};
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment