Commit 5039f0ba authored Nov 13, 2015 by Davis King
Changed the API for functions that can operate in-place to a more appropriate form.

parent c1433b3d
Showing 4 changed files with 90 additions and 121 deletions
dlib/dnn/cudnn_dlibapi.cpp    +25  -31
dlib/dnn/cudnn_dlibapi.h      +25  -38
dlib/dnn/tensor_tools.cpp     +15  -14
dlib/dnn/tensor_tools.h       +25  -38
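In short, the gradient helpers for sigmoid, relu, and tanh lose their src parameter (the gradient is computed from the saved activation output dest alone), softmax_gradient's softmaxed_data parameter is renamed to dest, and the result is now assigned to grad rather than accumulated into it. A minimal before/after sketch of the call pattern follows; it is illustrative only, uses hypothetical tensor names, and is not code from this commit:

#include <dlib/dnn.h>

// Illustrative sketch of the API change; "grad", "dest", "src", and
// "gradient_input" are hypothetical tensors supplied by the caller.
void relu_backward_example(dlib::resizable_tensor& grad,
                           const dlib::resizable_tensor& dest,            // output of the relu forward pass
                           const dlib::resizable_tensor& gradient_input)  // gradient flowing back into relu
{
    // Before this commit the call also took the forward input src and added
    // the result into grad:
    //     tt::relu_gradient(grad, dest, src, gradient_input);

    // After this commit only the forward output dest is needed and the result
    // is assigned to grad, so grad may alias gradient_input:
    dlib::tt::relu_gradient(grad, dest, gradient_input);
}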
dlib/dnn/cudnn_dlibapi.cpp  View file @ 5039f0ba

...
@@ -639,24 +639,24 @@ namespace dlib
         void softmax_gradient (
             tensor& grad,
-            const tensor& softmaxed_data,
+            const tensor& dest,
             const tensor& gradient_input
         )
         {
             DLIB_CASSERT(
-                  have_same_dimensions(softmaxed_data,gradient_input) == true &&
-                  have_same_dimensions(softmaxed_data,grad) == true , "");
-            if (softmaxed_data.size() == 0)
+                  have_same_dimensions(dest,gradient_input) == true &&
+                  have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnSoftmaxBackward(context(),
                                   CUDNN_SOFTMAX_ACCURATE,
                                   CUDNN_SOFTMAX_MODE_CHANNEL,
                                   &alpha,
-                                  descriptor(softmaxed_data),
-                                  softmaxed_data.device(),
+                                  descriptor(dest),
+                                  dest.device(),
                                   descriptor(gradient_input),
                                   gradient_input.device(),
                                   &beta,
...
@@ -691,19 +691,17 @@ namespace dlib
         void sigmoid_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
             DLIB_CASSERT(
-                  have_same_dimensions(src,gradient_input) == true &&
-                  have_same_dimensions(src,grad) == true &&
-                  have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+                  have_same_dimensions(dest,gradient_input) == true &&
+                  have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                   CUDNN_ACTIVATION_SIGMOID,
                                   &alpha,
...
@@ -711,8 +709,8 @@ namespace dlib
                                   dest.device(),
                                   descriptor(gradient_input),
                                   gradient_input.device(),
-                                  descriptor(src),
-                                  src.device(),
+                                  descriptor(dest),
+                                  dest.device(),
                                   &beta,
                                   descriptor(grad),
                                   grad.device()));
...
@@ -744,19 +742,17 @@ namespace dlib
         void relu_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
             DLIB_CASSERT(
-                  have_same_dimensions(src,gradient_input) == true &&
-                  have_same_dimensions(src,grad) == true &&
-                  have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+                  have_same_dimensions(dest,gradient_input) == true &&
+                  have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                   CUDNN_ACTIVATION_RELU,
                                   &alpha,
...
@@ -764,8 +760,8 @@ namespace dlib
                                   dest.device(),
                                   descriptor(gradient_input),
                                   gradient_input.device(),
-                                  descriptor(src),
-                                  src.device(),
+                                  descriptor(dest),
+                                  dest.device(),
                                   &beta,
                                   descriptor(grad),
                                   grad.device()));
...
@@ -797,19 +793,17 @@ namespace dlib
         void tanh_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         )
         {
             DLIB_CASSERT(
-                  have_same_dimensions(src,gradient_input) == true &&
-                  have_same_dimensions(src,grad) == true &&
-                  have_same_dimensions(src,dest) == true , "");
-            if (src.size() == 0)
+                  have_same_dimensions(dest,gradient_input) == true &&
+                  have_same_dimensions(dest,grad) == true , "");
+            if (dest.size() == 0)
                 return;

             const float alpha = 1;
-            const float beta = 1;
+            const float beta = 0;
             check(cudnnActivationBackward(context(),
                                   CUDNN_ACTIVATION_TANH,
                                   &alpha,
...
@@ -817,8 +811,8 @@ namespace dlib
                                   dest.device(),
                                   descriptor(gradient_input),
                                   gradient_input.device(),
-                                  descriptor(src),
-                                  src.device(),
+                                  descriptor(dest),
+                                  dest.device(),
                                   &beta,
                                   descriptor(grad),
                                   grad.device()));
...
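For context on the beta change above: the cuDNN *Backward routines blend the gradient they compute into the output buffer using the alpha/beta scaling convention, roughly as sketched below. With the old beta of 1 the result was accumulated into grad; with the new beta of 0 it overwrites grad, which is what makes it safe to pass the same tensor as both grad and gradient_input.

#include <cstddef>

// Rough model of cuDNN's alpha/beta output blending (illustrative only; the
// real routines operate on tensor descriptors on the GPU).
void blend_output(float* grad, const float* computed, std::size_t n,
                  float alpha, float beta)
{
    for (std::size_t i = 0; i < n; ++i)
        grad[i] = alpha * computed[i] + beta * grad[i];  // beta==0: assign, beta==1: accumulate
}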
dlib/dnn/cudnn_dlibapi.h  View file @ 5039f0ba

...
@@ -346,19 +346,18 @@ namespace dlib
         void softmax_gradient (
             tensor& grad,
-            const tensor& softmaxed_data,
+            const tensor& dest,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(softmaxed_data,gradient_input) == true
-                - have_same_dimensions(softmaxed_data,grad) == true
-                - is_same_object(grad,softmaxed_data)==false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
+                - is_same_object(grad,dest)==false
             ensures
-                - We interpret softmaxed_data as the output of softmax(softmaxed_data,SRC)
-                  for some SRC tensor.  Then let f(SRC) == dot(gradient_input,softmaxed_data)
-                  Then this function computes the gradient of f() with respect to SRC and
-                  adds it to grad.
+                - We interpret dest as the output of softmax(dest,SRC) for some SRC tensor.
+                  Then let f(SRC) == dot(gradient_input,dest)  Then this function computes
+                  the gradient of f() with respect to SRC and assigns it to grad.
                 - This function supports in-place operation, i.e. having
                   is_same_object(grad, gradient_input)==true
         !*/
...
@@ -381,22 +380,18 @@ namespace dlib
         void sigmoid_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling sigmoid(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
                 - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of sigmoid(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of sigmoid(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
                 - This function supports in-place operation, i.e. having
                   is_same_object(grad, gradient_input)==true
         !*/
...
@@ -419,22 +414,18 @@ namespace dlib
         void relu_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling relu(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
                 - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of relu(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of relu(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
                 - This function supports in-place operation, i.e. having
                   is_same_object(grad, gradient_input)==true
         !*/
...
@@ -457,22 +448,18 @@ namespace dlib
         void tanh_gradient (
             tensor& grad,
             const tensor& dest,
-            const tensor& src,
             const tensor& gradient_input
         );
         /*!
             requires
-                - have_same_dimensions(src,gradient_input) == true
-                - have_same_dimensions(src,grad) == true
-                - have_same_dimensions(src,dest) == true
-                - dest contains the result of calling tanh(dest,src)
-                - is_same_object(grad,src) == false
+                - have_same_dimensions(dest,gradient_input) == true
+                - have_same_dimensions(dest,grad) == true
                 - is_same_object(grad,dest) == false
             ensures
-                - Recalling that dest is the output of tanh(dest,src),
-                  let f(src) == dot(gradient_input,dest)
-                - Then this function computes the gradient of f() with respect to src and
-                  adds it to grad.
+                - Recalling that dest is the output of tanh(dest,SRC) for some SRC tensor,
+                  let f(SRC) == dot(gradient_input,dest)
+                - Then this function computes the gradient of f() with respect to SRC and
+                  assigns it to grad.
                 - This function supports in-place operation, i.e. having
                   is_same_object(grad, gradient_input)==true
         !*/
...
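The updated contracts keep the in-place guarantee, is_same_object(grad, gradient_input)==true, and because the result is now assigned rather than added, a caller can reuse the incoming gradient buffer directly. A hedged sketch of that pattern (not code from this commit; the tensor names are hypothetical):

#include <dlib/dnn.h>

// Sketch: turn dL/d(dest) into dL/dSRC in place, where dest == sigmoid(SRC).
void sigmoid_backward_in_place(dlib::resizable_tensor& gradient_input,  // dL/d(dest) on entry
                               const dlib::resizable_tensor& dest)      // saved forward output
{
    // Passing gradient_input as both grad and gradient_input is allowed by the
    // new contract because the function assigns (beta == 0) instead of adding.
    dlib::tt::sigmoid_gradient(gradient_input, dest, gradient_input);
    // gradient_input now holds dL/dSRC.
}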
dlib/dnn/tensor_tools.cpp  View file @ 5039f0ba

...
@@ -86,7 +86,7 @@ namespace dlib { namespace tt
     {
         DLIB_CASSERT(have_same_dimensions(dest,src) == true,"");
 #ifdef DLIB_USE_CUDA
-        cuda::multiply(dest, src);
+        //cuda::multiply(dest, src);
 #else
         cpu::multiply(dest, src);
 #endif
...
@@ -103,7 +103,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::affine_transform(dest,src,A,B);
+        //cuda::affine_transform(dest,src,A,B);
 #else
         cpu::affine_transform(dest,src,A,B);
 #endif
...
@@ -119,7 +119,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::affine_transform(dest,src,A,B);
+        //cuda::affine_transform(dest,src,A,B);
 #else
         cpu::affine_transform(dest,src,A,B);
 #endif
...
@@ -137,7 +137,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::batch_normalize(dest,means,vars,src,gamma,beta);
+        //cuda::batch_normalize(dest,means,vars,src,gamma,beta);
 #else
         cpu::batch_normalize(dest,means,vars,src,gamma,beta);
 #endif
...
@@ -157,8 +157,10 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
+        /*
         cuda::batch_normalize_gradient(gradient_input,means,vars,src,gamma,
                             src_grad,gamma_grad,beta_grad);
+        */
 #else
         cpu::batch_normalize_gradient(gradient_input,means,vars,src,gamma,
                             src_grad,gamma_grad,beta_grad);
...
@@ -177,7 +179,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::batch_normalize_conv(dest,means,vars,src,gamma,beta);
+        //cuda::batch_normalize_conv(dest,means,vars,src,gamma,beta);
 #else
         cpu::batch_normalize_conv(dest,means,vars,src,gamma,beta);
 #endif
...
@@ -197,8 +199,10 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
+        /*
         cuda::batch_normalize_conv_gradient(gradient_input,means,vars,src,gamma,
                             src_grad,gamma_grad,beta_grad);
+        */
 #else
         cpu::batch_normalize_conv_gradient(gradient_input,means,vars,src,gamma,
                             src_grad,gamma_grad,beta_grad);
...
@@ -213,7 +217,7 @@ namespace dlib { namespace tt
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::threshold(data,thresh);
+        //cuda::threshold(data,thresh);
 #else
         cpu::threshold(data,thresh);
 #endif
...
@@ -417,12 +421,12 @@ namespace dlib { namespace tt
     void softmax_gradient (
         tensor& grad,
-        const tensor& softmaxed_data,
+        const tensor& dest,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::softmax_gradient(grad, softmaxed_data, gradient_input);
+        cuda::softmax_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -447,12 +451,11 @@ namespace dlib { namespace tt
     void sigmoid_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::sigmoid_gradient(grad, dest, src, gradient_input);
+        cuda::sigmoid_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -477,12 +480,11 @@ namespace dlib { namespace tt
     void relu_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::relu_gradient(grad, dest, src, gradient_input);
+        cuda::relu_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
@@ -507,12 +509,11 @@ namespace dlib { namespace tt
     void tanh_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     )
     {
 #ifdef DLIB_USE_CUDA
-        cuda::tanh_gradient(grad, dest, src, gradient_input);
+        cuda::tanh_gradient(grad, dest, gradient_input);
 #else
         // TODO
         DLIB_CASSERT(false,"");
...
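The tensor_tools.cpp changes above follow the file's usual dispatch idiom, sketched below for reference: each tt:: wrapper forwards to the cuda:: implementation when DLIB_USE_CUDA is defined and to the cpu:: implementation otherwise. The names in the sketch are illustrative, not repository code. Note that because the cuda:: calls for multiply, affine_transform, batch_normalize*, and threshold are commented out here, a CUDA build compiles only the #ifdef branch and those wrappers do nothing in such a build for the moment.

#include <dlib/dnn.h>

// Illustrative sketch of the dispatch idiom; my_op, my_op_gpu, and my_op_cpu
// are hypothetical stand-ins for a tt:: wrapper and its cuda::/cpu:: backends
// (definitions of the two backends are omitted in this sketch).
namespace example
{
    void my_op_gpu(dlib::tensor& dest, const dlib::tensor& src);  // hypothetical CUDA backend
    void my_op_cpu(dlib::tensor& dest, const dlib::tensor& src);  // hypothetical CPU backend

    void my_op(dlib::tensor& dest, const dlib::tensor& src)
    {
#ifdef DLIB_USE_CUDA
        my_op_gpu(dest, src);   // compiled for CUDA builds
#else
        my_op_cpu(dest, src);   // compiled otherwise
#endif
    }
}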
dlib/dnn/tensor_tools.h  View file @ 5039f0ba

...
@@ -519,19 +519,18 @@ namespace dlib { namespace tt
     void softmax_gradient (
         tensor& grad,
-        const tensor& softmaxed_data,
+        const tensor& dest,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(softmaxed_data,gradient_input) == true
-            - have_same_dimensions(softmaxed_data,grad) == true
-            - is_same_object(grad,softmaxed_data)==false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
+            - is_same_object(grad,dest)==false
         ensures
-            - We interpret softmaxed_data as the output of softmax(softmaxed_data,SRC) for
-              some SRC tensor.  Then let f(SRC) == dot(gradient_input,softmaxed_data)  Then
-              this function computes the gradient of f() with respect to SRC and adds it to
-              grad.
+            - We interpret dest as the output of softmax(dest,SRC) for some SRC tensor.
+              Then let f(SRC) == dot(gradient_input,dest)  Then this function computes the
+              gradient of f() with respect to SRC and adds it to grad.
             - This function supports in-place operation, i.e. having
               is_same_object(grad, gradient_input)==true
     !*/
...
@@ -554,22 +553,18 @@ namespace dlib { namespace tt
     void sigmoid_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling sigmoid(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
             - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of sigmoid(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and
-              adds it to grad.
+            - Recalling that dest is the output of sigmoid(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
             - This function supports in-place operation, i.e. having
              is_same_object(grad, gradient_input)==true
     !*/
...
@@ -592,22 +587,18 @@ namespace dlib { namespace tt
     void relu_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling relu(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
             - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of relu(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and adds
-              it to grad.
+            - Recalling that dest is the output of relu(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
             - This function supports in-place operation, i.e. having
              is_same_object(grad, gradient_input)==true
     !*/
...
@@ -630,22 +621,18 @@ namespace dlib { namespace tt
     void tanh_gradient (
         tensor& grad,
         const tensor& dest,
-        const tensor& src,
         const tensor& gradient_input
     );
     /*!
         requires
-            - have_same_dimensions(src,gradient_input) == true
-            - have_same_dimensions(src,grad) == true
-            - have_same_dimensions(src,dest) == true
-            - dest contains the result of calling tanh(dest,src)
-            - is_same_object(grad,src) == false
+            - have_same_dimensions(dest,gradient_input) == true
+            - have_same_dimensions(dest,grad) == true
             - is_same_object(grad,dest) == false
         ensures
-            - Recalling that dest is the output of tanh(dest,src),
-              let f(src) == dot(gradient_input,dest)
-            - Then this function computes the gradient of f() with respect to src and adds
-              it to grad.
+            - Recalling that dest is the output of tanh(dest,SRC) for some SRC tensor,
+              let f(SRC) == dot(gradient_input,dest)
+            - Then this function computes the gradient of f() with respect to SRC and
+              assigns it to grad.
             - This function supports in-place operation, i.e. having
              is_same_object(grad, gradient_input)==true
     !*/
...
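Dropping src from these contracts works because, for these particular activations, the derivative can be written purely in terms of the saved output dest: sigmoid'(x) = dest*(1-dest), tanh'(x) = 1-dest^2, and relu'(x) = 1 where dest > 0 and 0 elsewhere. A hedged, element-wise sketch of what a CPU version of the new sigmoid contract could look like (the non-CUDA path is still marked TODO in tensor_tools.cpp above; this is illustrative only, not repository code):

#include <dlib/dnn.h>

// Hedged sketch: assigns grad = dL/dSRC given dest == sigmoid(SRC) and
// gradient_input == dL/d(dest); grad may alias gradient_input.
void sigmoid_gradient_cpu_sketch(dlib::tensor& grad,
                                 const dlib::tensor& dest,
                                 const dlib::tensor& gradient_input)
{
    const float* d  = dest.host();
    const float* gi = gradient_input.host();
    float* g        = grad.host();
    for (size_t i = 0; i < dest.size(); ++i)
        g[i] = gi[i] * d[i] * (1 - d[i]);   // sigmoid'(SRC) == dest*(1-dest)
}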