Commit 15b2d7b5 authored May 22, 2016 by Davis King
Added get_learning_rate_multiplier() and get_weight_decay_multiplier() global functions.

parent 58496f9f
Showing 3 changed files with 71 additions and 4 deletions:

    dlib/algs.h               +7  -0
    dlib/dnn/core.h           +38 -4
    dlib/dnn/core_abstract.h  +26 -0
dlib/algs.h

@@ -488,6 +488,13 @@ namespace dlib

// ----------------------------------------------------------------------------------------

    struct general_ {};
    struct special_ : general_ {};
    template<typename> struct int_ { typedef int type; };

// ----------------------------------------------------------------------------------------

    /*!A is_same_object

        This is a templated function which checks if both of its arguments are actually
        ...
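For context, these three additions to algs.h are the ingredients of a standard tag-dispatch idiom: special_ converts to general_, so an overload taking special_ wins overload resolution whenever it is viable, and int_<decltype(...)> makes it viable only when a given member exists. Below is a minimal, self-contained sketch of the same pattern; the Widget, Gadget, and get_scale names are hypothetical illustrations, not part of dlib.

    #include <iostream>

    // The three helpers added to dlib/algs.h, reproduced so the sketch
    // stands alone.
    struct general_ {};
    struct special_ : general_ {};
    template<typename> struct int_ { typedef int type; };

    struct Widget { double scale() const { return 2.5; } }; // has a scale() member
    struct Gadget {};                                       // has no scale() member

    namespace impl
    {
        // Viable only when &T::scale is well formed; preferred for a special_
        // argument because it needs no derived-to-base conversion.
        template <typename T, typename int_<decltype(&T::scale)>::type = 0>
        double get_scale(const T& obj, special_) { return obj.scale(); }

        // Fallback: always viable, but loses to the overload above when both are.
        template <typename T>
        double get_scale(const T&, general_) { return 1; }
    }

    template <typename T>
    double get_scale(const T& obj) { return impl::get_scale(obj, special_()); }

    int main()
    {
        std::cout << get_scale(Widget()) << "\n"; // prints 2.5
        std::cout << get_scale(Gadget()) << "\n"; // prints 1
    }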
dlib/dnn/core.h

@@ -24,6 +24,38 @@

namespace dlib
{

// ----------------------------------------------------------------------------------------

    namespace impl
    {
        template <typename T, typename int_<decltype(&T::get_learning_rate_multiplier)>::type = 0>
        double get_learning_rate_multiplier (
            const T& obj,
            special_
        ) { return obj.get_learning_rate_multiplier(); }

        template <typename T>
        double get_learning_rate_multiplier (const T& obj, general_) { return 1; }
    }
    template <typename T>
    double get_learning_rate_multiplier(const T& obj) { return impl::get_learning_rate_multiplier(obj, special_()); }

// ----------------------------------------------------------------------------------------

    namespace impl
    {
        template <typename T, typename int_<decltype(&T::get_weight_decay_multiplier)>::type = 0>
        double get_weight_decay_multiplier (
            const T& obj,
            special_
        ) { return obj.get_weight_decay_multiplier(); }

        template <typename T>
        double get_weight_decay_multiplier (const T& obj, general_) { return 1; }
    }
    template <typename T>
    double get_weight_decay_multiplier(const T& obj) { return impl::get_weight_decay_multiplier(obj, special_()); }

// ----------------------------------------------------------------------------------------

    namespace impl
    ...
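The resulting behavior of the two global functions can be checked directly. A minimal sketch, assuming the toy with_multipliers and without_multipliers structs below (hypothetical stand-ins for layer detail objects, not real dlib layers):

    #include <cassert>
    #include <dlib/dnn.h>

    // Hypothetical stand-ins for layer detail objects.
    struct with_multipliers
    {
        double get_learning_rate_multiplier() const { return 0.1; }
        double get_weight_decay_multiplier() const { return 0.0; }
    };
    struct without_multipliers {};

    int main()
    {
        // Members present: the special_ overloads forward to them.
        assert(dlib::get_learning_rate_multiplier(with_multipliers()) == 0.1);
        assert(dlib::get_weight_decay_multiplier(with_multipliers()) == 0.0);

        // Members absent: the general_ fallbacks return the neutral value 1.
        assert(dlib::get_learning_rate_multiplier(without_multipliers()) == 1);
        assert(dlib::get_weight_decay_multiplier(without_multipliers()) == 1);
    }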
@@ -849,8 +881,9 @@ namespace dlib

         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
             DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
-            // Don't try to adjust the parameters if this layer doesn't have any.
-            if (params_grad.size() != 0)
+            // Don't try to adjust the parameters if this layer doesn't have any or the
+            // learning rate is disabled for this layer.
+            if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
             {
                 const tensor& step = solvers.top()(learning_rate, details, static_cast<const tensor&>(params_grad));
                 tt::add(details.get_layer_params(), details.get_layer_params(), step);
                 ...
@@ -1200,8 +1233,9 @@ namespace dlib

         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
             DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
-            // Don't try to adjust the parameters if this layer doesn't have any.
-            if (params_grad.size() != 0)
+            // Don't try to adjust the parameters if this layer doesn't have any or the
+            // learning rate is disabled for this layer.
+            if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
             {
                 const tensor& step = solvers.top()(learning_rate, details, static_cast<const tensor&>(params_grad));
                 tt::add(details.get_layer_params(), details.get_layer_params(), step);
                 ...
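The practical effect of the added get_learning_rate_multiplier(details) != 0 test is that a layer can opt out of training entirely. A hedged sketch, assuming a layer detail class is free to expose the member function (the frozen_fc_ class is hypothetical):

    // Hypothetical layer detail class whose parameters should never change
    // during training.  Because get_learning_rate_multiplier() returns 0,
    // the update_parameters() code above skips the solver step for this
    // layer entirely, leaving its weights fixed.
    class frozen_fc_
    {
    public:
        double get_learning_rate_multiplier() const { return 0; }
        // ... the usual setup()/forward()/backward()/get_layer_params() members ...
    };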
dlib/dnn/core_abstract.h

@@ -67,6 +67,32 @@ namespace dlib

            (except computes it using a numerically accurate method)
    !*/

// ----------------------------------------------------------------------------------------

    template <typename T>
    double get_learning_rate_multiplier(
        const T& obj
    );
    /*!
        ensures
            - if (obj has a get_learning_rate_multiplier() member function) then
                - returns obj.get_learning_rate_multiplier()
            - else
                - returns 1
    !*/

    template <typename T>
    double get_weight_decay_multiplier(
        const T& obj
    );
    /*!
        ensures
            - if (obj has a get_weight_decay_multiplier() member function) then
                - returns obj.get_weight_decay_multiplier()
            - else
                - returns 1
    !*/

// ----------------------------------------------------------------------------------------

    bool dnn_prefer_fastest_algorithms(
    ...
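Beyond gating update_parameters(), these per-layer multipliers are natural inputs to a solver's update rule. A minimal sketch of that consumption pattern, assuming hypothetical helper functions (this is not dlib's actual solver code):

    #include <dlib/dnn.h>

    // Hypothetical helpers showing how a solver might fold the per-layer
    // multipliers into its global hyperparameters.
    template <typename layer_details>
    double effective_learning_rate(double learning_rate, const layer_details& l)
    {
        // Returns learning_rate unchanged when the layer defines no multiplier,
        // since get_learning_rate_multiplier() then falls back to 1.
        return learning_rate * dlib::get_learning_rate_multiplier(l);
    }

    template <typename layer_details>
    double effective_weight_decay(double weight_decay, const layer_details& l)
    {
        return weight_decay * dlib::get_weight_decay_multiplier(l);
    }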