Commit c627898e authored Dec 24, 2015 by Davis King
Fixed the tag and skip layers so they compile now that we have the
in-place/out-of-place logic present.
parent 7bb7f8a2
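For context: a tag layer (add_tag_layer) marks a point in a network so that later layers, or user code, can refer back to it, and a skip layer (add_skip_layer) routes the data path back to a tagged layer. Below is a minimal sketch of a definition that exercises both, using dlib's tag1/skip1 aliases. It is written against the present-day dlib API; the loss and layer sizes are illustrative and not part of this commit.

    #include <dlib/dnn.h>
    using namespace dlib;

    // tag1 marks the input; skip1 later jumps straight back to that tagged
    // tensor, bypassing the tag2<relu<fc<...>>> branch in between.  Networks
    // like this are what this commit makes compile again, now that every
    // layer must expose the private in-place/out-of-place tensor getters.
    using net_type = loss_multiclass_log<
                         fc<10,
                         skip1<
                         tag2<relu<fc<32,
                         tag1<input<matrix<unsigned char>>
                         >>>>>>>;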
Showing 1 changed file with 89 additions and 1 deletion.

dlib/dnn/core.h  +89 -1
@@ -487,6 +487,10 @@ namespace dlib
         friend class add_layer;
         template <typename T, bool is_first, typename E>
         friend class dimpl::subnet_wrapper;
+        template <unsigned long T, typename U, typename E>
+        friend class add_tag_layer;
+        template <template<typename> class T, typename U>
+        friend class add_skip_layer;

         // Allow copying networks from one to another as long as their corresponding
         // layers can be constructed from each other.
@@ -714,7 +718,7 @@ namespace dlib
         )
         {
             // This layer can run in-place if it's an in-place capable layer and also if
-            // the layer it's on top of doesn't need it's own output tensor (since in-place
+            // the layer it's on top of doesn't need its own output tensor (since in-place
             // layers overwrite that tensor)
             return impl::is_inplace_layer(details, subnetwork) && !subnetwork.this_layer_requires_forward_output();
         }
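To make that rule concrete, here is a toy illustration (plain C++, not dlib code) of the two execution modes being chosen between. An element-wise layer such as relu can do its work either way; in-place saves a tensor allocation but destroys the layer-below's output.

    #include <cstddef>

    struct toy_relu
    {
        // In-place: overwrites the subnetwork's output tensor.  Only legal
        // when nothing else (e.g. a tag layer) still needs that tensor.
        static void forward_inplace(float* data, std::size_t n)
        {
            for (std::size_t i = 0; i < n; ++i)
                if (data[i] < 0) data[i] = 0;
        }

        // Out-of-place: writes to separate storage, leaving the input
        // readable.  This is the fallback whenever the layer below reports
        // this_layer_requires_forward_output() == true.
        static void forward(const float* in, float* out, std::size_t n)
        {
            for (std::size_t i = 0; i < n; ++i)
                out[i] = in[i] < 0 ? 0 : in[i];
        }
    };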
@@ -787,6 +791,10 @@ namespace dlib
         friend class add_layer;
         template <typename T, bool is_first, typename E>
         friend class dimpl::subnet_wrapper;
+        template <unsigned long T, typename U, typename E>
+        friend class add_tag_layer;
+        template <template<typename> class T, typename U>
+        friend class add_skip_layer;

         // Allow copying networks from one to another as long as their corresponding
         // layers can be constructed from each other.
@@ -1157,6 +1165,36 @@ namespace dlib
     private:

+        template <typename T, typename U, typename E>
+        friend class add_layer;
+        template <typename T, bool is_first, typename E>
+        friend class dimpl::subnet_wrapper;
+        template <unsigned long T, typename U, typename E>
+        friend class add_tag_layer;
+        template <template<typename> class T, typename U>
+        friend class add_skip_layer;
+
+        // You wouldn't put a tag on a layer if you didn't want to access its forward
+        // outputs.  So this is always true.
+        bool this_layer_requires_forward_output(
+        ) { return true; }
+
+        void disable_output_and_gradient_getters(
+        )
+        {
+            // This should never happen because only inplace layers call
+            // disable_output_and_gradient_getters(), however, putting a tag layer right
+            // before an inplace layer basically means you don't want the following layer
+            // to operate in place.  So the inplace layer should turn itself into an
+            // out-of-place layer and not call disable_output_and_gradient_getters().
+            DLIB_CASSERT(false, "This should never happen");
+        }
+
+        tensor& private_get_output() const
+        { return subnetwork.private_get_output(); }
+        tensor& private_get_gradient_input()
+        { return subnetwork.private_get_gradient_input(); }
+
         subnet_type subnetwork;
     };
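The "always true" here is what keeps tagged tensors alive: an in-place layer sitting directly on a tag sees this_layer_requires_forward_output() return true and falls back to out-of-place mode, so the tagged output survives for later inspection. A hedged usage sketch, assuming the present-day dlib aliases and the add_layer operator() that runs a single-sample forward pass (sizes arbitrary):

    #include <dlib/dnn.h>
    #include <iostream>
    using namespace dlib;

    // relu sits directly on tag1, so the logic above forces it out-of-place.
    using net_type = relu<tag1<fc<8, input<matrix<float>>>>>;

    void show_tagged_output(net_type& net, const matrix<float>& sample)
    {
        net(sample);  // forward pass
        // Reading the tagged tensor afterwards is the whole point of tag1;
        // had relu run in-place, this tensor would have been overwritten.
        const tensor& t = layer<tag1>(net).get_output();
        std::cout << "tag1 output has " << t.size() << " elements\n";
    }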
@@ -1278,6 +1316,36 @@ namespace dlib
     private:

+        template <typename T, typename U, typename E>
+        friend class add_layer;
+        template <typename T, bool is_first, typename E>
+        friend class dimpl::subnet_wrapper;
+        template <unsigned long T, typename U, typename E>
+        friend class add_tag_layer;
+        template <template<typename> class T, typename U>
+        friend class add_skip_layer;
+
+        // You wouldn't put a tag on a layer if you didn't want to access its forward
+        // outputs.  So this is always true.
+        bool this_layer_requires_forward_output(
+        ) { return true; }
+
+        void disable_output_and_gradient_getters(
+        )
+        {
+            // This should never happen because only inplace layers call
+            // disable_output_and_gradient_getters(), however, putting a tag layer right
+            // before an inplace layer basically means you don't want the following layer
+            // to operate in place.  So the inplace layer should turn itself into an
+            // out-of-place layer and not call disable_output_and_gradient_getters().
+            DLIB_CASSERT(false, "This should never happen");
+        }
+
+        tensor& private_get_output() const
+        { return get_output(); }
+        tensor& private_get_gradient_input()
+        { return get_gradient_input(); }
+
         void swap(add_tag_layer& item)
         {
             std::swap(input_layer, item.input_layer);
@@ -1775,6 +1843,26 @@ namespace dlib
     private:

+        template <typename T, typename U, typename E>
+        friend class add_layer;
+        template <typename T, bool is_first, typename E>
+        friend class dimpl::subnet_wrapper;
+        template <unsigned long T, typename U, typename E>
+        friend class add_tag_layer;
+        template <template<typename> class T, typename U>
+        friend class add_skip_layer;
+
+        bool this_layer_requires_forward_output(
+        ) { return layer<TAG_TYPE>(subnetwork).this_layer_requires_forward_output(); }
+
+        void disable_output_and_gradient_getters(
+        ) { layer<TAG_TYPE>(subnetwork).disable_output_and_gradient_getters(); }
+
+        tensor& private_get_output() const
+        { return layer<TAG_TYPE>(subnetwork).private_get_output(); }
+        tensor& private_get_gradient_input()
+        { return layer<TAG_TYPE>(subnetwork).private_get_gradient_input(); }
+
         subnet_type subnetwork;
     };

     template < template<typename> class T, typename U >
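Note the design choice in this last hunk: add_skip_layer owns no tensors at all. Every getter resolves the tagged layer via layer<TAG_TYPE>(subnetwork) and forwards the call, so a skip's output aliases the tag's stored tensor by reference, with no copying. A minimal sketch of what that means in a definition (present-day tag1/skip1 aliases; the topology is deliberately artificial):

    #include <dlib/dnn.h>
    using namespace dlib;

    // fc<2,...> reads skip1's output, which is tag1's tensor returned by
    // reference through the private_get_output() chain above -- the
    // relu<fc<5,...>> branch is evaluated but its result is bypassed.
    using net_type = fc<2, skip1<relu<fc<5, tag1<input<matrix<float>>>>>>>;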