Commit fa812881, authored Sep 28, 2015 by Davis King
Just removed the _ from sub_net.
parent 31757a21
Showing 6 changed files, with 312 additions and 312 deletions:
    dlib/dnn/core.h             +153  -153
    dlib/dnn/core_abstract.h    +79   -79
    dlib/dnn/layers.h           +32   -32
    dlib/dnn/layers_abstract.h  +30   -30
    dlib/dnn/loss.h             +8    -8
    dlib/dnn/loss_abstract.h    +10   -10
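The rename is purely mechanical: every identifier containing "sub_net" loses the underscore. The complete mapping, as it appears across the six files:

    sub_net()                      ->  subnet()
    sub_net_type                   ->  subnet_type
    sub_net_wrapper                ->  subnet_wrapper
    sub_network (member variable)  ->  subnetwork
    SUB_NET (template parameter)   ->  SUBNET
    test_layer_sub_net             ->  test_layer_subnet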
dlib/dnn/core.h
@@ -94,68 +94,68 @@ namespace dlib
     namespace dimpl
     {
         template <typename T, typename enabled = void>
-        class sub_net_wrapper
+        class subnet_wrapper
         {
             /*!
                 WHAT THIS OBJECT REPRESENTS
                 This is a tool that makes an add_layer or add_loss_layer object
-                expose only the part of its interface defined by the SUB_NET
+                expose only the part of its interface defined by the SUBNET
                 type in layers_abstract.h.  This way, when we pass sub network
                 objects to the layer callbacks those callbacks won't be able to
                 interact with the sub networks in a way other than specified
-                by the SUB_NET interface spec.
+                by the SUBNET interface spec.
             !*/
         public:
-            sub_net_wrapper(const sub_net_wrapper&) = delete;
-            sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+            subnet_wrapper(const subnet_wrapper&) = delete;
+            subnet_wrapper& operator=(const subnet_wrapper&) = delete;

-            sub_net_wrapper(T& l_) {}
+            subnet_wrapper(T& l_) {}
             // Nothing here because in this case T is one of the input layer types
             // that doesn't have anything in it.
         };

         template <typename T>
-        class sub_net_wrapper<T, typename std::enable_if<is_nonloss_layer_type<T>::value>::type>
+        class subnet_wrapper<T, typename std::enable_if<is_nonloss_layer_type<T>::value>::type>
         {
         public:
-            sub_net_wrapper(const sub_net_wrapper&) = delete;
-            sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+            subnet_wrapper(const subnet_wrapper&) = delete;
+            subnet_wrapper& operator=(const subnet_wrapper&) = delete;

             typedef T wrapped_type;
             const static size_t num_layers = T::num_layers;

-            sub_net_wrapper(T& l_) : l(l_), sub(l.sub_net()) {}
+            subnet_wrapper(T& l_) : l(l_), sub(l.subnet()) {}

             const tensor& get_output() const { return l.get_output(); }
             tensor& get_gradient_input() { return l.get_gradient_input(); }

-            const sub_net_wrapper<typename T::sub_net_type>& sub_net() const { sub; }
-            sub_net_wrapper<typename T::sub_net_type>& sub_net() { sub; }
+            const subnet_wrapper<typename T::subnet_type>& subnet() const { sub; }
+            subnet_wrapper<typename T::subnet_type>& subnet() { sub; }

         private:
             T& l;
-            sub_net_wrapper<typename T::sub_net_type> sub;
+            subnet_wrapper<typename T::subnet_type> sub;
         };
     }

-    template <typename LAYER_DETAILS, typename SUB_NET, typename enabled = void>
+    template <typename LAYER_DETAILS, typename SUBNET, typename enabled = void>
     class add_layer;

     template <typename T, typename U>
     struct is_nonloss_layer_type<add_layer<T,U>> : std::true_type {};

-    template <typename LAYER_DETAILS, typename SUB_NET>
-    class add_layer<LAYER_DETAILS, SUB_NET,
-                    typename std::enable_if<is_nonloss_layer_type<SUB_NET>::value>::type>
+    template <typename LAYER_DETAILS, typename SUBNET>
+    class add_layer<LAYER_DETAILS, SUBNET,
+                    typename std::enable_if<is_nonloss_layer_type<SUBNET>::value>::type>
     {
     public:
         typedef LAYER_DETAILS layer_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
-        const static size_t num_layers = sub_net_type::num_layers + 1;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
+        const static size_t num_layers = subnet_type::num_layers + 1;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;

         add_layer() :
@@ -178,7 +178,7 @@ namespace dlib
         add_layer(const add_layer<T,U,E>& item) :
-            sub_network(item.sub_net()),
+            subnetwork(item.subnet()),
             details(item.layer_details()),
             this_layer_setup_called(item.this_layer_setup_called),
             gradient_input_is_stale(item.gradient_input_is_stale),
@@ -193,7 +193,7 @@ namespace dlib
             T&& ...args
         ) :
             details(layer_det),
-            sub_network(std::forward<T>(args)...),
+            subnetwork(std::forward<T>(args)...),
             this_layer_setup_called(false),
             gradient_input_is_stale(true)
         {
@@ -205,7 +205,7 @@ namespace dlib
             T&& ...args
         ) :
             details(std::move(layer_det)),
-            sub_network(std::forward<T>(args)...),
+            subnetwork(std::forward<T>(args)...),
             this_layer_setup_called(false),
             gradient_input_is_stale(true)
         {
@@ -218,7 +218,7 @@ namespace dlib
             resizable_tensor& data
         ) const
         {
-            sub_network.to_tensor(ibegin,iend,data);
+            subnetwork.to_tensor(ibegin,iend,data);
         }

         template <typename input_iterator>
@@ -247,8 +247,8 @@ namespace dlib
         const tensor& forward(const tensor& x)
         {
-            sub_network.forward(x);
-            const dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            subnetwork.forward(x);
+            const dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             if (!this_layer_setup_called)
             {
                 details.setup(wsub);
@@ -281,18 +281,18 @@ namespace dlib
                   to some loss.
         !*/
         {
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             params_grad.copy_size(details.get_layer_params());
             params_grad = 0;
             details.backward(get_gradient_input(), wsub, static_cast<tensor&>(params_grad));
             // Don't try to adjust the parameters if this layer doesn't have any.
             if (params_grad.size() != 0)
                 solvers.top()(details, static_cast<const tensor&>(params_grad));
-            sub_network.update(x, solvers.pop());
+            subnetwork.update(x, solvers.pop());
         }

-        const sub_net_type& sub_net() const { return sub_network; }
-        sub_net_type& sub_net() { return sub_network; }
+        const subnet_type& subnet() const { return subnetwork; }
+        subnet_type& subnet() { return subnetwork; }

         const layer_details_type& layer_details() const { return details; }
         layer_details_type& layer_details() { return details; }
@@ -304,13 +304,13 @@ namespace dlib
             params_grad.clear();
             temp_tensor.clear();
             gradient_input_is_stale = true;
-            sub_network.clean();
+            subnetwork.clean();
         }

     private:
-        sub_net_type sub_network;
+        subnet_type subnetwork;
         LAYER_DETAILS details;
         bool this_layer_setup_called;
         bool gradient_input_is_stale;
@@ -334,7 +334,7 @@ namespace dlib
     {
     public:
         typedef LAYER_DETAILS layer_details_type;
-        typedef INPUT_LAYER sub_net_type;
+        typedef INPUT_LAYER subnet_type;
         typedef typename INPUT_LAYER::input_type input_type;
         const static unsigned int sample_expansion_factor = INPUT_LAYER::sample_expansion_factor;
         const static size_t num_layers = 1;
@@ -361,7 +361,7 @@ namespace dlib
         add_layer(const add_layer<T,U,E>& item) :
-            input_layer(item.sub_net()),
+            input_layer(item.subnet()),
             details(item.layer_details()),
             this_layer_setup_called(item.this_layer_setup_called),
             gradient_input_is_stale(item.gradient_input_is_stale),
@@ -441,7 +441,7 @@ namespace dlib
         !*/
         {
             DLIB_CASSERT(x.num_samples()%sample_expansion_factor == 0,"");
-            sub_net_wrapper wsub(x, grad_final_ignored);
+            subnet_wrapper wsub(x, grad_final_ignored);
             if (!this_layer_setup_called)
             {
                 details.setup(wsub);
@@ -474,7 +474,7 @@ namespace dlib
                 - x.num_samples() == get_gradient_input().num_samples()
         !*/
         {
-            sub_net_wrapper wsub(x, grad_final_ignored);
+            subnet_wrapper wsub(x, grad_final_ignored);
             params_grad.copy_size(details.get_layer_params());
             params_grad = 0;
             details.backward(get_gradient_input(), wsub, static_cast<tensor&>(params_grad));
@@ -483,8 +483,8 @@ namespace dlib
                 solvers.top()(details, static_cast<const tensor&>(params_grad));
         }

-        const sub_net_type& sub_net() const { return input_layer; }
-        sub_net_type& sub_net() { return input_layer; }
+        const subnet_type& subnet() const { return input_layer; }
+        subnet_type& subnet() { return input_layer; }

         const layer_details_type& layer_details() const { return details; }
         layer_details_type& layer_details() { return details; }
@@ -501,14 +501,14 @@ namespace dlib
     private:

-        class sub_net_wrapper
+        class subnet_wrapper
         {
         public:
-            sub_net_wrapper(
+            subnet_wrapper(
                 const tensor& x_,
                 resizable_tensor& grad_final_ignored_
             ) :
                 x(x_), grad_final_ignored(grad_final_ignored_) {}

-            sub_net_wrapper(const sub_net_wrapper&) = delete;
-            sub_net_wrapper& operator=(const sub_net_wrapper&) = delete;
+            subnet_wrapper(const subnet_wrapper&) = delete;
+            subnet_wrapper& operator=(const subnet_wrapper&) = delete;

             const tensor& get_output() const { return x; }
             tensor& get_gradient_input()
@@ -530,7 +530,7 @@ namespace dlib
             resizable_tensor& grad_final_ignored;
         };

-        sub_net_type input_layer;
+        subnet_type input_layer;
         LAYER_DETAILS details;
         bool this_layer_setup_called;
         bool gradient_input_is_stale;
@@ -547,18 +547,18 @@ namespace dlib
 // ----------------------------------------------------------------------------------------

-    template <unsigned long ID, typename SUB_NET, typename enabled = void>
+    template <unsigned long ID, typename SUBNET, typename enabled = void>
     class add_tag_layer;

-    template <unsigned long ID, typename SUB_NET>
-    class add_tag_layer<ID, SUB_NET,
-                        typename std::enable_if<is_nonloss_layer_type<SUB_NET>::value>::type>
+    template <unsigned long ID, typename SUBNET>
+    class add_tag_layer<ID, SUBNET,
+                        typename std::enable_if<is_nonloss_layer_type<SUBNET>::value>::type>
     {
     public:
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
-        const static size_t num_layers = sub_net_type::num_layers + 1;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
+        const static size_t num_layers = subnet_type::num_layers + 1;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
@@ -571,14 +571,14 @@ namespace dlib
         template <typename T>
         add_tag_layer(const add_tag_layer<ID,T>& item) :
-            sub_network(item.sub_net())
+            subnetwork(item.subnet())
         {}

         template <typename ...T>
         add_tag_layer(T ...args) :
-            sub_network(std::move(args)...)
+            subnetwork(std::move(args)...)
         {
         }
@@ -589,7 +589,7 @@ namespace dlib
             resizable_tensor& data
         ) const
         {
-            sub_network.to_tensor(ibegin,iend,data);
+            subnetwork.to_tensor(ibegin,iend,data);
         }

         template <typename input_iterator>
@@ -598,43 +598,43 @@ namespace dlib
             input_iterator iend
         )
         {
-            return sub_network(ibegin,iend);
+            return subnetwork(ibegin,iend);
         }

         const tensor& operator() (const input_type& x)
         {
-            return sub_network(x);
+            return subnetwork(x);
         }

         const tensor& forward(const tensor& x)
         {
-            return sub_network.forward(x);
+            return subnetwork.forward(x);
         }

-        const tensor& get_output() const { return sub_network.get_output(); }
+        const tensor& get_output() const { return subnetwork.get_output(); }

         tensor& get_gradient_input()
         {
-            return sub_network.get_gradient_input();
+            return subnetwork.get_gradient_input();
         }

         template <typename solver_type>
         void update(const tensor& x, sstack<solver_type,num_layers>& solvers)
         {
-            sub_network.update(x, solvers.pop());
+            subnetwork.update(x, solvers.pop());
         }

-        const sub_net_type& sub_net() const { return sub_network; }
-        sub_net_type& sub_net() { return sub_network; }
+        const subnet_type& subnet() const { return subnetwork; }
+        subnet_type& subnet() { return subnetwork; }

         void clean()
         {
-            sub_network.clean();
+            subnetwork.clean();
         }

     private:
-        sub_net_type sub_network;
+        subnet_type subnetwork;
     };

 // ----------------------------------------------------------------------------------------
@@ -645,10 +645,10 @@ namespace dlib
     class add_tag_layer
     {
     public:
-        typedef INPUT_LAYER sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
+        typedef INPUT_LAYER subnet_type;
+        typedef typename subnet_type::input_type input_type;
         const static size_t num_layers = 1;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
@@ -661,7 +661,7 @@ namespace dlib
         template <typename T, typename E>
         add_tag_layer(const add_tag_layer<ID,T,E>& item) :
-            input_layer(item.sub_net())
+            input_layer(item.subnet())
         {}

         template <typename ...T>
@@ -724,8 +724,8 @@ namespace dlib
             // nothing to update
         }

-        const sub_net_type& sub_net() const { return input_layer; }
-        sub_net_type& sub_net() { return input_layer; }
+        const subnet_type& subnet() const { return input_layer; }
+        subnet_type& subnet() { return input_layer; }

         void clean()
         {
@@ -735,7 +735,7 @@ namespace dlib
     private:
-        sub_net_type input_layer;
+        subnet_type input_layer;
         resizable_tensor cached_output;
         resizable_tensor grad_final_ignored;
     };
@@ -748,7 +748,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------

-    template <typename LOSS_DETAILS, typename SUB_NET>
+    template <typename LOSS_DETAILS, typename SUBNET>
     class add_loss_layer;

     class no_label_type
@@ -762,12 +762,12 @@ namespace dlib
         // add_loss_layer objects can make it (again, just to simplify add_loss_layer's
         // implementation).
         no_label_type() = default;
-        template <typename LOSS_DETAILS, typename SUB_NET> friend class add_loss_layer;
+        template <typename LOSS_DETAILS, typename SUBNET> friend class add_loss_layer;
     };

 // ----------------------------------------------------------------------------------------

-    template <typename LOSS_DETAILS, typename SUB_NET>
+    template <typename LOSS_DETAILS, typename SUBNET>
     class add_loss_layer
     {
         template <typename T, typename enabled = void>
@@ -783,14 +783,14 @@ namespace dlib
     public:
         typedef LOSS_DETAILS loss_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
         // Note that the loss layer doesn't count as an additional layer.
-        const static size_t num_layers = sub_net_type::num_layers;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        const static size_t num_layers = subnet_type::num_layers;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         typedef typename get_loss_layer_label_type<LOSS_DETAILS>::type label_type;

-        static_assert(is_nonloss_layer_type<SUB_NET>::value,
-            "SUB_NET must be of type add_layer, add_skip_layer, or add_tag_layer.");
+        static_assert(is_nonloss_layer_type<SUBNET>::value,
+            "SUBNET must be of type add_layer, add_skip_layer, or add_tag_layer.");
         static_assert(sample_expansion_factor == LOSS_DETAILS::sample_expansion_factor,
             "The loss layer and input layer must agree on the sample_expansion_factor.");
@@ -806,7 +806,7 @@ namespace dlib
             const add_loss_layer<T,U>& item
         ) :
             loss(item.loss_details()),
-            sub(item.sub_net())
+            sub(item.subnet())
         {}

         template <typename ...T>
@@ -877,7 +877,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             return loss.compute_loss(temp_tensor, lbegin, wsub);
         }
@@ -889,7 +889,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             return loss.compute_loss(temp_tensor, wsub);
         }
@@ -903,7 +903,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             double l = loss.compute_loss(temp_tensor, lbegin, wsub);
             sub.update(temp_tensor, solvers);
             return l;
@@ -918,14 +918,14 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             double l = loss.compute_loss(temp_tensor, wsub);
             sub.update(temp_tensor, solvers);
             return l;
         }

-        const sub_net_type& sub_net() const { return sub; }
-        sub_net_type& sub_net() { return sub; }
+        const subnet_type& subnet() const { return sub; }
+        subnet_type& subnet() { return sub; }
         const loss_details_type& loss_details() const { return loss; }
         loss_details_type& loss_details() { return loss; }
@@ -950,7 +950,7 @@ namespace dlib
     private:
         loss_details_type loss;
-        sub_net_type sub;
+        subnet_type sub;
         // These two objects don't logically contribute to the state of this object.  They
         // are here to prevent them from being reallocated over and over.
@@ -972,11 +972,11 @@ namespace dlib
     struct layer_helper
     {
         static T& makeT();
-        using next_type = typename std::remove_reference<decltype(makeT().sub_net())>::type;
+        using next_type = typename std::remove_reference<decltype(makeT().subnet())>::type;
         using type = typename layer_helper<i-1,next_type>::type;
         static type& layer(T& n)
         {
-            return layer_helper<i-1,next_type>::layer(n.sub_net());
+            return layer_helper<i-1,next_type>::layer(n.subnet());
         }
     };

     template <typename T>
@@ -993,17 +993,17 @@ namespace dlib
     struct layer_helper_match
     {
         static T& makeT();
-        using next_type = typename std::remove_reference<decltype(makeT().sub_net())>::type;
+        using next_type = typename std::remove_reference<decltype(makeT().subnet())>::type;
         using type = typename layer_helper_match<Match,next_type,i>::type;
         static type& layer(T& n)
         {
-            return layer_helper_match<Match,next_type,i>::layer(n.sub_net());
+            return layer_helper_match<Match,next_type,i>::layer(n.subnet());
         }
     };

     // This overload catches add_layer and add_loss_layer templates.
     template <template<typename> class Match, typename T, unsigned int i>
     struct layer_helper_match<Match,T,i,
-        typename std::enable_if<std::is_same<const T,const Match<typename T::sub_net_type>>::value>::type>
+        typename std::enable_if<std::is_same<const T,const Match<typename T::subnet_type>>::value>::type>
     {
         using type = typename layer_helper<i,T>::type;
         static type& layer(T& n)
@@ -1022,11 +1022,11 @@ namespace dlib
             return layer_helper<i,T>::layer(n);
         }
     };

-    // This overload catches sub_net_wrapper templates.
+    // This overload catches subnet_wrapper templates.
     template <template<typename> class Match, typename T, unsigned int i>
     struct layer_helper_match<Match,T,i,
         typename std::enable_if<std::is_same<const typename T::wrapped_type,
-            const Match<typename T::wrapped_type::sub_net_type>>::value>::type>
+            const Match<typename T::wrapped_type::subnet_type>>::value>::type>
     {
         using type = typename layer_helper<i,T>::type;
         static type& layer(T& n)
@@ -1056,19 +1056,19 @@ namespace dlib
 // ----------------------------------------------------------------------------------------

-    template <template<typename> class TAG_TYPE, typename SUB_NET>
+    template <template<typename> class TAG_TYPE, typename SUBNET>
     class add_skip_layer
     {
         /*!
             WHAT THIS OBJECT REPRESENTS
-                This object draws its inputs from layer<TAG_TYPE>(SUB_NET())
+                This object draws its inputs from layer<TAG_TYPE>(SUBNET())
                 and performs the identity transform.
         !*/
     public:
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
-        const static size_t num_layers = sub_net_type::num_layers + 1;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
+        const static size_t num_layers = subnet_type::num_layers + 1;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
@@ -1081,14 +1081,14 @@ namespace dlib
         template <typename T>
         add_skip_layer(const add_skip_layer<TAG_TYPE,T>& item) :
-            sub_network(item.sub_net())
+            subnetwork(item.subnet())
         {}

         template <typename ...T>
         add_skip_layer(T ...args) :
-            sub_network(std::move(args)...)
+            subnetwork(std::move(args)...)
         {
         }
@@ -1099,7 +1099,7 @@ namespace dlib
             resizable_tensor& data
         ) const
         {
-            sub_network.to_tensor(ibegin,iend,data);
+            subnetwork.to_tensor(ibegin,iend,data);
         }

         template <typename input_iterator>
@@ -1108,81 +1108,81 @@ namespace dlib
             input_iterator iend
         )
         {
-            sub_network(ibegin,iend);
-            return layer<TAG_TYPE>(sub_network).get_output();
+            subnetwork(ibegin,iend);
+            return layer<TAG_TYPE>(subnetwork).get_output();
         }

         const tensor& operator() (const input_type& x)
         {
-            sub_network(x);
-            return layer<TAG_TYPE>(sub_network).get_output();
+            subnetwork(x);
+            return layer<TAG_TYPE>(subnetwork).get_output();
         }

         const tensor& forward(const tensor& x)
         {
-            sub_network.forward(x);
-            return layer<TAG_TYPE>(sub_network).get_output();
+            subnetwork.forward(x);
+            return layer<TAG_TYPE>(subnetwork).get_output();
         }

         const tensor& get_output() const
         {
-            return layer<TAG_TYPE>(sub_network).get_output();
+            return layer<TAG_TYPE>(subnetwork).get_output();
         }

         tensor& get_gradient_input()
         {
-            return layer<TAG_TYPE>(sub_network).get_gradient_input();
+            return layer<TAG_TYPE>(subnetwork).get_gradient_input();
         }

         template <typename solver_type>
         void update(const tensor& x, sstack<solver_type,num_layers>& solvers)
         {
-            sub_network.update(x, solvers.pop());
+            subnetwork.update(x, solvers.pop());
         }

-        const sub_net_type& sub_net() const
+        const subnet_type& subnet() const
         {
-            return sub_network;
+            return subnetwork;
         }

-        sub_net_type& sub_net()
+        subnet_type& subnet()
         {
-            return sub_network;
+            return subnetwork;
         }

         void clean()
         {
-            sub_network.clean();
+            subnetwork.clean();
         }

     private:
-        sub_net_type sub_network;
+        subnet_type subnetwork;
     };

     template <template<typename> class T, typename U>
     struct is_nonloss_layer_type<add_skip_layer<T,U>> : std::true_type {};

-    template <typename SUB_NET> using tag1  = add_tag_layer< 1, SUB_NET>;
-    template <typename SUB_NET> using tag2  = add_tag_layer< 2, SUB_NET>;
-    template <typename SUB_NET> using tag3  = add_tag_layer< 3, SUB_NET>;
-    template <typename SUB_NET> using tag4  = add_tag_layer< 4, SUB_NET>;
-    template <typename SUB_NET> using tag5  = add_tag_layer< 5, SUB_NET>;
-    template <typename SUB_NET> using tag6  = add_tag_layer< 6, SUB_NET>;
-    template <typename SUB_NET> using tag7  = add_tag_layer< 7, SUB_NET>;
-    template <typename SUB_NET> using tag8  = add_tag_layer< 8, SUB_NET>;
-    template <typename SUB_NET> using tag9  = add_tag_layer< 9, SUB_NET>;
-    template <typename SUB_NET> using tag10 = add_tag_layer<10, SUB_NET>;
-    template <typename SUB_NET> using skip1  = add_skip_layer<tag1, SUB_NET>;
-    template <typename SUB_NET> using skip2  = add_skip_layer<tag2, SUB_NET>;
-    template <typename SUB_NET> using skip3  = add_skip_layer<tag3, SUB_NET>;
-    template <typename SUB_NET> using skip4  = add_skip_layer<tag4, SUB_NET>;
-    template <typename SUB_NET> using skip5  = add_skip_layer<tag5, SUB_NET>;
-    template <typename SUB_NET> using skip6  = add_skip_layer<tag6, SUB_NET>;
-    template <typename SUB_NET> using skip7  = add_skip_layer<tag7, SUB_NET>;
-    template <typename SUB_NET> using skip8  = add_skip_layer<tag8, SUB_NET>;
-    template <typename SUB_NET> using skip9  = add_skip_layer<tag9, SUB_NET>;
-    template <typename SUB_NET> using skip10 = add_skip_layer<tag10, SUB_NET>;
+    template <typename SUBNET> using tag1  = add_tag_layer< 1, SUBNET>;
+    template <typename SUBNET> using tag2  = add_tag_layer< 2, SUBNET>;
+    template <typename SUBNET> using tag3  = add_tag_layer< 3, SUBNET>;
+    template <typename SUBNET> using tag4  = add_tag_layer< 4, SUBNET>;
+    template <typename SUBNET> using tag5  = add_tag_layer< 5, SUBNET>;
+    template <typename SUBNET> using tag6  = add_tag_layer< 6, SUBNET>;
+    template <typename SUBNET> using tag7  = add_tag_layer< 7, SUBNET>;
+    template <typename SUBNET> using tag8  = add_tag_layer< 8, SUBNET>;
+    template <typename SUBNET> using tag9  = add_tag_layer< 9, SUBNET>;
+    template <typename SUBNET> using tag10 = add_tag_layer<10, SUBNET>;
+    template <typename SUBNET> using skip1  = add_skip_layer<tag1, SUBNET>;
+    template <typename SUBNET> using skip2  = add_skip_layer<tag2, SUBNET>;
+    template <typename SUBNET> using skip3  = add_skip_layer<tag3, SUBNET>;
+    template <typename SUBNET> using skip4  = add_skip_layer<tag4, SUBNET>;
+    template <typename SUBNET> using skip5  = add_skip_layer<tag5, SUBNET>;
+    template <typename SUBNET> using skip6  = add_skip_layer<tag6, SUBNET>;
+    template <typename SUBNET> using skip7  = add_skip_layer<tag7, SUBNET>;
+    template <typename SUBNET> using skip8  = add_skip_layer<tag8, SUBNET>;
+    template <typename SUBNET> using skip9  = add_skip_layer<tag9, SUBNET>;
+    template <typename SUBNET> using skip10 = add_skip_layer<tag10, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -1199,10 +1199,10 @@ namespace dlib
                 data[i] = rnd.get_random_gaussian()*sigma;
         }

-        class test_layer_sub_net
+        class test_layer_subnet
         {
         public:
-            test_layer_sub_net (
+            test_layer_subnet (
                 dlib::rand& rnd_
             ) : rnd(rnd_)
             {
@@ -1225,10 +1225,10 @@ namespace dlib
             const tensor& get_output() const { return output; }
-            const test_layer_sub_net& sub_net() const { init_sub(); return *sub; }
+            const test_layer_subnet& subnet() const { init_sub(); return *sub; }

             tensor& get_gradient_input() { return gradient_input; }
-            test_layer_sub_net& sub_net() { init_sub(); return *sub; }
+            test_layer_subnet& subnet() { init_sub(); return *sub; }
@@ -1245,7 +1245,7 @@ namespace dlib
             if (i < output.size())
                 return output.host()[i];
             else
-                return sub_net().get_output_element(i-output.size());
+                return subnet().get_output_element(i-output.size());
         }

         float get_gradient_input_element (unsigned long i) const
@@ -1253,21 +1253,21 @@ namespace dlib
             if (i < gradient_input.size())
                 return gradient_input.host()[i];
             else
-                return sub_net().get_gradient_input_element(i-gradient_input.size());
+                return subnet().get_gradient_input_element(i-gradient_input.size());
         }

         private:
             // We lazily initialize sub-layers as needed when someone tries to call
-            // sub_net()
+            // subnet()
             void init_sub() const
             {
                 if (!sub)
-                    sub.reset(new test_layer_sub_net(rnd));
+                    sub.reset(new test_layer_subnet(rnd));
             }

             dlib::rand& rnd;
-            mutable std::unique_ptr<test_layer_sub_net> sub;
+            mutable std::unique_ptr<test_layer_subnet> sub;
             resizable_tensor output;
             resizable_tensor gradient_input;
         };
@@ -1295,12 +1295,12 @@ namespace dlib
         using namespace timpl;
         // Do some setup
         dlib::rand rnd;
-        test_layer_sub_net sub(rnd);
+        test_layer_subnet sub(rnd);
         resizable_tensor output, out2, out3;
-        // Run setup() and forward() as well to make sure any calls to sub_net() have
+        // Run setup() and forward() as well to make sure any calls to subnet() have
         // happened before we start assuming we know how many data elements there are
-        // (since we do a lazy layer creation thing based on calls to sub_net() inside
-        // test_layer_sub_net).
+        // (since we do a lazy layer creation thing based on calls to subnet() inside
+        // test_layer_subnet).
         l.setup(sub);
         l.forward(sub, output);
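The tag and skip aliases at the end of core.h are where the rename is most visible in user code. A minimal sketch of how they are meant to compose under the new spelling (the network below is illustrative, not taken from this commit; the input<matrix<float>> input layer type is an assumption, as are default-constructed layer parameters):

// Illustrative only: tag1 marks a point in the stack, and skip1 later draws
// its output from layer<tag1>(subnetwork), i.e. it jumps back to that point.
#include <dlib/dnn.h>
using namespace dlib;

using net_type = loss_binary_hinge<
                 fc<
                 skip1<
                 relu<
                 fc<
                 tag1<
                 relu<
                 fc<
                 input<matrix<float>>
                 >>>>>>>>;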
dlib/dnn/core_abstract.h
@@ -124,7 +124,7 @@ namespace dlib
     template <
         typename LAYER_DETAILS,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_layer
     {
@@ -133,26 +133,26 @@ namespace dlib
                 - Must be a type that implements the EXAMPLE_LAYER_ interface defined in
                   layers_abstract.h
-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET implements the EXAMPLE_INPUT_LAYER interface defined in
+                    - SUBNET implements the EXAMPLE_INPUT_LAYER interface defined in
                       input_abstract.h.
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                Stacks a new layer, defined by LAYER_DETAILS, on top of SUB_NET type.
+                Stacks a new layer, defined by LAYER_DETAILS, on top of SUBNET type.
         !*/

     public:
         typedef LAYER_DETAILS layer_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
-        // If SUB_NET is an input layer then num_layers == 1, otherwise it has the
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
+        // If SUBNET is an input layer then num_layers == 1, otherwise it has the
         // definition shown here:
-        const static size_t num_layers = sub_net_type::num_layers + 1;
+        const static size_t num_layers = subnet_type::num_layers + 1;

         add_layer(
         );
@@ -172,7 +172,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(item.layer_details())
-                - #sub_net() == sub_net_type(item.sub_net())
+                - #subnet() == subnet_type(item.subnet())
         !*/

         template <typename ...T>
@@ -183,7 +183,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(layer_det)
-                - #sub_net() == sub_net_type(args)
+                - #subnet() == subnet_type(args)
         !*/

         template <typename ...T>
@@ -194,7 +194,7 @@ namespace dlib
         /*!
             ensures
                 - #layer_details() == layer_details_type(layer_det)
-                - #sub_net() == sub_net_type(args)
+                - #subnet() == subnet_type(args)
         !*/

         template <typename input_iterator>
@@ -211,7 +211,7 @@ namespace dlib
                 - #data.num_samples() == distance(ibegin,iend)*sample_expansion_factor.
                 - Invokes data.async_copy_to_device() so that the data begins transferring
                   to the device.
-                - Ultimately this function just calls sub_net().sub_net()...sub_net().to_tensor(ibegin,iend,data).
+                - Ultimately this function just calls subnet().subnet()...subnet().to_tensor(ibegin,iend,data).
         !*/

         template <typename input_iterator>
@@ -247,17 +247,17 @@ namespace dlib
             ensures
                 - Runs x through the network and returns the results.  In particular, this
                   function performs the equivalent of:
-                    sub_net().forward(x);
+                    subnet().forward(x);
                     if (this is the first time forward() has been called) then
-                        layer_details().setup(sub_net());
-                    layer_details().forward(sub_net(), get_output());
+                        layer_details().setup(subnet());
+                    layer_details().forward(subnet(), get_output());
                 - The return value from this function is also available in #get_output().
                 - have_same_dimensions(#get_gradient_input(), #get_output()) == true
                 - All elements of #get_gradient_input() are set to 0.
         !*/
         {
-            sub_network.forward(x);
-            const dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            subnetwork.forward(x);
+            const dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             if (!this_layer_setup_called)
             {
                 details.setup(wsub);
@@ -303,18 +303,18 @@ namespace dlib
                   to some loss.
         !*/
         {
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub_network);
+            dimpl::subnet_wrapper<subnet_type> wsub(subnetwork);
             params_grad.copy_size(details.get_layer_params());
             params_grad = 0;
             details.backward(get_gradient_input(), wsub, static_cast<tensor&>(params_grad));
             // Don't try to adjust the parameters if this layer doesn't have any.
             if (params_grad.size() != 0)
                 solvers.top()(details, static_cast<const tensor&>(params_grad));
-            sub_network.update(x, solvers.pop());
+            subnetwork.update(x, solvers.pop());
         }

-        const sub_net_type& sub_net() const { return sub_network; }
-        sub_net_type& sub_net() { return sub_network; }
+        const subnet_type& subnet() const { return subnetwork; }
+        subnet_type& subnet() { return subnetwork; }
         const layer_details_type& layer_details() const { return details; }
         layer_details_type& layer_details() { return details; }
@@ -332,7 +332,7 @@ namespace dlib
     template <
         typename LOSS_DETAILS,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_loss_layer
     {
@@ -340,26 +340,26 @@ namespace dlib
             REQUIREMENTS ON LOSS_DETAILS
                 - Must be a type that implements the EXAMPLE_LOSS_LAYER_ interface defined
                   in loss_abstract.h
-                - LOSS_DETAILS::sample_expansion_factor == SUB_NET::sample_expansion_factor
+                - LOSS_DETAILS::sample_expansion_factor == SUBNET::sample_expansion_factor
                   i.e. The loss layer and input layer must agree on the sample_expansion_factor.

-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                - Adds a loss layer, defined by LOSS_DETAILS, on top of SUB_NET.
+                - Adds a loss layer, defined by LOSS_DETAILS, on top of SUBNET.
         !*/

     public:
         typedef LOSS_DETAILS loss_details_type;
-        typedef SUB_NET sub_net_type;
-        typedef typename sub_net_type::input_type input_type;
+        typedef SUBNET subnet_type;
+        typedef typename subnet_type::input_type input_type;
         // Note that the loss layer doesn't count as an additional layer.
-        const static size_t num_layers = sub_net_type::num_layers;
-        const static unsigned int sample_expansion_factor = sub_net_type::sample_expansion_factor;
+        const static size_t num_layers = subnet_type::num_layers;
+        const static unsigned int sample_expansion_factor = subnet_type::sample_expansion_factor;
         // If LOSS_DETAILS is an unsupervised loss then label_type==no_label_type.
         // Otherwise it is defined as follows:
         typedef typename LOSS_DETAILS::label_type label_type;
@@ -379,7 +379,7 @@ namespace dlib
             const add_loss_layer<T,U>& item
         ) :
             loss(item.loss_details()),
-            sub(item.sub_net())
+            sub(item.subnet())
         {}

         template <typename ...T>
@@ -450,7 +450,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             return loss.compute_loss(temp_tensor, lbegin, wsub);
         }
@@ -470,7 +470,7 @@ namespace dlib
         {
             sub.to_tensor(ibegin,iend,temp_tensor);
             sub.forward(temp_tensor);
-            dimpl::sub_net_wrapper<sub_net_type> wsub(sub);
+            dimpl::subnet_wrapper<subnet_type> wsub(sub);
             double l = loss.compute_loss(temp_tensor, lbegin, wsub);
             sub.update(temp_tensor, solvers);
             return l;
@@ -483,8 +483,8 @@ namespace dlib
             sstack<solver_type,num_layers>& solvers
         );

-        const sub_net_type& sub_net() const { return sub; }
-        sub_net_type& sub_net() { return sub; }
+        const subnet_type& subnet() const { return sub; }
+        subnet_type& subnet() { return sub; }
         const loss_details_type& loss_details() const { return loss; }
         loss_details_type& loss_details() { return loss; }
@@ -509,7 +509,7 @@ namespace dlib
     private:
         loss_details_type loss;
-        sub_net_type sub;
+        subnet_type sub;
         // These two objects don't logically contribute to the state of this object.  They
         // are here to prevent them from being reallocated over and over.
@@ -527,21 +527,21 @@ namespace dlib
     template <
         unsigned long ID,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_tag_layer
     {
         /*!
-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET implements the EXAMPLE_INPUT_LAYER interface defined in
+                    - SUBNET implements the EXAMPLE_INPUT_LAYER interface defined in
                       input_abstract.h.
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                This object draws its inputs from sub_net() and performs the identity
+                This object draws its inputs from subnet() and performs the identity
                 transform.  This means it is a no-op and its presence does not change
                 the behavior of the network.  It exists solely to be used by add_skip_layer
                 to reference a particular part of a network.
@@ -549,48 +549,48 @@ namespace dlib
         !*/
     };

-    template <typename SUB_NET> using tag1  = add_tag_layer< 1, SUB_NET>;
-    template <typename SUB_NET> using tag2  = add_tag_layer< 2, SUB_NET>;
-    template <typename SUB_NET> using tag3  = add_tag_layer< 3, SUB_NET>;
-    template <typename SUB_NET> using tag4  = add_tag_layer< 4, SUB_NET>;
-    template <typename SUB_NET> using tag5  = add_tag_layer< 5, SUB_NET>;
-    template <typename SUB_NET> using tag6  = add_tag_layer< 6, SUB_NET>;
-    template <typename SUB_NET> using tag7  = add_tag_layer< 7, SUB_NET>;
-    template <typename SUB_NET> using tag8  = add_tag_layer< 8, SUB_NET>;
-    template <typename SUB_NET> using tag9  = add_tag_layer< 9, SUB_NET>;
-    template <typename SUB_NET> using tag10 = add_tag_layer<10, SUB_NET>;
+    template <typename SUBNET> using tag1  = add_tag_layer< 1, SUBNET>;
+    template <typename SUBNET> using tag2  = add_tag_layer< 2, SUBNET>;
+    template <typename SUBNET> using tag3  = add_tag_layer< 3, SUBNET>;
+    template <typename SUBNET> using tag4  = add_tag_layer< 4, SUBNET>;
+    template <typename SUBNET> using tag5  = add_tag_layer< 5, SUBNET>;
+    template <typename SUBNET> using tag6  = add_tag_layer< 6, SUBNET>;
+    template <typename SUBNET> using tag7  = add_tag_layer< 7, SUBNET>;
+    template <typename SUBNET> using tag8  = add_tag_layer< 8, SUBNET>;
+    template <typename SUBNET> using tag9  = add_tag_layer< 9, SUBNET>;
+    template <typename SUBNET> using tag10 = add_tag_layer<10, SUBNET>;

 // ----------------------------------------------------------------------------------------

     template <
         template<typename> class TAG_TYPE,
-        typename SUB_NET
+        typename SUBNET
         >
     class add_skip_layer
     {
         /*!
-            REQUIREMENTS ON SUB_NET
+            REQUIREMENTS ON SUBNET
                 - One of the following must be true:
-                    - SUB_NET is an add_layer object.
-                    - SUB_NET is an add_tag_layer object.
-                    - SUB_NET is an add_skip_layer object.
+                    - SUBNET is an add_layer object.
+                    - SUBNET is an add_tag_layer object.
+                    - SUBNET is an add_skip_layer object.

             WHAT THIS OBJECT REPRESENTS
-                This object draws its inputs from layer<TAG_TYPE>(sub_net())
+                This object draws its inputs from layer<TAG_TYPE>(subnet())
                 and performs the identity transform.
         !*/
     };

-    template <typename SUB_NET> using skip1  = add_skip_layer<tag1, SUB_NET>;
-    template <typename SUB_NET> using skip2  = add_skip_layer<tag2, SUB_NET>;
-    template <typename SUB_NET> using skip3  = add_skip_layer<tag3, SUB_NET>;
-    template <typename SUB_NET> using skip4  = add_skip_layer<tag4, SUB_NET>;
-    template <typename SUB_NET> using skip5  = add_skip_layer<tag5, SUB_NET>;
-    template <typename SUB_NET> using skip6  = add_skip_layer<tag6, SUB_NET>;
-    template <typename SUB_NET> using skip7  = add_skip_layer<tag7, SUB_NET>;
-    template <typename SUB_NET> using skip8  = add_skip_layer<tag8, SUB_NET>;
-    template <typename SUB_NET> using skip9  = add_skip_layer<tag9, SUB_NET>;
-    template <typename SUB_NET> using skip10 = add_skip_layer<tag10, SUB_NET>;
+    template <typename SUBNET> using skip1  = add_skip_layer<tag1, SUBNET>;
+    template <typename SUBNET> using skip2  = add_skip_layer<tag2, SUBNET>;
+    template <typename SUBNET> using skip3  = add_skip_layer<tag3, SUBNET>;
+    template <typename SUBNET> using skip4  = add_skip_layer<tag4, SUBNET>;
+    template <typename SUBNET> using skip5  = add_skip_layer<tag5, SUBNET>;
+    template <typename SUBNET> using skip6  = add_skip_layer<tag6, SUBNET>;
+    template <typename SUBNET> using skip7  = add_skip_layer<tag7, SUBNET>;
+    template <typename SUBNET> using skip8  = add_skip_layer<tag8, SUBNET>;
+    template <typename SUBNET> using skip9  = add_skip_layer<tag9, SUBNET>;
+    template <typename SUBNET> using skip10 = add_skip_layer<tag10, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -605,16 +605,16 @@ namespace dlib
         requires
             - net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or add_tag_layer.
         ensures
-            - This function chains together i calls to n.sub_net() and returns the
+            - This function chains together i calls to n.subnet() and returns the
               result.  So for example:
                 - if (i == 0)
                     - returns n
                 - else if (i == 1)
-                    - returns n.sub_net()
+                    - returns n.subnet()
                 - else if (i == 2)
-                    - returns n.sub_net().sub_net()
+                    - returns n.subnet().subnet()
                 - else if (i == 3)
-                    - returns n.sub_net().sub_net().sub_net()
+                    - returns n.subnet().subnet().subnet()
                 - else
                     - etc.
     !*/
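The layer<i>() spec at the end of core_abstract.h is the other place the rename shows through in user code. A sketch of the access pattern it describes (net is any network object built from these templates; the function below is hypothetical and only uses calls documented in this diff):

#include <dlib/dnn.h>
using namespace dlib;

// Sketch: peel off layers by index, per the layer<i>() spec above.
template <typename net_type>
void inspect (net_type& net)
{
    auto& whole    = layer<0>(net);   // i == 0: net itself
    auto& two_down = layer<2>(net);   // i == 2: net.subnet().subnet()
    const tensor& out = two_down.get_output();  // output two layers down
    (void)whole; (void)out;
}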
dlib/dnn/layers.h
@@ -23,20 +23,20 @@ namespace dlib
         con_()
         {}

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             // TODO
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             // TODO
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // TODO
         }
@@ -49,8 +49,8 @@ namespace dlib
         resizable_tensor params;
     };

-    template <typename SUB_NET>
-    using con = add_layer<con_, SUB_NET>;
+    template <typename SUBNET>
+    using con = add_layer<con_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -71,8 +71,8 @@ namespace dlib
         unsigned long get_num_outputs () const { return num_outputs; }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(num_inputs, num_outputs);
@@ -82,16 +82,16 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.set_size(sub.get_output().num_samples(), num_outputs);
             output = mat(sub.get_output())*mat(params);
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // d1*W*p1 + d2*W*p2
             // total gradient = [d1*W; d2*W; d3*W; ...] == D*W
@@ -116,8 +116,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -128,20 +128,20 @@ namespace dlib
         {
         }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.copy_size(sub.get_output());
             output = lowerbound(mat(sub.get_output()), 0);
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             const float* grad = gradient_input.host();
             const float* in = sub.get_output().host();
@@ -163,8 +163,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -176,8 +176,8 @@ namespace dlib
         }

-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(1, num_inputs);
@@ -189,8 +189,8 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }

-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == params.size(),"");
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == num_inputs,"");
@@ -208,8 +208,8 @@ namespace dlib
             }
         }

-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             params_grad += sum_rows(pointwise_multiply(mat(sub.get_output()), mat(gradient_input)));
@@ -230,8 +230,8 @@ namespace dlib
         dlib::rand rnd;
     };

-    template <typename SUB_NET>
-    using multiply = add_layer<multiply_, SUB_NET>;
+    template <typename SUBNET>
+    using multiply = add_layer<multiply_, SUBNET>;

 // ----------------------------------------------------------------------------------------
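Because each details class (con_, fc_, relu_, multiply_) is paired with an add_layer alias, whole networks are single type expressions. A minimal sketch of building such a stack under the new spelling (illustrative only; the input<matrix<float>> input layer and the default-constructibility of the layers are assumptions, not guaranteed by this commit):

#include <dlib/dnn.h>
#include <vector>
using namespace dlib;

int main()
{
    // Two fully connected layers with a relu between them, composed from the
    // aliases defined in this file (sketch).
    using toy_net = loss_binary_hinge<fc<relu<fc<input<matrix<float>>>>>>;
    toy_net net;

    std::vector<matrix<float>> samples;  // would be filled with real data
    // net(samples.begin(), samples.end()) would then run the forward pass.
}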
dlib/dnn/layers_abstract.h
@@ -12,7 +12,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------

-    class SUB_NET
+    class SUBNET
     {
         /*!
             WHAT THIS OBJECT REPRESENTS
@@ -35,8 +35,8 @@ namespace dlib
     public:
         // You aren't allowed to copy subnetworks from inside a layer.
-        SUB_NET(const SUB_NET&) = delete;
-        SUB_NET& operator=(const SUB_NET&) = delete;
+        SUBNET(const SUBNET&) = delete;
+        SUBNET& operator=(const SUBNET&) = delete;

         const tensor& get_output(
         ) const;
@@ -61,21 +61,21 @@ namespace dlib
               get_gradient_input().
         !*/

-        const NEXT_SUB_NET& sub_net(
+        const NEXT_SUBNET& subnet(
         ) const;
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/

-        NEXT_SUB_NET& sub_net(
+        NEXT_SUBNET& subnet(
         );
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/
     };
@@ -126,45 +126,45 @@ namespace dlib
               allows you to easily convert between related deep neural network types.
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void setup (
-            const SUB_NET& sub
+            const SUBNET& sub
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
             ensures
                 - performs any necessary initial memory allocations and/or sets parameters
                   to their initial values prior to learning.  Therefore, calling setup
                   destroys any previously learned parameters.
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void forward(
-            const SUB_NET& sub,
+            const SUBNET& sub,
             resizable_tensor& output
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
             ensures
                 - Runs the output of the subnetwork through this layer and stores the
                   output into #output.  In particular, forward() can use any of the outputs
-                  in sub (e.g. sub.get_output(), sub.sub_net().get_output(), etc.) to
+                  in sub (e.g. sub.get_output(), sub.subnet().get_output(), etc.) to
                   compute whatever it wants.
                 - #output.num_samples() == sub.get_output().num_samples()
         !*/

-        template <typename SUB_NET>
+        template <typename SUBNET>
         void backward(
             const tensor& gradient_input,
-            SUB_NET& sub,
+            SUBNET& sub,
             tensor& params_grad
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
                 - gradient_input has the same dimensions as the output of forward(sub,output).
                 - have_same_dimensions(sub.get_gradient_input(), sub.get_output()) == true
@@ -183,7 +183,7 @@ namespace dlib
                     - for all valid I:
                         - DATA_GRADIENT_I == gradient of f(sub,get_layer_params()) with
                           respect to layer<I>(sub).get_output() (recall that forward() can
-                          draw inputs from the immediate sub layer, sub.sub_net(), or
+                          draw inputs from the immediate sub layer, sub.subnet(), or
                           any earlier layer.  So you must consider the gradients with
                           respect to all inputs drawn from sub)
                   Finally, backward() adds these gradients into the output by performing:
@@ -211,8 +211,8 @@ namespace dlib
     // For each layer you define, always define an add_layer template so that layers can be
     // easily composed.  Moreover, the convention is that the layer class ends with an _
     // while the add_layer template has the same name but without the trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUBNET>;

 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
@@ -254,9 +254,9 @@ namespace dlib
                 - The rest of the dimensions of T will be 1.
         !*/

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -265,8 +265,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -277,9 +277,9 @@ namespace dlib
         relu_(
         );

-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -288,8 +288,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;

 // ----------------------------------------------------------------------------------------
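Given the interface spelled out above, a new layer under the renamed spec looks like the following. This is a minimal sketch, not part of the commit: a parameter-free identity layer, using only calls that appear elsewhere in this diff (get_output, get_gradient_input, copy_size, mat); the += on a tensor is assumed to accept a matrix expression, as in multiply_::backward.

#include <dlib/dnn.h>
using namespace dlib;

// Sketch of a minimal layer following the EXAMPLE_LAYER_ conventions.
class ident_
{
public:
    template <typename SUBNET>
    void setup (const SUBNET& /*sub*/)
    {
        // no parameters to allocate
    }

    template <typename SUBNET>
    void forward (const SUBNET& sub, resizable_tensor& output)
    {
        output.copy_size(sub.get_output());
        output = mat(sub.get_output());   // identity transform
    }

    template <typename SUBNET>
    void backward (const tensor& gradient_input, SUBNET& sub, tensor& /*params_grad*/)
    {
        // d(identity)/dx == 1, so just pass the gradient through.
        sub.get_gradient_input() += mat(gradient_input);
    }

    const tensor& get_layer_params () const { return params; }
    tensor& get_layer_params () { return params; }

private:
    resizable_tensor params;   // stays empty: this layer has no parameters
};

// Per the convention noted above, pair the details class with an add_layer alias.
template <typename SUBNET>
using ident = add_layer<ident_, SUBNET>;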
dlib/dnn/loss.h
@@ -43,12 +43,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             const tensor& output_tensor = sub.get_output();
@@ -83,8 +83,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;

 // ----------------------------------------------------------------------------------------
@@ -95,11 +95,11 @@ namespace dlib
         const static unsigned int sample_expansion_factor = 1;

         template <
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             return 0;
@@ -107,8 +107,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_no_label = add_loss_layer<loss_no_label_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_no_label = add_loss_layer<loss_no_label_, SUBNET>;

 // ----------------------------------------------------------------------------------------
dlib/dnn/loss_abstract.h
@@ -54,7 +54,7 @@ namespace dlib
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - sub.get_output().num_samples()%sample_expansion_factor == 0
                 - All outputs in each layer of sub have the same number of samples.  That
@@ -73,16 +73,16 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - input_tensor was given as input to the network sub and the outputs are
                   now visible in layer<i>(sub).get_output(), for all valid i.
@@ -114,8 +114,8 @@ namespace dlib
     // layers can be easily composed.  Moreover, the convention is that the layer class
     // ends with an _ while the add_loss_layer template has the same name but without the
     // trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUBNET>;

 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
@@ -151,12 +151,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
@@ -169,8 +169,8 @@ namespace dlib
     };

-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;

 // ----------------------------------------------------------------------------------------
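A custom loss under the renamed spec follows the same _/alias convention. The sketch below mirrors the loss_no_label_ class from loss.h above; it is illustrative only, and a real loss would also write gradients into sub.get_gradient_input().

#include <dlib/dnn.h>
using namespace dlib;

// Sketch of an unsupervised loss following the EXAMPLE_LOSS_LAYER_ conventions.
class loss_zero_
{
public:
    const static unsigned int sample_expansion_factor = 1;

    template <typename SUBNET>
    double compute_loss (
        const tensor& /*input_tensor*/,
        SUBNET& /*sub*/
    ) const
    {
        // A real loss would read sub.get_output(), fill in
        // sub.get_gradient_input(), and return the average loss.
        return 0;
    }
};

template <typename SUBNET>
using loss_zero = add_loss_layer<loss_zero_, SUBNET>;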