dlib commit fa812881, authored Sep 28, 2015 by Davis King
Just removed the _ from sub_net.
parent 31757a21
Showing 6 changed files with 80 additions and 80 deletions:

    dlib/dnn/core.h             +0   -0
    dlib/dnn/core_abstract.h    +0   -0
    dlib/dnn/layers.h           +32  -32
    dlib/dnn/layers_abstract.h  +30  -30
    dlib/dnn/loss.h             +8   -8
    dlib/dnn/loss_abstract.h    +10  -10
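Two distinct things are renamed in this commit. The SUB_NET template parameter becomes SUBNET, which is purely cosmetic for callers (template parameter names are placeholders), while the sub_net() accessor documented in layers_abstract.h becomes subnet(), which any code that walks a network must follow. A hypothetical caller, for illustration only (peek() is not part of this commit, and the surrounding file is assumed to already include the dnn headers):

// Hypothetical helper, shown only to illustrate the renamed accessor.
template <typename SUBNET>
void peek(const SUBNET& sub)
{
    // Before this commit:  sub.sub_net().get_output();
    // After this commit:
    const auto& next_output = sub.subnet().get_output();
    (void)next_output; // e.g. inspect the downstream layer's output tensor
}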
dlib/dnn/core.h
This diff is collapsed.

dlib/dnn/core_abstract.h
This diff is collapsed.
dlib/dnn/layers.h
@@ -23,20 +23,20 @@ namespace dlib
         con_(
         ) {}
 
-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             // TODO
         }
 
-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             // TODO
         }
 
-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // TODO
         }
@@ -49,8 +49,8 @@ namespace dlib
         resizable_tensor params;
     };
 
-    template <typename SUB_NET>
-    using con = add_layer<con_, SUB_NET>;
+    template <typename SUBNET>
+    using con = add_layer<con_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
@@ -71,8 +71,8 @@ namespace dlib
         unsigned long get_num_outputs (
         ) const { return num_outputs; }
 
-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(num_inputs, num_outputs);
@@ -82,16 +82,16 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }
 
-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.set_size(sub.get_output().num_samples(), num_outputs);
             output = mat(sub.get_output())*mat(params);
         }
 
-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             // d1*W*p1 + d2*W*p2
             // total gradient = [d1*W; d2*W; d3*W; ...] == D*W
@@ -116,8 +116,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
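The comment at the top of fc_::backward ("total gradient = [d1*W; d2*W; d3*W; ...] == D*W") compresses the fully connected chain rule. Spelled out in my own notation (not part of the diff): the forward pass above computes $Y = XW$, where $X$ is mat(sub.get_output()) with one sample per row and $W$ is mat(params). Given the incoming gradient $D = \partial L/\partial Y$, with one row $d_i$ per sample, the standard identities are

$$\frac{\partial L}{\partial W} = X^{\top} D, \qquad \frac{\partial L}{\partial X} = D\,W^{\top} = \begin{bmatrix} d_1 W^{\top} \\ d_2 W^{\top} \\ \vdots \end{bmatrix},$$

so each sample's input gradient is its own row of $D W^{\top}$, which is what the stacked "[d1*W; d2*W; ...]" in the comment denotes (the comment elides the transpose).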
@@ -128,20 +128,20 @@ namespace dlib
         {
         }
 
-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
         }
 
-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             output.copy_size(sub.get_output());
             output = lowerbound(mat(sub.get_output()), 0);
         }
 
-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
         {
             const float* grad = gradient_input.host();
             const float* in = sub.get_output().host();
@@ -163,8 +163,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
@@ -176,8 +176,8 @@ namespace dlib
         }
 
-        template <typename SUB_NET>
-        void setup (const SUB_NET& sub)
+        template <typename SUBNET>
+        void setup (const SUBNET& sub)
         {
             num_inputs = sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k();
             params.set_size(1, num_inputs);
@@ -189,8 +189,8 @@ namespace dlib
             randomize_parameters(params, num_inputs+num_outputs, rnd);
         }
 
-        template <typename SUB_NET>
-        void forward(const SUB_NET& sub, resizable_tensor& output)
+        template <typename SUBNET>
+        void forward(const SUBNET& sub, resizable_tensor& output)
         {
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == params.size(), "");
             DLIB_CASSERT(sub.get_output().nr()*sub.get_output().nc()*sub.get_output().k() == num_inputs, "");
@@ -208,8 +208,8 @@ namespace dlib
             }
         }
 
-        template <typename SUB_NET>
-        void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad)
+        template <typename SUBNET>
+        void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad)
        {
             params_grad += sum_rows(pointwise_multiply(mat(sub.get_output()), mat(gradient_input)));
@@ -230,8 +230,8 @@ namespace dlib
         dlib::rand rnd;
     };
 
-    template <typename SUB_NET>
-    using multiply = add_layer<multiply_, SUB_NET>;
+    template <typename SUBNET>
+    using multiply = add_layer<multiply_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
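The multiply_ layer's parameter update, params_grad += sum_rows(pointwise_multiply(mat(sub.get_output()), mat(gradient_input))), is the elementwise analogue of the fc case. In my own notation, and assuming (as the params.set_size(1, num_inputs) call above suggests) that the forward pass scales each input element by a per-element weight, $y_{ij} = w_j x_{ij}$, the gradient for each weight is

$$\frac{\partial L}{\partial w_j} = \sum_{i} x_{ij}\, g_{ij},$$

i.e. multiply the inputs and the incoming gradient elementwise and sum down each column across the sample rows, which is exactly what sum_rows(pointwise_multiply(...)) evaluates.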
dlib/dnn/layers_abstract.h
@@ -12,7 +12,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------
 
-    class SUB_NET
+    class SUBNET
     {
         /*!
             WHAT THIS OBJECT REPRESENTS
@@ -35,8 +35,8 @@ namespace dlib
     public:
         // You aren't allowed to copy subnetworks from inside a layer.
-        SUB_NET(const SUB_NET&) = delete;
-        SUB_NET& operator=(const SUB_NET&) = delete;
+        SUBNET(const SUBNET&) = delete;
+        SUBNET& operator=(const SUBNET&) = delete;
 
         const tensor& get_output(
         ) const;
@@ -61,21 +61,21 @@ namespace dlib
               get_gradient_input().
         !*/
 
-        const NEXT_SUB_NET& sub_net(
+        const NEXT_SUBNET& subnet(
         ) const;
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/
 
-        NEXT_SUB_NET& sub_net(
+        NEXT_SUBNET& subnet(
         );
         /*!
             ensures
                 - returns the subnetwork of *this network.  With respect to the diagram
-                  above, if *this was layer1 then sub_net() would return the network that
+                  above, if *this was layer1 then subnet() would return the network that
                   begins with layer2.
         !*/
     };
@@ -126,45 +126,45 @@ namespace dlib
           allows you to easily convert between related deep neural network types.
         !*/
 
-        template <typename SUB_NET>
+        template <typename SUBNET>
         void setup (
-            const SUB_NET& sub
+            const SUBNET& sub
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
             ensures
                 - performs any necessary initial memory allocations and/or sets parameters
                   to their initial values prior to learning.  Therefore, calling setup
                   destroys any previously learned parameters.
         !*/
 
-        template <typename SUB_NET>
+        template <typename SUBNET>
         void forward(
-            const SUB_NET& sub,
+            const SUBNET& sub,
             resizable_tensor& output
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
             ensures
                 - Runs the output of the subnetwork through this layer and stores the
                   output into #output.  In particular, forward() can use any of the outputs
-                  in sub (e.g. sub.get_output(), sub.sub_net().get_output(), etc.) to
+                  in sub (e.g. sub.get_output(), sub.subnet().get_output(), etc.) to
                   compute whatever it wants.
                 - #output.num_samples() == sub.get_output().num_samples()
         !*/
 
-        template <typename SUB_NET>
+        template <typename SUBNET>
         void backward(
             const tensor& gradient_input,
-            SUB_NET& sub,
+            SUBNET& sub,
             tensor& params_grad
         );
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of this file.
+                - SUBNET implements the SUBNET interface defined at the top of this file.
                 - setup() has been called.
                 - gradient_input has the same dimensions as the output of forward(sub,output).
                 - have_same_dimensions(sub.get_gradient_input(), sub.get_output()) == true
@@ -183,7 +183,7 @@ namespace dlib
                 - for all valid I:
                     - DATA_GRADIENT_I == gradient of f(sub,get_layer_params()) with
                       respect to layer<I>(sub).get_output() (recall that forward() can
-                      draw inputs from the immediate sub layer, sub.sub_net(), or
+                      draw inputs from the immediate sub layer, sub.subnet(), or
                       any earlier layer.  So you must consider the gradients with
                       respect to all inputs drawn from sub)
                 Finally, backward() adds these gradients into the output by performing:
@@ -211,8 +211,8 @@ namespace dlib
     // For each layer you define, always define an add_layer template so that layers can be
     // easily composed.  Moreover, the convention is that the layer class ends with an _
     // while the add_layer template has the same name but without the trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LAYER = add_layer<EXAMPLE_LAYER_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
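To make the EXAMPLE_LAYER_ contract above concrete, here is a minimal sketch of a user-defined layer written against the renamed interface. The identity_ class is invented for illustration (it is not part of this commit); the method signatures, the get_layer_params() pair, and the trailing-underscore alias convention all follow the documentation shown in the diff, and the tensor/matrix expressions mirror the patterns used in layers.h:

// A minimal, parameterless identity layer; a sketch, not dlib code.
class identity_
{
public:
    template <typename SUBNET>
    void setup (const SUBNET& /*sub*/)
    {
        // Nothing to allocate: this layer has no parameters.
    }

    template <typename SUBNET>
    void forward(const SUBNET& sub, resizable_tensor& output)
    {
        // Pass the subnetwork's output straight through.
        output.copy_size(sub.get_output());
        output = mat(sub.get_output());
    }

    template <typename SUBNET>
    void backward(const tensor& gradient_input, SUBNET& sub, tensor& /*params_grad*/)
    {
        // The identity's Jacobian is I, so the gradient flows back unchanged.
        sub.get_gradient_input() += mat(gradient_input);
    }

    const tensor& get_layer_params() const { return params; }
    tensor& get_layer_params() { return params; }

private:
    resizable_tensor params; // stays empty; the layer is parameterless
};

// Per the convention documented above: class ends with _, alias does not.
template <typename SUBNET>
using identity = add_layer<identity_, SUBNET>;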
@@ -254,9 +254,9 @@ namespace dlib
                 - The rest of the dimensions of T will be 1.
         !*/
 
-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -265,8 +265,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using fc = add_layer<fc_, SUB_NET>;
+    template <typename SUBNET>
+    using fc = add_layer<fc_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
@@ -277,9 +277,9 @@ namespace dlib
         relu_(
         );
 
-        template <typename SUB_NET> void setup (const SUB_NET& sub);
-        template <typename SUB_NET> void forward(const SUB_NET& sub, resizable_tensor& output);
-        template <typename SUB_NET> void backward(const tensor& gradient_input, SUB_NET& sub, tensor& params_grad);
+        template <typename SUBNET> void setup (const SUBNET& sub);
+        template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
+        template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
         const tensor& get_layer_params() const;
         tensor& get_layer_params();
         /*!
@@ -288,8 +288,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using relu = add_layer<relu_, SUB_NET>;
+    template <typename SUBNET>
+    using relu = add_layer<relu_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
dlib/dnn/loss.h
@@ -43,12 +43,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             const tensor& output_tensor = sub.get_output();
@@ -83,8 +83,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
@@ -95,11 +95,11 @@ namespace dlib
         const static unsigned int sample_expansion_factor = 1;
 
         template <
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
-            SUB_NET& sub
+            SUBNET& sub
         ) const
         {
             return 0;
@@ -107,8 +107,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using loss_no_label = add_loss_layer<loss_no_label_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_no_label = add_loss_layer<loss_no_label_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
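These add_loss_layer aliases exist so that a loss composes onto a network the same way layers do. A hypothetical network type built from the aliases touched by this commit; the input<> layer spelling is an assumption about the rest of dlib/dnn, not something this diff shows:

// Hypothetical composition; assumes an input<> layer template exists
// elsewhere in dlib/dnn.
using net_type = dlib::loss_binary_hinge<
                     dlib::fc<dlib::relu<dlib::fc<
                         dlib::input<dlib::matrix<float>>>>>>;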
dlib/dnn/loss_abstract.h
@@ -54,7 +54,7 @@ namespace dlib
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - sub.get_output().num_samples()%sample_expansion_factor == 0
                 - All outputs in each layer of sub have the same number of samples.  That
@@ -73,16 +73,16 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
            >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             requires
-                - SUB_NET implements the SUB_NET interface defined at the top of
+                - SUBNET implements the SUBNET interface defined at the top of
                   layers_abstract.h.
                 - input_tensor was given as input to the network sub and the outputs are
                   now visible in layer<i>(sub).get_output(), for all valid i.
@@ -114,8 +114,8 @@ namespace dlib
     // layers can be easily composed.  Moreover, the convention is that the layer class
     // ends with an _ while the add_loss_layer template has the same name but without the
     // trailing _.
-    template <typename SUB_NET>
-    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUB_NET>;
+    template <typename SUBNET>
+    using EXAMPLE_LOSS_LAYER = add_loss_layer<EXAMPLE_LOSS_LAYER_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
 // ----------------------------------------------------------------------------------------
@@ -151,12 +151,12 @@ namespace dlib
         template <
             typename const_label_iterator,
-            typename SUB_NET
+            typename SUBNET
             >
         double compute_loss (
             const tensor& input_tensor,
             const_label_iterator truth,
-            SUB_NET& sub
+            SUBNET& sub
         ) const;
         /*!
             This function has the same interface as EXAMPLE_LOSS_LAYER_::to_label() except
@@ -169,8 +169,8 @@ namespace dlib
     };
 
-    template <typename SUB_NET>
-    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUB_NET>;
+    template <typename SUBNET>
+    using loss_binary_hinge = add_loss_layer<loss_binary_hinge_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
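To round out the loss contract, here is a minimal sketch of a user-defined loss written against the renamed interface, following the label-free compute_loss form of loss_no_label_ shown in loss.h above. The loss_pull_to_zero_ class and its "pull the outputs toward zero" objective are invented for illustration; the gradient-accumulation pattern assumes, like the layers above, that tensors accept matrix expressions via +=:

// A sketch, not dlib code: penalize the mean squared network output.
class loss_pull_to_zero_
{
public:
    const static unsigned int sample_expansion_factor = 1;

    template <typename SUBNET>
    double compute_loss (
        const tensor& /*input_tensor*/,
        SUBNET& sub
    ) const
    {
        const tensor& output = sub.get_output();
        // loss = (1/2N) * sum(y^2), so dloss/dy = y/N.
        sub.get_gradient_input() += mat(output)/output.num_samples();
        return sum(squared(mat(output)))/(2.0*output.num_samples());
    }
};

// Same convention as the layer aliases: class ends with _, alias does not.
template <typename SUBNET>
using loss_pull_to_zero = add_loss_layer<loss_pull_to_zero_, SUBNET>;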