Commit d4da6c53 authored Nov 26, 2016 by Dennis Francis

adapt to dlib indentation style

parent af76e826
Showing 3 changed files with 53 additions and 58 deletions:

dlib/dnn/loss.h            +52  -52
dlib/dnn/loss_abstract.h    +0   -1
dlib/test/dnn.cpp           +1   -5
dlib/dnn/loss.h
@@ -1305,70 +1305,70 @@ namespace dlib
            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const
        {
            DLIB_CASSERT(sub.sample_expansion_factor() == 1);

            const tensor& output_tensor = sub.get_output();

            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());

            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                *iter++ = out_data[i];
            }
        }

        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();

            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(input_tensor.num_samples() != 0);
            DLIB_CASSERT(input_tensor.num_samples() % sub.sample_expansion_factor() == 0);
            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
                         grad.k() == 1);

            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
            double loss = 0;
            float* g = grad.host_write_only();
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                const float y = *truth++;
                const float temp1 = y - out_data[i];
                const float temp2 = scale*temp1;
                loss += 0.5*temp2*temp1;
                g[i] = -temp2;
            }
            return loss;
        }

        friend void serialize(const loss_mean_squared_& , std::ostream& out)
        {
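For reference, the loop in compute_loss_value_and_gradient above computes the batch-averaged squared error, loss = (1/N) * sum_i 0.5*(y_i - out_i)^2, and its gradient with respect to each output, g_i = -(y_i - out_i)/N. Below is a minimal standalone sketch of that arithmetic in plain C++; the sample values are invented and none of this is dlib code:

    #include <cstdio>
    #include <vector>

    // Standalone sketch of the quantity the hunk above computes.
    int main()
    {
        const std::vector<float> out = {0.5f, 2.0f, -1.0f};   // network outputs
        const std::vector<float> y   = {1.0f, 2.0f,  0.0f};   // ground truth
        const double scale = 1.0/out.size();                  // 1/N, as in the hunk

        double loss = 0;
        std::vector<float> g(out.size());
        for (size_t i = 0; i < out.size(); ++i)
        {
            const float temp1 = y[i] - out[i];
            const float temp2 = scale*temp1;
            loss += 0.5*temp2*temp1;  // accumulates 0.5*(y-out)^2 / N
            g[i] = -temp2;            // gradient w.r.t. the i-th output
        }
        std::printf("loss = %f\n", loss);  // 0.5*(0.25 + 0 + 1)/3 = 0.208333
        return 0;
    }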
@@ -1397,7 +1397,7 @@ namespace dlib
    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

// ----------------------------------------------------------------------------------------
dlib/dnn/loss_abstract.h
@@ -584,7 +584,6 @@ namespace dlib
    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

}

#endif // DLIB_DNn_LOSS_ABSTRACT_H_
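As both headers show, loss_mean_squared is a straight alias template over add_loss_layer. A small illustration of the equivalence, assuming dlib/dnn.h is included and the dlib namespace is in scope (the fc/input stack is an arbitrary example, not from this commit):

    #include <type_traits>

    // Sketch: the alias and its expansion name the same type for any subnet.
    using net_a = loss_mean_squared<fc<1, input<matrix<float>>>>;
    using net_b = add_loss_layer<loss_mean_squared_, fc<1, input<matrix<float>>>>;
    static_assert(std::is_same<net_a, net_b>::value, "alias expands directly");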
dlib/test/dnn.cpp
@@ -1758,11 +1758,7 @@ namespace
            y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
        }

        using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;

        net_type net;
        layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);

        sgd defsolver;
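To make the test fragment above concrete: with net_type defined this way, training typically proceeds through dlib's dnn_trainer. A hedged sketch of the next steps; the x/y vectors are assumed to be the ones built by the loop above, and the hyperparameter values are assumptions, not part of this commit:

        // Sketch: train the scalar regressor on x (std::vector<matrix<double>>)
        // and y (std::vector<float>).
        dnn_trainer<net_type> trainer(net, defsolver);
        trainer.set_learning_rate(0.1);        // assumed starting rate
        trainer.set_min_learning_rate(1e-6);   // stop once the rate decays this low
        trainer.set_mini_batch_size(50);       // assumed batch size
        trainer.train(x, y);                   // runs until the learning rate bottoms out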