Commit c1da9dc9 authored Oct 18, 2015 by Davis King
Fixed some warnings and errors from visual studio 2015
parent 78109ac9
Showing 3 changed files with 9 additions and 9 deletions:

dlib/dnn/core.h     +2 -2
dlib/dnn/loss.h     +2 -2
dlib/dnn/trainer.h  +5 -5
dlib/dnn/core.h
@@ -876,7 +876,7 @@ namespace dlib
                 "The loss layer and input layer must agree on the sample_expansion_factor."
             );
-        add_loss_layer() = default;
+        add_loss_layer() {};
         add_loss_layer(const add_loss_layer&) = default;
         add_loss_layer(add_loss_layer&&) = default;
         add_loss_layer& operator=(add_loss_layer&&) = default;
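
Note on the hunk above: the defaulted default constructor becomes one with an empty body, presumably because VS2015 rejected or mis-handled the `= default` form for this particular template member (the commit message only says "warnings and errors"). A minimal sketch of the workaround pattern, using a hypothetical `widget` type rather than dlib code:

    // Hypothetical illustration; 'widget' is not a dlib type.
    struct widget
    {
        // widget() = default;  // the form that apparently troubled VS2015 here
        widget() {};            // an explicit empty body is accepted everywhere

        // The other defaulted special members were left untouched by the commit.
        widget(const widget&) = default;
        widget(widget&&) = default;
        widget& operator=(widget&&) = default;
    };

    int main() { widget w; (void)w; return 0; }

The two forms are not strictly identical (a user-provided empty body makes the type non-trivially constructible), but that difference should be immaterial for this class.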
@@ -1478,7 +1478,7 @@ namespace dlib
         // ==================================================================
         // first validate the way the parameter gradients are computed
-        for (long i = 0; i < params_grad.size(); ++i)
+        for (unsigned long i = 0; i < params_grad.size(); ++i)
         {
             layer_details_type l1(l);
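
This hunk reads like a fix for MSVC's signed/unsigned comparison warning (C4018): `params_grad.size()` returns an unsigned value, so a `long` counter made the `<` comparison mix signedness. A reduced sketch of the same pattern, with `std::vector` standing in for the tensor holding `params_grad`:

    #include <vector>

    int main()
    {
        std::vector<float> params_grad(10);

        // for (long i = 0; i < params_grad.size(); ++i)        // C4018: signed/unsigned '<'
        for (unsigned long i = 0; i < params_grad.size(); ++i)  // unsigned counter, unsigned bound
            params_grad[i] = 0.0f;

        return 0;
    }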
dlib/dnn/loss.h
@@ -35,7 +35,7 @@ namespace dlib
             DLIB_CASSERT(output_tensor.num_samples()%sample_expansion_factor == 0,"");
             const float* out_data = output_tensor.host();
-            for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
                 *iter++ = out_data[i];
             }
@@ -67,7 +67,7 @@ namespace dlib
             double loss = 0;
             const float* out_data = output_tensor.host();
             float* g = grad.host();
-            for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
                 const float y = *truth++;
                 DLIB_CASSERT(y == +1 || y == -1, "y: " << y);
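
Both loss.h hunks flip the counter the other way, from `unsigned long` to `long`, which is consistent with `num_samples()` returning a signed type: here the unsigned counter was the mismatched side of the comparison. The apparent rule this commit applies is simply to match the counter's signedness to the bound's. A reduced sketch (the `num_samples` variable is a stand-in for the tensor call):

    int main()
    {
        const long num_samples = 100;  // stand-in for output_tensor.num_samples()

        // for (unsigned long i = 0; i < num_samples; ++i)  // C4018 again, mismatch reversed
        for (long i = 0; i < num_samples; ++i)              // signed counter, signed bound
        {
            // consume sample i
        }

        return 0;
    }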
dlib/dnn/trainer.h
@@ -123,7 +123,7 @@ namespace dlib
         {
             running_stats<double> rs;
-            unsigned long j = 0;
+            size_t j = 0;
             // Load two tensors worth of data at once so we can overlap the computation
             // and data transfer between the host and the device.
@@ -140,7 +140,7 @@ namespace dlib
                 j += mini_batch_size;
             }
-            unsigned long i = 0;
+            size_t i = 0;
             using namespace std::chrono;
             auto last_time = system_clock::now();
             while (i < data.size())
@@ -211,7 +211,7 @@ namespace dlib
         for (unsigned long epoch_iteration = 0; epoch_iteration < num_epochs; ++epoch_iteration)
         {
             running_stats<double> rs;
-            unsigned long j = 0;
+            size_t j = 0;
             // Load two tensors worth of data at once so we can overlap the computation
             // and data transfer between the host and the device.
@@ -228,7 +228,7 @@ namespace dlib
                 j += mini_batch_size;
             }
-            unsigned long i = 0;
+            size_t i = 0;
             using namespace std::chrono;
             auto last_time = system_clock::now();
             while (i < data.size())
@@ -318,7 +318,7 @@ namespace dlib
         }
         unsigned long num_epochs;
-        unsigned long mini_batch_size;
+        size_t mini_batch_size;
         bool verbose;
         net_type net;
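
All five trainer.h hunks replace `unsigned long` counters and members with `size_t`. The likely motive: 64-bit Windows is an LLP64 platform, where `unsigned long` is 32 bits but `std::size_t` (the type returned by `data.size()`) is 64 bits, so mixing the two draws conversion warnings in the C4267 family. Using `size_t` matches the width of `size()` on every platform. A reduced sketch, with `std::vector` standing in for the training-data container:

    #include <cstddef>
    #include <vector>

    int main()
    {
        std::vector<int> data(1000);
        const std::size_t mini_batch_size = 32;

        // unsigned long i = 0;  // only 32 bits on Win64; mixing with size() can warn (C4267)
        std::size_t i = 0;       // same width as data.size() everywhere

        while (i < data.size())
            i += mini_batch_size;

        return 0;
    }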