Commit 9eed5974, authored Aug 28, 2016 by Davis King
Cleaned up assert statements a bit.
parent bb60d061
Showing 14 changed files with 151 additions and 151 deletions
dlib/dnn/core.h              +5   -5
dlib/dnn/cpu_dlib.cpp        +0   -0
dlib/dnn/cublas_dlibapi.cpp  +4   -4
dlib/dnn/cuda_dlib.cu        +29  -29
dlib/dnn/cudnn_dlibapi.cpp   +42  -42
dlib/dnn/gpu_data.cpp        +3   -3
dlib/dnn/gpu_data.h          +3   -3
dlib/dnn/input.h             +4   -4
dlib/dnn/layers.h            +1   -1
dlib/dnn/loss.h              +29  -29
dlib/dnn/solvers.h           +4   -4
dlib/dnn/tensor.h            +13  -13
dlib/dnn/tensor_tools.cpp    +3   -3
dlib/dnn/trainer.h           +11  -11
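Every hunk below makes the same mechanical change: DLIB_CASSERT(cond, "") and DLIB_ASSERT(cond, "") calls carrying an empty message string become DLIB_CASSERT(cond) / DLIB_ASSERT(cond), so the message argument is now optional. For readers curious how a C++ assert macro can accept an optional, streamable message, here is a minimal self-contained sketch of the usual variadic-macro trick. MY_CASSERT and MY_CASSERT_IMPL are hypothetical names used only for illustration; this is not dlib's actual DLIB_CASSERT definition.

#include <cstdlib>
#include <iostream>
#include <sstream>

// The wrapper always appends a dummy "" and 0, so MY_CASSERT_IMPL's msg
// parameter is filled whether or not the caller supplied a message.
#define MY_CASSERT(...) MY_CASSERT_IMPL(__VA_ARGS__, "", 0)
#define MY_CASSERT_IMPL(cond, msg, ...)                            \
    do {                                                           \
        if (!(cond)) {                                             \
            std::ostringstream out;                                \
            out << msg;  /* msg may itself be a streamed expr */   \
            std::cerr << "Assertion failed: " << #cond << "\n"     \
                      << out.str() << std::endl;                   \
            std::abort();                                          \
        }                                                          \
    } while (0)

int main()
{
    int solvers = 3, layers = 2;
    MY_CASSERT(solvers >= layers);                          // message omitted
    MY_CASSERT(solvers >= layers, "solvers: " << solvers);  // streamed message
    return 0;
}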
dlib/dnn/core.h
@@ -861,7 +861,7 @@ namespace dlib
         template <typename solver_type>
         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
-            DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
+            DLIB_CASSERT(solvers.size()>=num_computational_layers);
             // Don't try to adjust the parameters if this layer doesn't have any or the
             // learning rate is disabled for this layer.
             if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
@@ -1158,7 +1158,7 @@ namespace dlib
         const tensor& forward (const tensor& x)
         {
             DLIB_CASSERT(sample_expansion_factor() != 0, "You must call to_tensor() before this function can be used.");
-            DLIB_CASSERT(x.num_samples()%sample_expansion_factor() == 0,"");
+            DLIB_CASSERT(x.num_samples()%sample_expansion_factor() == 0);
             subnet_wrapper wsub(x, grad_final, _sample_expansion_factor);
             if (!this_layer_setup_called)
             {
@@ -1224,7 +1224,7 @@ namespace dlib
         template <typename solver_type>
         void update_parameters(sstack<solver_type> solvers, double learning_rate)
         {
-            DLIB_CASSERT(solvers.size()>=num_computational_layers,"");
+            DLIB_CASSERT(solvers.size()>=num_computational_layers);
             // Don't try to adjust the parameters if this layer doesn't have any or the
             // learning rate is disabled for this layer.
             if (params_grad.size() != 0 && get_learning_rate_multiplier(details) != 0)
@@ -1615,7 +1615,7 @@ namespace dlib
             size_t i
         ) const
         {
-            DLIB_CASSERT(i < num_repetitions(), "");
+            DLIB_CASSERT(i < num_repetitions());
             return details[i];
         }
@@ -1623,7 +1623,7 @@ namespace dlib
             size_t i
         )
         {
-            DLIB_CASSERT(i < num_repetitions(), "");
+            DLIB_CASSERT(i < num_repetitions());
             return details[i];
         }
dlib/dnn/cpu_dlib.cpp
This diff is collapsed.
dlib/dnn/cublas_dlibapi.cpp
@@ -119,25 +119,25 @@ namespace dlib
            {
                DLIB_ASSERT(dest_nr == lhs_nc &&
                            dest_nc == rhs_nr &&
-                            lhs_nr == rhs_nc,"")
+                            lhs_nr == rhs_nc)
            }
            else if (!trans_lhs && trans_rhs)
            {
                DLIB_ASSERT(dest_nr == lhs_nr &&
                            dest_nc == rhs_nr &&
-                            lhs_nc == rhs_nc,"")
+                            lhs_nc == rhs_nc)
            }
            else if (trans_lhs && !trans_rhs)
            {
                DLIB_ASSERT(dest_nr == lhs_nc &&
                            dest_nc == rhs_nc &&
-                            lhs_nr == rhs_nr,"")
+                            lhs_nr == rhs_nr)
            }
            else
            {
                DLIB_ASSERT(dest_nr == lhs_nr &&
                            dest_nc == rhs_nc &&
-                            lhs_nc == rhs_nr,"")
+                            lhs_nc == rhs_nr)
            }
            const int k = trans_rhs ? rhs_nc : rhs_nr;
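One detail worth noting before the CUDA files: the cuBLAS wrapper above uses DLIB_ASSERT where most of the other files use DLIB_CASSERT. In dlib, DLIB_CASSERT is always checked, while DLIB_ASSERT is only active when ENABLE_ASSERTS is defined (as it is in debug builds). A rough sketch of that relationship, paraphrased rather than copied from dlib/assert.h:

#ifdef ENABLE_ASSERTS
    #define DLIB_ASSERT(...) DLIB_CASSERT(__VA_ARGS__)  // checked in debug builds
#else
    #define DLIB_ASSERT(...)                            // compiled out otherwise
#endif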
dlib/dnn/cuda_dlib.cu
@@ -173,11 +173,11 @@ namespace dlib
            DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
                dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
-                dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
+                dest.nc() == src1.nc() && src1.nc() == src2.nc() );
            const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
            DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
                (src1.num_samples()==1 || src1.num_samples()==MD) &&
-                (src2.num_samples()==1 || src2.num_samples()==MD) ,"");
+                (src2.num_samples()==1 || src2.num_samples()==MD) );
            if (dest.size() == 0)
                return;
@@ -278,7 +278,7 @@ namespace dlib
        {
            if (have_same_dimensions(dest,src1))
            {
-                DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k() ,"");
+                DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k());
                if (dest.size() == 0)
                    return;
@@ -291,8 +291,8 @@ namespace dlib
            }
            else
            {
-                DLIB_CASSERT(have_same_dimensions(src1,src2) ,"");
+                DLIB_CASSERT(have_same_dimensions(src1,src2));
-                DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k() ,"");
+                DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k());
                if (dest.size() == 0)
                    return;
@@ -404,7 +404,7 @@ namespace dlib
            const float B
        )
        {
-            DLIB_CASSERT(dest.size()==src.size() ,"");
+            DLIB_CASSERT(dest.size()==src.size());
            if (B != 0)
                launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
            else
@@ -417,7 +417,7 @@ namespace dlib
            const float A
        )
        {
-            DLIB_CASSERT(dest.size()==src.size() ,"");
+            DLIB_CASSERT(dest.size()==src.size());
            launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
        }
@@ -448,8 +448,8 @@ namespace dlib
            const float C
        )
        {
-            DLIB_CASSERT(dest.size()==src1.size() ,"");
+            DLIB_CASSERT(dest.size()==src1.size());
-            DLIB_CASSERT(dest.size()==src2.size() ,"");
+            DLIB_CASSERT(dest.size()==src2.size());
            if (C != 0)
                launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
            else
@@ -464,8 +464,8 @@ namespace dlib
            const float B
        )
        {
-            DLIB_CASSERT(dest.size()==src1.size() ,"");
+            DLIB_CASSERT(dest.size()==src1.size());
-            DLIB_CASSERT(dest.size()==src2.size() ,"");
+            DLIB_CASSERT(dest.size()==src2.size());
            launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
        }
@@ -485,7 +485,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(dest.size()==src.size() ,"");
+            DLIB_CASSERT(dest.size()==src.size());
            launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
        }
@@ -512,9 +512,9 @@ namespace dlib
            const float D
        )
        {
-            DLIB_CASSERT(dest.size()==src1.size() ,"");
+            DLIB_CASSERT(dest.size()==src1.size());
-            DLIB_CASSERT(dest.size()==src2.size() ,"");
+            DLIB_CASSERT(dest.size()==src2.size());
-            DLIB_CASSERT(dest.size()==src3.size() ,"");
+            DLIB_CASSERT(dest.size()==src3.size());
            launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
                src2.device(), src3.device(), dest.size(), A, B, C, D);
        }
@@ -544,10 +544,10 @@ namespace dlib
            const float C
        )
        {
-            DLIB_CASSERT(dest.size()==src1.size() ,"");
+            DLIB_CASSERT(dest.size()==src1.size());
-            DLIB_CASSERT(dest.size()==src2.size() ,"");
+            DLIB_CASSERT(dest.size()==src2.size());
-            DLIB_CASSERT(dest.size()==src3.size() ,"");
+            DLIB_CASSERT(dest.size()==src3.size());
-            DLIB_CASSERT(begin <= end && end <= dest.size() ,"");
+            DLIB_CASSERT(begin <= end && end <= dest.size());
            launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
                dest.device(), src1.device(),
                src2.device(), src3.device(), begin, end, A, B, C);
@@ -577,10 +577,10 @@ namespace dlib
            const tensor& B
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest, src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest, src));
            DLIB_CASSERT(
                ((A.num_samples()==1 && B.num_samples()==1) ||
-                (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())) ,"");
+                (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())));
            DLIB_CASSERT(
                A.nr()==B.nr() && B.nr()==src.nr() &&
                A.nc()==B.nc() && B.nc()==src.nc() &&
@@ -648,8 +648,8 @@ namespace dlib
            DLIB_CASSERT(s.size() == m.size() &&
                s.size() == v.size() &&
                s.size() == params.size() &&
-                s.size() == params_grad.size() ,"");
+                s.size() == params_grad.size());
-            DLIB_CASSERT(begin <= end && end <= params.size() ,"");
+            DLIB_CASSERT(begin <= end && end <= params.size());
            const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
            launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
@@ -675,9 +675,9 @@ namespace dlib
            const tensor& B
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest, src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest, src));
-            DLIB_CASSERT(have_same_dimensions(A, B) ,"");
+            DLIB_CASSERT(have_same_dimensions(A, B));
-            DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k() ,"");
+            DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k());
            launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
                dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
@@ -705,7 +705,7 @@ namespace dlib
                gradient_input.k() == grad.k() &&
                gradient_input.nr() == grad.nr() &&
                gradient_input.nc() == grad.nc() &&
-                gradient_input.size() > 0 ,"");
+                gradient_input.size() > 0);
            launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
        }
@@ -750,8 +750,8 @@ namespace dlib
            size_t idx
        )
        {
-            DLIB_CASSERT(a.size() == b.size() , "");
+            DLIB_CASSERT(a.size() == b.size());
-            DLIB_CASSERT(idx < result.size() , "");
+            DLIB_CASSERT(idx < result.size());
            launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
        }
dlib/dnn/cudnn_dlibapi.cpp
@@ -326,7 +326,7 @@ namespace dlib
                gradient_input.k() == grad.k() &&
                gradient_input.size() > 0 &&
                is_same_object(grad,gradient_input) == false
-                ,"");
+                );
            const float alpha = 1;
            const float beta = 0;
@@ -417,8 +417,8 @@ namespace dlib
        )
        {
            DLIB_CASSERT(0 <= averaging_factor && averaging_factor <= 1, "averaging_factor: " << averaging_factor);
-            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_means,means) ,"");
+            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_means,means));
-            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_variances,invstds) ,"");
+            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_variances,invstds));
            DLIB_CASSERT(
                src.num_samples() > 1 &&
                gamma.num_samples() == 1 &&
@@ -491,15 +491,15 @@ namespace dlib
        )
        {
            const long num = src.k()*src.nr()*src.nc();
-            DLIB_CASSERT(src.num_samples() > 1 ,"");
+            DLIB_CASSERT(src.num_samples() > 1);
-            DLIB_CASSERT(num == (long)means.size() ,"");
+            DLIB_CASSERT(num == (long)means.size());
-            DLIB_CASSERT(num == (long)invstds.size() ,"");
+            DLIB_CASSERT(num == (long)invstds.size());
-            DLIB_CASSERT(num == (long)gamma.size() ,"");
+            DLIB_CASSERT(num == (long)gamma.size());
-            DLIB_CASSERT(num == (long)gamma_grad.size() ,"");
+            DLIB_CASSERT(num == (long)gamma_grad.size());
-            DLIB_CASSERT(num == (long)beta_grad.size() ,"");
+            DLIB_CASSERT(num == (long)beta_grad.size());
-            DLIB_CASSERT(have_same_dimensions(gradient_input, src) ,"");
+            DLIB_CASSERT(have_same_dimensions(gradient_input, src));
-            DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad) ,"");
+            DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad));
-            DLIB_CASSERT(eps > 0 ,"");
+            DLIB_CASSERT(eps > 0);
            const float in_scale = 1;
            const float out_scale = 1;
@@ -606,8 +606,8 @@ namespace dlib
        )
        {
            DLIB_CASSERT(0 <= averaging_factor && averaging_factor <= 1, "averaging_factor: " << averaging_factor);
-            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_means,means) ,"");
+            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_means,means));
-            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_variances,invstds) ,"");
+            DLIB_CASSERT(averaging_factor==1 || have_same_dimensions(running_variances,invstds));
            DLIB_CASSERT(
                src.num_samples() > 1 &&
                gamma.num_samples() == 1 &&
@@ -680,14 +680,14 @@ namespace dlib
            tensor& beta_grad
        )
        {
-            DLIB_CASSERT(src.k() == (long)means.size() ,"");
+            DLIB_CASSERT(src.k() == (long)means.size());
-            DLIB_CASSERT(src.k() == (long)invstds.size() ,"");
+            DLIB_CASSERT(src.k() == (long)invstds.size());
-            DLIB_CASSERT(src.k() == (long)gamma.size() ,"");
+            DLIB_CASSERT(src.k() == (long)gamma.size());
-            DLIB_CASSERT(src.k() == (long)gamma_grad.size() ,"");
+            DLIB_CASSERT(src.k() == (long)gamma_grad.size());
-            DLIB_CASSERT(src.k() == (long)beta_grad.size() ,"");
+            DLIB_CASSERT(src.k() == (long)beta_grad.size());
-            DLIB_CASSERT(have_same_dimensions(gradient_input, src) ,"");
+            DLIB_CASSERT(have_same_dimensions(gradient_input, src));
-            DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad) ,"");
+            DLIB_CASSERT(have_same_dimensions(gradient_input, src_grad));
-            DLIB_CASSERT(eps > 0 ,"");
+            DLIB_CASSERT(eps > 0);
            const float in_scale = 1;
            const float out_scale = 1;
@@ -794,7 +794,7 @@ namespace dlib
            int padding_x_
        )
        {
-            DLIB_CASSERT(data.k() == filters.k() ,"");
+            DLIB_CASSERT(data.k() == filters.k());
            // if the last call to setup gave the same exact settings then don't do
            // anything.
@@ -969,10 +969,10 @@ namespace dlib
            int padding_x
        )
        {
-            DLIB_CASSERT(is_same_object(output,data) == false ,"");
+            DLIB_CASSERT(is_same_object(output,data) == false);
-            DLIB_CASSERT(is_same_object(output,filters) == false ,"");
+            DLIB_CASSERT(is_same_object(output,filters) == false);
-            DLIB_CASSERT(filters.k() == data.k() ,"");
+            DLIB_CASSERT(filters.k() == data.k());
-            DLIB_CASSERT(stride_y > 0 && stride_x > 0 ,"");
+            DLIB_CASSERT(stride_y > 0 && stride_x > 0);
            DLIB_CASSERT(filters.nc() <= data.nc() + 2*padding_x,
                "Filter windows must be small enough to fit into the padded image."
                << "\n\t filters.nc(): " << filters.nc()
@@ -992,9 +992,9 @@ namespace dlib
            output.set_size(out_num_samples, out_k, out_nr, out_nc);
            DLIB_ASSERT(output.num_samples() == data.num_samples(), out_num_samples << " " << data.num_samples());
-            DLIB_ASSERT(output.k() == filters.num_samples() ,"");
+            DLIB_ASSERT(output.k() == filters.num_samples());
-            DLIB_ASSERT(output.nr() == 1+(data.nr()+2*padding_y-filters.nr())/stride_y ,"");
+            DLIB_ASSERT(output.nr() == 1+(data.nr()+2*padding_y-filters.nr())/stride_y);
-            DLIB_ASSERT(output.nc() == 1+(data.nc()+2*padding_x-filters.nc())/stride_x ,"");
+            DLIB_ASSERT(output.nc() == 1+(data.nc()+2*padding_x-filters.nc())/stride_x);
@@ -1221,8 +1221,8 @@ namespace dlib
            dest.set_size(outN,outC,outH,outW);
-            DLIB_CASSERT(dest.num_samples() == src.num_samples() ,"");
+            DLIB_CASSERT(dest.num_samples() == src.num_samples());
-            DLIB_CASSERT(dest.k() == src.k() ,"");
+            DLIB_CASSERT(dest.k() == src.k());
            DLIB_CASSERT(dest.nr() == 1 + (src.nr() + 2*padding_y - window_height)/stride_y,
                "\n stride_y: " << stride_y <<
                "\n padding_y: " << padding_y <<
@@ -1255,8 +1255,8 @@ namespace dlib
            tensor& grad
        )
        {
-            DLIB_CASSERT(have_same_dimensions(gradient_input,dest) ,"");
+            DLIB_CASSERT(have_same_dimensions(gradient_input,dest));
-            DLIB_CASSERT(have_same_dimensions(src,grad) ,"");
+            DLIB_CASSERT(have_same_dimensions(src,grad));
            const float alpha = 1;
            const float beta = 1;
@@ -1282,7 +1282,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest,src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest,src));
            if (src.size() == 0)
                return;
@@ -1309,7 +1309,7 @@ namespace dlib
        {
            DLIB_CASSERT(
                have_same_dimensions(dest,gradient_input) == true &&
-                have_same_dimensions(dest,grad) == true ,"");
+                have_same_dimensions(dest,grad) == true);
            if (dest.size() == 0)
                return;
@@ -1336,7 +1336,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest,src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest,src));
            if (src.size() == 0)
                return;
@@ -1360,7 +1360,7 @@ namespace dlib
        {
            DLIB_CASSERT(
                have_same_dimensions(dest,gradient_input) == true &&
-                have_same_dimensions(dest,grad) == true ,"");
+                have_same_dimensions(dest,grad) == true);
            if (dest.size() == 0)
                return;
@@ -1387,7 +1387,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest,src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest,src));
            if (src.size() == 0)
                return;
@@ -1411,7 +1411,7 @@ namespace dlib
        {
            DLIB_CASSERT(
                have_same_dimensions(dest,gradient_input) == true &&
-                have_same_dimensions(dest,grad) == true ,"");
+                have_same_dimensions(dest,grad) == true);
            if (dest.size() == 0)
                return;
@@ -1438,7 +1438,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(have_same_dimensions(dest,src) ,"");
+            DLIB_CASSERT(have_same_dimensions(dest,src));
            if (src.size() == 0)
                return;
@@ -1462,7 +1462,7 @@ namespace dlib
        {
            DLIB_CASSERT(
                have_same_dimensions(dest,gradient_input) == true &&
-                have_same_dimensions(dest,grad) == true ,"");
+                have_same_dimensions(dest,grad) == true);
            if (dest.size() == 0)
                return;
dlib/dnn/gpu_data.cpp
@@ -23,7 +23,7 @@ namespace dlib
            const gpu_data& src
        )
        {
-            DLIB_CASSERT(dest.size() == src.size() ,"");
+            DLIB_CASSERT(dest.size() == src.size());
            if (src.size() == 0 || &dest == &src)
                return;
@@ -38,8 +38,8 @@ namespace dlib
            size_t num
        )
        {
-            DLIB_CASSERT(dest_offset+num <= dest.size() ,"");
+            DLIB_CASSERT(dest_offset+num <= dest.size());
-            DLIB_CASSERT(src_offset+num <= src.size() ,"");
+            DLIB_CASSERT(src_offset+num <= src.size());
            if (num == 0)
                return;
dlib/dnn/gpu_data.h
@@ -221,7 +221,7 @@ namespace dlib
        inline void memcpy (gpu_data& dest, const gpu_data& src)
        {
-            DLIB_CASSERT(dest.size() == src.size() ,"");
+            DLIB_CASSERT(dest.size() == src.size());
            if (src.size() == 0 || &dest == &src)
                return;
            std::memcpy(dest.host_write_only(), src.host(), sizeof(float)*src.size());
@@ -235,8 +235,8 @@ namespace dlib
            size_t num
        )
        {
-            DLIB_CASSERT(dest_offset+num <= dest.size() ,"");
+            DLIB_CASSERT(dest_offset+num <= dest.size());
-            DLIB_CASSERT(src_offset+num <= src.size() ,"");
+            DLIB_CASSERT(src_offset+num <= src.size());
            if (num == 0)
                return;
            if (&dest == &src && std::max(dest_offset, src_offset) < std::min(dest_offset,src_offset)+num)
dlib/dnn/input.h
@@ -64,7 +64,7 @@ namespace dlib
            resizable_tensor& data
        ) const
        {
-            DLIB_CASSERT(std::distance(ibegin,iend) > 0 ,"");
+            DLIB_CASSERT(std::distance(ibegin,iend) > 0);
            const auto nr = ibegin->nr();
            const auto nc = ibegin->nc();
            // make sure all the input matrices have the same dimensions
@@ -187,7 +187,7 @@ namespace dlib
            resizable_tensor& data
        ) const
        {
-            DLIB_CASSERT(std::distance(ibegin,iend) > 0 ,"");
+            DLIB_CASSERT(std::distance(ibegin,iend) > 0);
            // make sure all input images have the correct size
            for (auto i = ibegin; i != iend; ++i)
            {
@@ -305,7 +305,7 @@ namespace dlib
            resizable_tensor& data
        ) const
        {
-            DLIB_CASSERT(std::distance(ibegin,iend) > 0 ,"");
+            DLIB_CASSERT(std::distance(ibegin,iend) > 0);
            const auto nr = ibegin->nr();
            const auto nc = ibegin->nc();
            // make sure all the input matrices have the same dimensions
@@ -398,7 +398,7 @@ namespace dlib
            resizable_tensor& data
        ) const
        {
-            DLIB_CASSERT(std::distance(ibegin,iend) > 0 ,"");
+            DLIB_CASSERT(std::distance(ibegin,iend) > 0);
            const auto nr = ibegin->nr();
            const auto nc = ibegin->nc();
            // make sure all the input matrices have the same dimensions
dlib/dnn/layers.h
@@ -1092,7 +1092,7 @@ namespace dlib
            drop_rate(drop_rate_),
            rnd(std::rand())
        {
-            DLIB_CASSERT(0 <= drop_rate && drop_rate <= 1 ,"");
+            DLIB_CASSERT(0 <= drop_rate && drop_rate <= 1);
        }
        // We have to add a copy constructor and assignment operator because the rnd object
dlib/dnn/loss.h
@@ -29,13 +29,13 @@ namespace dlib
            label_iterator iter
        ) const
        {
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            const tensor& output_tensor = sub.get_output();
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
-                         output_tensor.k() == 1 ,"");
+                         output_tensor.k() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
@@ -57,14 +57,14 @@ namespace dlib
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() != 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples() != 0);
-            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
-            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
-                         output_tensor.k() == 1 ,"");
+                         output_tensor.k() == 1);
            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
@@ -136,13 +136,13 @@ namespace dlib
            label_iterator iter
        ) const
        {
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            const tensor& output_tensor = sub.get_output();
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
-                         output_tensor.k() == 1 ,"");
+                         output_tensor.k() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
@@ -165,17 +165,17 @@ namespace dlib
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() != 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples() != 0);
-            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
-            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
-                         output_tensor.k() == 1 ,"");
+                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
-                         grad.k() == 1 ,"");
+                         grad.k() == 1);
            tt::sigmoid(grad, output_tensor);
@@ -253,10 +253,10 @@ namespace dlib
        ) const
        {
            const tensor& output_tensor = sub.get_output();
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(output_tensor.nr() == 1 &&
-                         output_tensor.nc() == 1 ,"");
+                         output_tensor.nc() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            // Note that output_tensor.k() should match the number of labels.
@@ -282,15 +282,15 @@ namespace dlib
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();
-            DLIB_CASSERT(sub.sample_expansion_factor() == 1 ,"");
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
-            DLIB_CASSERT(input_tensor.num_samples() != 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples() != 0);
-            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0 ,"");
+            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
-            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
-            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples() ,"");
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
-                         output_tensor.nc() == 1 ,"");
+                         output_tensor.nc() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
-                         grad.nc() == 1 ,"");
+                         grad.nc() == 1);
            tt::softmax(grad, output_tensor);
dlib/dnn/solvers.h
@@ -43,7 +43,7 @@ namespace dlib
        {
            const tensor& params = l.get_layer_params();
-            DLIB_CASSERT(params.size() != 0 ,"");
+            DLIB_CASSERT(params.size() != 0);
            if (v.size() == 0)
            {
                v.copy_size(params_grad);
@@ -131,7 +131,7 @@ namespace dlib
        {
            const tensor& params = l.get_layer_params();
-            DLIB_CASSERT(params.size() != 0 ,"");
+            DLIB_CASSERT(params.size() != 0);
            if (v.size() == 0)
            {
                v.copy_size(params_grad);
@@ -204,7 +204,7 @@ namespace dlib
        )
        {
            const tensor& params = l.get_layer_params();
-            DLIB_CASSERT(params.size() != 0 ,"");
+            DLIB_CASSERT(params.size() != 0);
            if (v.size() == 0)
            {
                m.copy_size(params_grad);
@@ -305,7 +305,7 @@ namespace dlib
        )
        {
            const tensor& params = l.get_layer_params();
-            DLIB_CASSERT(params.size() != 0 ,"");
+            DLIB_CASSERT(params.size() != 0);
            if (v.size() == 0)
            {
                m.copy_size(params_grad);
dlib/dnn/tensor.h
@@ -101,7 +101,7 @@ namespace dlib
        tensor& operator= (const matrix_exp<EXP>& item)
        {
            DLIB_CASSERT(num_samples() == item.nr() &&
-                         nr()*nc()*k() == item.nc() ,"");
+                         nr()*nc()*k() == item.nc());
            static_assert((is_same_type<float, typename EXP::type>::value == true),
                "To assign a matrix to a tensor the matrix must contain float values");
@@ -113,7 +113,7 @@ namespace dlib
        tensor& operator+= (const matrix_exp<EXP>& item)
        {
            DLIB_CASSERT(num_samples() == item.nr() &&
-                         nr()*nc()*k() == item.nc() ,"");
+                         nr()*nc()*k() == item.nc());
            static_assert((is_same_type<float, typename EXP::type>::value == true),
                "To assign a matrix to a tensor the matrix must contain float values");
            set_ptrm(host(), m_n, m_nr*m_nc*m_k) += item;
@@ -124,7 +124,7 @@ namespace dlib
        tensor& operator-= (const matrix_exp<EXP>& item)
        {
            DLIB_CASSERT(num_samples() == item.nr() &&
-                         nr()*nc()*k() == item.nc() ,"");
+                         nr()*nc()*k() == item.nc());
            static_assert((is_same_type<float, typename EXP::type>::value == true),
                "To assign a matrix to a tensor the matrix must contain float values");
            set_ptrm(host(), m_n, m_nr*m_nc*m_k) -= item;
@@ -137,8 +137,8 @@ namespace dlib
            const matrix_exp<EXP>& item
        )
        {
-            DLIB_CASSERT(idx < num_samples() ,"");
+            DLIB_CASSERT(idx < num_samples());
-            DLIB_CASSERT(item.size() == nr()*nc()*k() ,"");
+            DLIB_CASSERT(item.size() == nr()*nc()*k());
            static_assert((is_same_type<float, typename EXP::type>::value == true),
                "To assign a matrix to a tensor the matrix must contain float values");
            set_ptrm(host()+idx*item.size(), item.nr(), item.nc()) = item;
@@ -151,8 +151,8 @@ namespace dlib
            const matrix_exp<EXP>& item
        )
        {
-            DLIB_CASSERT(idx < num_samples() ,"");
+            DLIB_CASSERT(idx < num_samples());
-            DLIB_CASSERT(item.size() == nr()*nc()*k() ,"");
+            DLIB_CASSERT(item.size() == nr()*nc()*k());
            static_assert((is_same_type<float, typename EXP::type>::value == true),
                "To assign a matrix to a tensor the matrix must contain float values");
            set_ptrm(host()+idx*item.size(), item.nr(), item.nc()) += item;
@@ -169,7 +169,7 @@ namespace dlib
            const tensor& src
        )
        {
-            DLIB_CASSERT(dest.size() == src.size() ,"");
+            DLIB_CASSERT(dest.size() == src.size());
            memcpy(dest.data(), dest.get_alias_offset(),
                   src.data(), src.get_alias_offset(),
                   src.size());
@@ -285,7 +285,7 @@ namespace dlib
            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
        )
        {
-            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0 ,"");
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0);
            set_size(n_, k_, nr_, nc_);
        }
@@ -351,7 +351,7 @@ namespace dlib
            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
        )
        {
-            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0 ,"");
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0);
            m_n = n_;
            m_k = k_;
@@ -469,7 +469,7 @@ namespace dlib
            const tensor& b
        )
        {
-            DLIB_CASSERT(a.size() == b.size() ,"");
+            DLIB_CASSERT(a.size() == b.size());
            const float* da = a.host();
            const float* db = b.host();
            double sum = 0;
@@ -559,7 +559,7 @@ namespace dlib
            long n_, long k_ = 1, long nr_ = 1, long nc_ = 1
        )
        {
-            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0 ,"");
+            DLIB_ASSERT(n_ >= 0 && k_ >= 0 && nr_ >= 0 && nc_ >= 0);
            inst.m_n = n_;
            inst.m_k = k_;
@@ -588,7 +588,7 @@ namespace dlib
            size_t offset
        )
        {
-            DLIB_CASSERT(offset+size() <= t.size() ,"");
+            DLIB_CASSERT(offset+size() <= t.size());
        #ifdef DLIB_USE_CUDA
            if (!inst.cudnn_descriptor)
dlib/dnn/tensor_tools.cpp
@@ -101,7 +101,7 @@ namespace dlib { namespace tt
            float stddev
        )
        {
-            DLIB_CASSERT(data.size()%2 == 0 ,"");
+            DLIB_CASSERT(data.size()%2 == 0);
        #ifdef DLIB_USE_CUDA
            rnd.fill_gaussian(data, mean, stddev);
        #else
@@ -135,11 +135,11 @@ namespace dlib { namespace tt
        {
            DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
                dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
-                dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
+                dest.nc() == src1.nc() && src1.nc() == src2.nc() );
            const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
            DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
                (src1.num_samples()==1 || src1.num_samples()==MD) &&
-                (src2.num_samples()==1 || src2.num_samples()==MD) ,"");
+                (src2.num_samples()==1 || src2.num_samples()==MD) );
        #ifdef DLIB_USE_CUDA
            cuda::multiply(add_to, dest, src1, src2);
        #else
dlib/dnn/trainer.h
@@ -143,7 +143,7 @@ namespace dlib
            unsigned long batch_size
        )
        {
-            DLIB_CASSERT(batch_size > 0 ,"");
+            DLIB_CASSERT(batch_size > 0);
            mini_batch_size = batch_size;
        }
@@ -154,7 +154,7 @@ namespace dlib
            unsigned long num
        )
        {
-            DLIB_CASSERT(num > 0 ,"");
+            DLIB_CASSERT(num > 0);
            max_num_epochs = num;
        }
@@ -183,7 +183,7 @@ namespace dlib
            const std::vector<label_type>& labels
        )
        {
-            DLIB_CASSERT(data.size() == labels.size() && data.size() > 0 ,"");
+            DLIB_CASSERT(data.size() == labels.size() && data.size() > 0);
            if (verbose)
            {
@@ -209,7 +209,7 @@ namespace dlib
            const std::vector<input_type>& data
        )
        {
-            DLIB_CASSERT(data.size() > 0 ,"");
+            DLIB_CASSERT(data.size() > 0);
            if (verbose)
            {
                using namespace std::chrono;
@@ -234,7 +234,7 @@ namespace dlib
            const std::vector<label_type>& labels
        )
        {
-            DLIB_CASSERT(data.size() == labels.size() && data.size() > 0 ,"");
+            DLIB_CASSERT(data.size() == labels.size() && data.size() > 0);
            bool updated_the_network = false;
            // The reason these two loops don't initialize their counter variables but
@@ -290,7 +290,7 @@ namespace dlib
            const std::vector<input_type>& data
        )
        {
-            DLIB_CASSERT(data.size() > 0 ,"");
+            DLIB_CASSERT(data.size() > 0);
            const bool has_unsupervised_loss = std::is_same<no_label_type, label_type>::value;
            static_assert(has_unsupervised_loss,
@@ -378,7 +378,7 @@ namespace dlib
            double lr
        )
        {
-            DLIB_CASSERT(lr > 0 ,"");
+            DLIB_CASSERT(lr > 0);
            wait_for_thread_to_pause();
            if (learning_rate != lr)
            {
@@ -399,7 +399,7 @@ namespace dlib
            double lr
        )
        {
-            DLIB_CASSERT(lr > 0 ,"");
+            DLIB_CASSERT(lr > 0);
            wait_for_thread_to_pause();
            lr_schedule.set_size(0);
            min_learning_rate = lr;
@@ -416,8 +416,8 @@ namespace dlib
            const matrix_exp<EXP>& schedule
        )
        {
-            DLIB_CASSERT(schedule.size() > 0 ,"");
+            DLIB_CASSERT(schedule.size() > 0);
-            DLIB_CASSERT(min(schedule) > 0 ,"");
+            DLIB_CASSERT(min(schedule) > 0);
            set_learning_rate(schedule(0,0));
            set_min_learning_rate(min(schedule));
            set_learning_rate_shrink_factor(1);
@@ -456,7 +456,7 @@ namespace dlib
            double shrink
        )
        {
-            DLIB_CASSERT(0 < shrink && shrink <= 1 ,"");
+            DLIB_CASSERT(0 < shrink && shrink <= 1);
            wait_for_thread_to_pause();
            lr_schedule.set_size(0);
            learning_rate_shrink = shrink;