Commit 438b84b9 authored by Davis King

Added some missing "explicit" keywords.

--HG--
extra : convert_revision : svn%3Afdd8eb12-d10e-0410-9acb-85c331704f74/trunk%403781
parent 71bec0bd
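
For context on why this matters: in C++, a constructor that can be called with a single argument also doubles as an implicit conversion from that argument type unless it is marked `explicit`, so a stray integer or double could silently turn into a search or stop strategy object. A minimal sketch of the effect (the `search_strategy` and `run` names are illustrative stand-ins, not dlib's actual API):

```cpp
#include <iostream>

// Illustrative stand-in for a single-argument strategy class like the ones
// touched by this commit.  Without "explicit", this constructor would also
// act as an implicit conversion from unsigned long.
struct search_strategy
{
    explicit search_strategy(unsigned long max_size_) : max_size(max_size_) {}
    unsigned long max_size;
};

// Takes a strategy object, similar in spirit to how dlib's optimizers take
// a search strategy argument.
void run(const search_strategy& s)
{
    std::cout << "max_size = " << s.max_size << "\n";
}

int main()
{
    run(search_strategy(10));  // ok: direct initialization is unaffected
    // run(10);                // error now: no implicit unsigned long -> search_strategy conversion
    return 0;
}
```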
@@ -154,7 +154,7 @@ namespace dlib
     class lbfgs_search_strategy
     {
     public:
-        lbfgs_search_strategy(unsigned long max_size_) : max_size(max_size_), been_used(false)
+        explicit lbfgs_search_strategy(unsigned long max_size_) : max_size(max_size_), been_used(false)
         {
             DLIB_ASSERT (
                 max_size > 0,

@@ -284,7 +284,7 @@ namespace dlib
     class newton_search_strategy_obj
     {
     public:
-        newton_search_strategy_obj(
+        explicit newton_search_strategy_obj(
             const hessian_funct& hess
         ) : hessian(hess) {}

@@ -172,7 +172,7 @@ namespace dlib
             to use when an optimization problem has a large number of variables.
     !*/
     public:
-        lbfgs_search_strategy(
+        explicit lbfgs_search_strategy(
             unsigned long max_size
         );
         /*!

@@ -254,7 +254,7 @@ namespace dlib
                 search_direction = -inv(hessian(x))*derivative
     !*/
     public:
-        newton_search_strategy_obj(
+        explicit newton_search_strategy_obj(
             const hessian_funct& hess
         );
         /*!

@@ -18,7 +18,7 @@ namespace dlib
     class objective_delta_stop_strategy
     {
     public:
-        objective_delta_stop_strategy (
+        explicit objective_delta_stop_strategy (
             double min_delta = 1e-7
         ) : _verbose(false), _been_used(false), _min_delta(min_delta), _max_iter(0), _cur_iter(0), _prev_funct_value(0)
         {

@@ -97,7 +97,7 @@ namespace dlib
     class gradient_norm_stop_strategy
     {
     public:
-        gradient_norm_stop_strategy (
+        explicit gradient_norm_stop_strategy (
             double min_norm = 1e-7
         ) : _verbose(false), _min_norm(min_norm), _max_iter(0), _cur_iter(0)
         {

@@ -26,7 +26,7 @@ namespace dlib
     !*/
     public:
-        objective_delta_stop_strategy (
+        explicit objective_delta_stop_strategy (
             double min_delta = 1e-7
         );
         /*!

@@ -95,7 +95,7 @@ namespace dlib
     !*/
     public:
-        gradient_norm_stop_strategy (
+        explicit gradient_norm_stop_strategy (
             double min_norm = 1e-7
         );
         /*!
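
The practical effect on calling code: direct initialization of these strategy objects still compiles exactly as before, while copy-initialization from a bare number no longer does. A quick sketch of both cases, assuming the classes are pulled in through the usual dlib/optimization.h header:

```cpp
#include <dlib/optimization.h>

int main()
{
    // Direct initialization keeps working after this change.
    dlib::lbfgs_search_strategy search(10);
    dlib::objective_delta_stop_strategy stop(1e-7);

    // Copy-initialization from a bare value is rejected once the
    // constructors are explicit:
    // dlib::lbfgs_search_strategy search2 = 10;          // error
    // dlib::objective_delta_stop_strategy stop2 = 1e-7;  // error

    (void)search;
    (void)stop;
    return 0;
}
```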