Commit 535de71d authored by Davis King

merged

parents abaa238f 3e9b7ebb
@@ -414,6 +414,9 @@ namespace dlib
             dlib::rand rnd;
         public:
             const std::vector<scalar_type>& get_alpha () const { return alpha; }
+            friend void serialize(const optimizer_state& item, std::ostream& out)
+            {
+                const int version = 1;
......
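The hunk above makes optimizer_state serializable, so a long-running training job can checkpoint the optimizer to disk between runs. Below is a minimal sketch of that pattern. The toy samples, the file name, and the typedefs are illustrative assumptions, and the matching deserialize() overload is collapsed out of this view, so only the serialize() side is shown.

#include <fstream>
#include <vector>
#include <dlib/svm.h>

int main()
{
    typedef dlib::matrix<double,0,1> sample_type;
    typedef dlib::linear_kernel<sample_type> kernel_type;
    typedef dlib::svm_c_linear_dcd_trainer<kernel_type> trainer_type;

    // Two toy 2D samples, one per class (illustrative data, not from the commit).
    std::vector<sample_type> x;
    std::vector<double> y;
    sample_type samp(2);
    samp = 1, 2;    x.push_back(samp); y.push_back(+1);
    samp = -1, -2;  x.push_back(samp); y.push_back(-1);

    trainer_type trainer;
    trainer_type::optimizer_state state;
    trainer.train(x, y, state);

    // Checkpoint the optimizer state to disk using the serialize() friend
    // added by this commit (found via argument-dependent lookup).
    std::ofstream fout("svm_state.dat", std::ios::binary);
    serialize(state, fout);
}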
@@ -304,10 +304,16 @@ namespace dlib
         !*/
         // optimizer_state is used to record the internal state of the SVM optimizer. It
-        // can be used with the following train() routine to warm-start the optimizer.
-        // Note, that optimizer_state objects are serializable but are otherwise completely
-        // opaque to the user.
-        class optimizer_state;
+        // can be used with the following train() routine to warm-start the optimizer or
+        // access the optimal alpha values (see the Hsieh paper mentioned above). The
+        // optimizer_state objects are serializable and allow you to get the alphas, but
+        // are otherwise completely opaque to the user.
+        class optimizer_state
+        {
+        public:
+            const std::vector<scalar_type>& get_alpha (
+            ) const;
+        };
         template <
             typename in_sample_vector_type,
@@ -355,6 +361,8 @@ namespace dlib
             - #state == the internal state of the optimizer at the solution to the SVM
               problem. Therefore, passing #state to a new call to train() will start
               the optimizer from the current solution.
+            - #state.get_alpha().size() == x.size()
+            - #state.get_alpha() == the optimal alpha/dual values learned by the optimizer.
             - returns a decision function F with the following properties:
                 - F.alpha.size() == 1
                 - F.basis_vectors.size() == 1
......
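Taken together, the new spec means train() now exposes one dual variable per training sample and can be warm-started from a previous solution. A hedged sketch of that usage follows; the toy data and the idea of appending an extra example before the warm-started call are assumptions about the intended pattern, not something this excerpt spells out.

#include <iostream>
#include <vector>
#include <dlib/svm.h>

int main()
{
    typedef dlib::matrix<double,0,1> sample_type;
    typedef dlib::linear_kernel<sample_type> kernel_type;
    typedef dlib::svm_c_linear_dcd_trainer<kernel_type> trainer_type;

    // Illustrative 2D training data.
    std::vector<sample_type> x;
    std::vector<double> y;
    sample_type samp(2);
    samp = 1, 2;    x.push_back(samp); y.push_back(+1);
    samp = -1, -2;  x.push_back(samp); y.push_back(-1);

    trainer_type trainer;
    trainer_type::optimizer_state state;
    dlib::decision_function<kernel_type> df = trainer.train(x, y, state);

    // Per the new ensures clauses there is one dual variable per sample,
    // i.e. state.get_alpha().size() == x.size().
    const std::vector<double>& alpha = state.get_alpha();
    for (unsigned long i = 0; i < alpha.size(); ++i)
        std::cout << "alpha[" << i << "] = " << alpha[i] << std::endl;

    // Append an example and retrain. Passing the same state warm-starts the
    // optimizer from the previous solution rather than from scratch (assumed
    // usage; the excerpt only says #state resumes from the current solution).
    samp = 2, 1;  x.push_back(samp); y.push_back(+1);
    df = trainer.train(x, y, state);
}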