Commit 2ddc0d74 authored Feb 23, 2012 by Davis King
Added an implementation of the linear recursive least squares algorithm.
parent e3fc99ae

Showing 3 changed files with 293 additions and 0 deletions:

    dlib/svm.h                +1    -0
    dlib/svm/rls.h            +151  -0
    dlib/svm/rls_abstract.h   +141  -0
dlib/svm.h
@@ -5,6 +5,7 @@
 #include "svm/svm.h"
 #include "svm/krls.h"
+#include "svm/rls.h"
 #include "svm/kcentroid.h"
 #include "svm/kcentroid_overloads.h"
 #include "svm/kkmeans.h"
dlib/svm/rls.h
0 → 100644
// Copyright (C) 2012  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#ifndef DLIB_RLs_H__
#define DLIB_RLs_H__

#include "rls_abstract.h"
#include "../matrix.h"
#include "function.h"

namespace dlib
{

// ----------------------------------------------------------------------------------------

    class rls
    {
    public:

        explicit rls (
            double forget_factor_,
            double C_ = 1000
        )
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(0 < forget_factor_ && forget_factor_ <= 1 &&
                        0 < C_,
                "\t rls::rls()"
                << "\n\t invalid arguments were given to this function"
                << "\n\t forget_factor_: " << forget_factor_
                << "\n\t C_:             " << C_
                << "\n\t this: " << this
                );

            C = C_;
            forget_factor = forget_factor_;
        }

        rls (
        )
        {
            C = 1000;
            forget_factor = 1;
        }

        double get_c (
        ) const
        {
            return C;
        }

        double get_forget_factor (
        ) const
        {
            return forget_factor;
        }

        template <typename EXP>
        void train (
            const matrix_exp<EXP>& x,
            double y
        )
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(is_col_vector(x) &&
                        (get_w().size() == 0 || get_w().size() == x.size()),
                "\t void rls::train()"
                << "\n\t invalid arguments were given to this function"
                << "\n\t is_col_vector(x): " << is_col_vector(x)
                << "\n\t x.size():         " << x.size()
                << "\n\t get_w().size():   " << get_w().size()
                << "\n\t this: " << this
                );

            if (R.size() == 0)
            {
                R = identity_matrix<double>(x.size())*C;
                w.set_size(x.size());
                w = 0;
            }

            const double l = 1.0/forget_factor;

            R = l*R - (l*l*R*x*trans(x)*trans(R))/(1 + l*trans(x)*R*x);
            // R should always be symmetric.  This line improves numeric stability of this algorithm.
            R = 0.5*(R + trans(R));

            w = w + R*x*(y - trans(x)*w);
        }

        const matrix<double,0,1>& get_w (
        ) const
        {
            return w;
        }

        template <typename EXP>
        double operator() (
            const matrix_exp<EXP>& x
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(is_col_vector(x) && get_w().size() == x.size(),
                "\t double rls::operator()()"
                << "\n\t invalid arguments were given to this function"
                << "\n\t is_col_vector(x): " << is_col_vector(x)
                << "\n\t x.size():         " << x.size()
                << "\n\t get_w().size():   " << get_w().size()
                << "\n\t this: " << this
                );

            return dot(x, w);
        }

        decision_function<linear_kernel<matrix<double,0,1> > > get_decision_function (
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(get_w().size() != 0,
                "\t decision_function rls::get_decision_function()"
                << "\n\t invalid arguments were given to this function"
                << "\n\t get_w().size(): " << get_w().size()
                << "\n\t this: " << this
                );

            decision_function<linear_kernel<matrix<double,0,1> > > df;
            df.alpha.set_size(1);
            df.basis_vectors.set_size(1);
            df.b = 0;
            df.alpha = 1;
            df.basis_vectors(0) = w;

            return df;
        }

    private:

        matrix<double,0,1> w;
        matrix<double> R;

        double C;
        double forget_factor;
    };

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_RLs_H__
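For reference, the train() method above is the standard rank-one recursive least squares update (a Sherman-Morrison style correction to the inverse correlation matrix). Writing l = 1/forget_factor as in the code, with R initialized to C*identity_matrix and w to 0, each call computes, restated in math notation:

\[
R \;\leftarrow\; \ell R \;-\; \frac{\ell^{2}\, R\,x\,x^{\top} R^{\top}}{1 + \ell\, x^{\top} R\, x},
\qquad
w \;\leftarrow\; w + R\,x\,\bigl(y - x^{\top} w\bigr),
\qquad
\ell = \frac{1}{\texttt{forget\_factor}}.
\]

In exact arithmetic R stays symmetric throughout, so the R = 0.5*(R + trans(R)) line does not change the algorithm; it only suppresses the asymmetry that floating-point rounding would otherwise accumulate, as the code comment notes.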
dlib/svm/rls_abstract.h
0 → 100644
// Copyright (C) 2012  Davis E. King (davis@dlib.net)
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_RLs_ABSTRACT_H__
#ifdef DLIB_RLs_ABSTRACT_H__

#include "../matrix/matrix_abstract.h"
#include "function_abstract.h"

namespace dlib
{

// ----------------------------------------------------------------------------------------

    class rls
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This is an implementation of the linear version of the recursive least
                squares algorithm.  It accepts training points incrementally and, at
                each step, maintains the solution to the following optimization problem:
                    find w minimizing: 0.5*dot(w,w) + C*sum_i(y_i - trans(x_i)*w)^2
                Where (x_i,y_i) are training pairs.  x_i is some vector and y_i is a
                target scalar value.

                This object can also be configured to use exponential forgetting.  This
                is where each training example is weighted by pow(forget_factor, i),
                where i indicates the sample's age.  So older samples are weighted less
                in the least squares solution and therefore become forgotten after some
                time.  Note also that this forgetting applies to the regularizer as
                well.  So if forgetting is used then this object slowly converts itself
                to an unregularized version of recursive least squares.
        !*/

    public:

        explicit rls (
            double forget_factor,
            double C = 1000
        );
        /*!
            requires
                - 0 < forget_factor <= 1
                - 0 < C
            ensures
                - #get_w().size() == 0
                - #get_c() == C
                - #get_forget_factor() == forget_factor
        !*/

        rls (
        );
        /*!
            ensures
                - #get_w().size() == 0
                - #get_c() == 1000
                - #get_forget_factor() == 1
        !*/

        double get_c (
        ) const;
        /*!
            ensures
                - returns the regularization parameter.  It is the parameter that
                  determines the trade-off between exactly fitting the training data
                  and allowing some errors in the hope of improving the generalization
                  of the resulting regression.  Larger values encourage exact fitting
                  while smaller values of C may encourage better generalization.
        !*/

        double get_forget_factor (
        ) const;
        /*!
            ensures
                - returns the exponential forgetting factor.  A value of 1 disables
                  forgetting and results in normal least squares regression.  On the
                  other hand, a smaller value causes the regression to forget about
                  old training examples and prefer instead to fit more recent
                  examples.  The closer the forget factor is to zero the faster old
                  examples are forgotten.
        !*/

        template <typename EXP>
        void train (
            const matrix_exp<EXP>& x,
            double y
        );
        /*!
            requires
                - is_col_vector(x) == true
                - if (get_w().size() != 0) then
                    - x.size() == get_w().size()
                      (i.e. all training examples must have the same dimensionality)
            ensures
                - #get_w().size() == x.size()
                - updates #get_w() such that it contains the solution to the least
                  squares problem of regressing the given x onto the given y as well
                  as all the previous training examples supplied to train().
        !*/

        const matrix<double,0,1>& get_w (
        ) const;
        /*!
            ensures
                - returns the regression weights.  These are the values learned by
                  the least squares procedure.  If train() has not been called then
                  this function returns an empty vector.
        !*/

        template <typename EXP>
        double operator() (
            const matrix_exp<EXP>& x
        ) const;
        /*!
            requires
                - is_col_vector(x) == true
                - get_w().size() == x.size()
            ensures
                - returns dot(x, get_w())
        !*/

        decision_function<linear_kernel<matrix<double,0,1> > > get_decision_function (
        ) const;
        /*!
            requires
                - get_w().size() != 0
            ensures
                - returns a decision function DF such that:
                    - DF(x) == dot(x, get_w())
        !*/

    };

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_RLs_ABSTRACT_H__
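To make the interface above concrete, here is a minimal usage sketch (not part of the commit; the data and variable names are illustrative). It streams samples of a known linear function into an rls object via the dlib/svm.h include added by this commit, then queries the fit:

#include <dlib/svm.h>
#include <cmath>
#include <iostream>

int main()
{
    // forget_factor == 1 disables forgetting; C == 1000 is the default regularizer.
    dlib::rls filt(1.0);

    // Feed in samples of y = 2*x(0) + 3*x(1), one at a time.
    dlib::matrix<double,2,1> x;
    for (double i = 0; i < 200; ++i)
    {
        x = std::cos(i), std::sin(i);     // comma-initialized inputs that span the plane
        filt.train(x, 2*x(0) + 3*x(1));
    }

    x = 1, 1;
    std::cout << "prediction: " << filt(x) << std::endl;    // should be close to 5
    std::cout << "weights:\n" << filt.get_w() << std::endl; // should be close to [2 3]

    // The fit can also be exported as an ordinary dlib decision function.
    dlib::decision_function<dlib::linear_kernel<dlib::matrix<double,0,1> > > df =
        filt.get_decision_function();
    std::cout << "df(x): " << df(x) << std::endl;
}

Constructing the object as dlib::rls filt(0.8) instead would down-weight each old sample by pow(0.8, age), letting the regression track a target that drifts over time, per the get_forget_factor() spec above.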