Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in
Toggle navigation
D
dlib
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
钟尚武
dlib
Commits
0cdbbe85
Commit
0cdbbe85
authored
May 04, 2013
by
Davis King
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Added the svr_linear_trainer.
parent
73551a87
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
855 additions
and
1 deletion
+855
-1
svm.h
dlib/svm.h
+1
-0
svr_linear_trainer.h
dlib/svm/svr_linear_trainer.h
+422
-0
svr_linear_trainer_abstract.h
dlib/svm/svr_linear_trainer_abstract.h
+269
-0
CMakeLists.txt
dlib/test/CMakeLists.txt
+2
-1
makefile
dlib/test/makefile
+1
-0
svr_linear_trainer.cpp
dlib/test/svr_linear_trainer.cpp
+160
-0
No files found.
dlib/svm.h
View file @
0cdbbe85
...
...
@@ -48,6 +48,7 @@
#include "svm/sequence_labeler.h"
#include "svm/assignment_function.h"
#include "svm/active_learning.h"
#include "svm/svr_linear_trainer.h"
#endif // DLIB_SVm_HEADER
...
...
dlib/svm/svr_linear_trainer.h
0 → 100644
View file @
0cdbbe85
// Copyright (C) 2013 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#ifndef DLIB_SVR_LINEAR_TrAINER_H__
#define DLIB_SVR_LINEAR_TrAINER_H__
#include "svr_linear_trainer_abstract.h"
#include "../algs.h"
#include "../optimization.h"
#include "function.h"
#include "kernel.h"
#include "sparse_vector.h"
#include <iostream>
namespace
dlib
{
// ----------------------------------------------------------------------------------------
template <typename matrix_type, typename sample_type>
class oca_problem_linear_svr : public oca_problem<matrix_type>
{
public:
    /*
        This class is used as part of the implementation of the svr_linear_trainer
        defined towards the end of this file.  It exposes the epsilon-insensitive
        regression risk in the form the oca optimizer expects.
    */

    typedef typename matrix_type::type scalar_type;

    oca_problem_linear_svr (
        const scalar_type C_,
        const std::vector<sample_type>& samples_,
        const std::vector<scalar_type>& targets_,
        const bool be_verbose_,
        const scalar_type eps_,
        const scalar_type eps_insensitivity_,
        const unsigned long max_iter
    ) :
        samples(samples_),
        targets(targets_),
        C(C_),
        be_verbose(be_verbose_),
        eps(eps_),
        eps_insensitivity(eps_insensitivity_),
        max_iterations(max_iter)
    {
    }

    // The SVM regularization parameter handed to the oca solver.
    virtual scalar_type get_c (
    ) const
    {
        return C;
    }

    virtual long get_num_dimensions (
    ) const
    {
        // plus one for the bias term
        return max_index_plus_one(samples) + 1;
    }

    virtual bool optimization_status (
        scalar_type current_objective_value,
        scalar_type current_error_gap,
        scalar_type current_risk_value,
        scalar_type current_risk_gap,
        unsigned long num_cutting_planes,
        unsigned long num_iterations
    ) const
    {
        if (be_verbose)
        {
            using namespace std;
            cout << "objective: " << current_objective_value << endl;
            cout << "objective gap: " << current_error_gap << endl;
            cout << "risk: " << current_risk_value << endl;
            cout << "risk gap: " << current_risk_gap << endl;
            cout << "num planes: " << num_cutting_planes << endl;
            cout << "iter: " << num_iterations << endl;
            cout << endl;
        }

        // Stop once we hit the iteration cap, or once the risk gap is small
        // relative to the width of the insensitivity band.
        return num_iterations >= max_iterations ||
               current_risk_gap < eps * eps_insensitivity;
    }

    virtual bool risk_has_lower_bound (
        scalar_type& lower_bound
    ) const
    {
        // The epsilon-insensitive loss is never negative.
        lower_bound = 0;
        return true;
    }

    virtual void get_risk (
        matrix_type& w,
        scalar_type& risk,
        matrix_type& subgradient
    ) const
    {
        subgradient.set_size(w.size(), 1);
        subgradient = 0;
        risk = 0;

        // The last element of w holds the bias term; the rest is the plane normal.
        const long last = w.size() - 1;

        // Accumulate the risk and its subgradient over every training sample at
        // the current solution point w.
        for (unsigned long i = 0; i < samples.size(); ++i)
        {
            const scalar_type pred = dot(colm(w, 0, last), samples[i]) - w(last);
            const scalar_type err = std::abs(pred - targets[i]);

            // Samples inside the epsilon-insensitive band contribute nothing.
            if (err > eps_insensitivity)
            {
                if (pred < targets[i])
                {
                    subtract_from(subgradient, samples[i]);
                    subgradient(last) += 1;
                }
                else
                {
                    add_to(subgradient, samples[i]);
                    subgradient(last) -= 1;
                }

                risk += err - eps_insensitivity;
            }
        }
    }

private:

// -----------------------------------------------------
// -----------------------------------------------------

    const std::vector<sample_type>& samples;
    const std::vector<scalar_type>& targets;
    const scalar_type C;
    const bool be_verbose;
    const scalar_type eps;
    const scalar_type eps_insensitivity;
    const unsigned long max_iterations;
};
// ----------------------------------------------------------------------------------------
template <typename matrix_type, typename sample_type, typename scalar_type>
oca_problem_linear_svr<matrix_type, sample_type> make_oca_problem_linear_svr (
    const scalar_type C,
    const std::vector<sample_type>& samples,
    const std::vector<scalar_type>& targets,
    const bool be_verbose,
    const scalar_type eps,
    const scalar_type eps_insensitivity,
    const unsigned long max_iterations
)
{
    // Convenience factory: deduces sample_type/scalar_type from the arguments so
    // callers only have to spell out the matrix_type template argument.
    typedef oca_problem_linear_svr<matrix_type, sample_type> problem_type;
    return problem_type(C, samples, targets, be_verbose, eps,
                        eps_insensitivity, max_iterations);
}
// ----------------------------------------------------------------------------------------
template
<
typename
K
>
class
svr_linear_trainer
{
public
:
typedef
K
kernel_type
;
typedef
typename
kernel_type
::
scalar_type
scalar_type
;
typedef
typename
kernel_type
::
sample_type
sample_type
;
typedef
typename
kernel_type
::
mem_manager_type
mem_manager_type
;
typedef
decision_function
<
kernel_type
>
trained_function_type
;
// You are getting a compiler error on this line because you supplied a non-linear kernel
// to the svr_linear_trainer object. You have to use one of the linear kernels with this
// trainer.
COMPILE_TIME_ASSERT
((
is_same_type
<
K
,
linear_kernel
<
sample_type
>
>::
value
||
is_same_type
<
K
,
sparse_linear_kernel
<
sample_type
>
>::
value
));
svr_linear_trainer
(
)
{
C
=
1
;
verbose
=
false
;
eps
=
0
.
01
;
max_iterations
=
10000
;
learn_nonnegative_weights
=
false
;
last_weight_1
=
false
;
eps_insensitivity
=
0
.
1
;
}
explicit
svr_linear_trainer
(
const
scalar_type
&
C_
)
{
// make sure requires clause is not broken
DLIB_ASSERT
(
C_
>
0
,
"
\t
svr_linear_trainer::svr_linear_trainer()"
<<
"
\n\t
C_ must be greater than 0"
<<
"
\n\t
C_: "
<<
C_
<<
"
\n\t
this: "
<<
this
);
C
=
C_
;
verbose
=
false
;
eps
=
0
.
01
;
max_iterations
=
10000
;
learn_nonnegative_weights
=
false
;
last_weight_1
=
false
;
eps_insensitivity
=
0
.
1
;
}
void
set_epsilon
(
scalar_type
eps_
)
{
// make sure requires clause is not broken
DLIB_ASSERT
(
eps_
>
0
,
"
\t
void svr_linear_trainer::set_epsilon()"
<<
"
\n\t
eps_ must be greater than 0"
<<
"
\n\t
eps_: "
<<
eps_
<<
"
\n\t
this: "
<<
this
);
eps
=
eps_
;
}
const
scalar_type
get_epsilon
(
)
const
{
return
eps
;
}
void
set_epsilon_insensitivity
(
scalar_type
eps_
)
{
// make sure requires clause is not broken
DLIB_ASSERT
(
eps_
>
0
,
"
\t
void svr_linear_trainer::set_epsilon_insensitivity(eps_)"
<<
"
\n\t
invalid inputs were given to this function"
<<
"
\n\t
eps_: "
<<
eps_
);
eps_insensitivity
=
eps_
;
}
const
scalar_type
get_epsilon_insensitivity
(
)
const
{
return
eps_insensitivity
;
}
unsigned
long
get_max_iterations
(
)
const
{
return
max_iterations
;
}
void
set_max_iterations
(
unsigned
long
max_iter
)
{
max_iterations
=
max_iter
;
}
void
be_verbose
(
)
{
verbose
=
true
;
}
void
be_quiet
(
)
{
verbose
=
false
;
}
bool
forces_last_weight_to_1
(
)
const
{
return
last_weight_1
;
}
void
force_last_weight_to_1
(
bool
should_last_weight_be_1
)
{
last_weight_1
=
should_last_weight_be_1
;
}
void
set_oca
(
const
oca
&
item
)
{
solver
=
item
;
}
const
oca
get_oca
(
)
const
{
return
solver
;
}
const
kernel_type
get_kernel
(
)
const
{
return
kernel_type
();
}
bool
learns_nonnegative_weights
(
)
const
{
return
learn_nonnegative_weights
;
}
void
set_learns_nonnegative_weights
(
bool
value
)
{
learn_nonnegative_weights
=
value
;
}
void
set_c
(
scalar_type
C_
)
{
// make sure requires clause is not broken
DLIB_ASSERT
(
C_
>
0
,
"
\t
void svr_linear_trainer::set_c()"
<<
"
\n\t
C_ must be greater than 0"
<<
"
\n\t
C_: "
<<
C_
<<
"
\n\t
this: "
<<
this
);
C
=
C_
;
}
const
scalar_type
get_c
(
)
const
{
return
C
;
}
const
decision_function
<
kernel_type
>
train
(
const
std
::
vector
<
sample_type
>&
samples
,
const
std
::
vector
<
scalar_type
>&
targets
)
const
{
// make sure requires clause is not broken
DLIB_CASSERT
(
is_learning_problem
(
samples
,
targets
)
==
true
,
"
\t
decision_function svr_linear_trainer::train(samples, targets)"
<<
"
\n\t
invalid inputs were given to this function"
<<
"
\n\t
samples.size(): "
<<
samples
.
size
()
<<
"
\n\t
targets.size(): "
<<
targets
.
size
()
<<
"
\n\t
is_learning_problem(samples,targets): "
<<
is_learning_problem
(
samples
,
targets
)
);
typedef
matrix
<
scalar_type
,
0
,
1
>
w_type
;
w_type
w
;
const
unsigned
long
num_dims
=
max_index_plus_one
(
samples
);
unsigned
long
num_nonnegative
=
0
;
if
(
learn_nonnegative_weights
)
{
num_nonnegative
=
num_dims
;
}
unsigned
long
force_weight_1_idx
=
std
::
numeric_limits
<
unsigned
long
>::
max
();
if
(
last_weight_1
)
{
force_weight_1_idx
=
num_dims
-
1
;
}
solver
(
make_oca_problem_linear_svr
<
w_type
>
(
C
,
samples
,
targets
,
verbose
,
eps
,
eps_insensitivity
,
max_iterations
),
w
,
num_nonnegative
,
force_weight_1_idx
);
// put the solution into a decision function and then return it
decision_function
<
kernel_type
>
df
;
df
.
b
=
static_cast
<
scalar_type
>
(
w
(
w
.
size
()
-
1
));
df
.
basis_vectors
.
set_size
(
1
);
// Copy the plane normal into the output basis vector. The output vector might be a
// sparse vector container so we need to use this special kind of copy to handle that case.
// As an aside, the reason for using max_index_plus_one() and not just w.size()-1 is because
// doing it this way avoids an inane warning from gcc that can occur in some cases.
const
long
out_size
=
max_index_plus_one
(
samples
);
assign
(
df
.
basis_vectors
(
0
),
matrix_cast
<
scalar_type
>
(
colm
(
w
,
0
,
out_size
)));
df
.
alpha
.
set_size
(
1
);
df
.
alpha
(
0
)
=
1
;
return
df
;
}
private
:
scalar_type
C
;
oca
solver
;
scalar_type
eps
;
bool
verbose
;
unsigned
long
max_iterations
;
bool
learn_nonnegative_weights
;
bool
last_weight_1
;
scalar_type
eps_insensitivity
;
};
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_SVR_LINEAR_TrAINER_H__
dlib/svm/svr_linear_trainer_abstract.h
0 → 100644
View file @
0cdbbe85
// Copyright (C) 2013 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#undef DLIB_SVR_LINEAR_TrAINER_ABSTRACT_H__
#ifdef DLIB_SVR_LINEAR_TrAINER_ABSTRACT_H__
#include "sparse_vector_abstract.h"
#include "function_abstract.h"
#include "kernel_abstract.h"
#include "../algs.h"
namespace
dlib
{
// ----------------------------------------------------------------------------------------
template <
    typename K
    >
class svr_linear_trainer
{
    /*!
        REQUIREMENTS ON K
            Is either linear_kernel or sparse_linear_kernel.

        WHAT THIS OBJECT REPRESENTS
            This object implements a trainer for performing epsilon-insensitive support
            vector regression.  It uses the oca optimizer so it is very efficient at
            solving this problem when linear kernels are used, making it suitable for
            use with large datasets.

            For an introduction to support vector regression see the following paper:
                A Tutorial on Support Vector Regression by Alex J. Smola and Bernhard Scholkopf.
            Note that this object solves the version of support vector regression
            defined by equation (3) in the paper, except that we incorporate the bias
            term into the w vector by appending a 1 to the end of each sample.
    !*/

public:
    typedef K kernel_type;
    typedef typename kernel_type::scalar_type scalar_type;
    typedef typename kernel_type::sample_type sample_type;
    typedef typename kernel_type::mem_manager_type mem_manager_type;
    typedef decision_function<kernel_type> trained_function_type;

    svr_linear_trainer (
    );
    /*!
        ensures
            - This object is properly initialized and ready to be used to train a
              support vector regression function.
            - #get_oca() == oca() (i.e. an instance of oca with default parameters)
            - #get_c() == 1
            - #get_epsilon() == 0.01
            - #get_epsilon_insensitivity() == 0.1
            - This object will not be verbose unless be_verbose() is called
            - #get_max_iterations() == 10000
            - #learns_nonnegative_weights() == false
            - #forces_last_weight_to_1() == false
    !*/

    explicit svr_linear_trainer (
        const scalar_type& C
    );
    /*!
        requires
            - C > 0
        ensures
            - This object is properly initialized and ready to be used to train a
              support vector regression function.
            - #get_oca() == oca() (i.e. an instance of oca with default parameters)
            - #get_c() == C
            - #get_epsilon() == 0.01
            - #get_epsilon_insensitivity() == 0.1
            - This object will not be verbose unless be_verbose() is called
            - #get_max_iterations() == 10000
            - #learns_nonnegative_weights() == false
            - #forces_last_weight_to_1() == false
    !*/

    void set_epsilon (
        scalar_type eps_
    );
    /*!
        requires
            - eps_ > 0
        ensures
            - #get_epsilon() == eps_
    !*/

    const scalar_type get_epsilon (
    ) const;
    /*!
        ensures
            - returns the error epsilon that determines when training should stop.
              Smaller values may result in a more accurate solution but take longer to
              train.  You can think of this epsilon value as saying "solve the
              optimization problem until the average regression error is within epsilon
              of its optimal value".  See get_epsilon_insensitivity() below for a
              definition of "regression error".
    !*/

    void set_epsilon_insensitivity (
        scalar_type eps_
    );
    /*!
        requires
            - eps_ > 0
        ensures
            - #get_epsilon_insensitivity() == eps_
    !*/

    const scalar_type get_epsilon_insensitivity (
    ) const;
    /*!
        ensures
            - This object tries to find a function which minimizes the regression error
              on a training set.  This error is measured in the following way:
                - if (abs(predicted_value - true_labeled_value) < eps) then
                    - The error is 0.  That is, any function which gets within eps of
                      the correct output is good enough.
                - else
                    - The error grows linearly once it gets bigger than eps.
              So epsilon-insensitive regression means we do regression but stop trying
              to fit a data point once it is "close enough".  This function returns
              that eps value which controls what we mean by "close enough".
    !*/

    unsigned long get_max_iterations (
    ) const;
    /*!
        ensures
            - returns the maximum number of iterations the SVM optimizer is allowed to
              run before it is required to stop and return a result.
    !*/

    void set_max_iterations (
        unsigned long max_iter
    );
    /*!
        ensures
            - #get_max_iterations() == max_iter
    !*/

    void be_verbose (
    );
    /*!
        ensures
            - This object will print status messages to standard out so that a user can
              observe the progress of the algorithm.
    !*/

    void be_quiet (
    );
    /*!
        ensures
            - this object will not print anything to standard out
    !*/

    bool forces_last_weight_to_1 (
    ) const;
    /*!
        ensures
            - returns true if this trainer has the constraint that the last weight in
              the learned parameter vector must be 1.  This is the weight corresponding
              to the feature in the training vectors with the highest dimension.
            - Forcing the last weight to 1 also disables the bias and therefore the b
              field of the learned decision_function will be 0 when forces_last_weight_to_1() == true.
    !*/

    void force_last_weight_to_1 (
        bool should_last_weight_be_1
    );
    /*!
        ensures
            - #forces_last_weight_to_1() == should_last_weight_be_1
    !*/

    void set_oca (
        const oca& item
    );
    /*!
        ensures
            - #get_oca() == item
    !*/

    const oca get_oca (
    ) const;
    /*!
        ensures
            - returns a copy of the optimizer used to solve the SVM problem.
    !*/

    const kernel_type get_kernel (
    ) const;
    /*!
        ensures
            - returns a copy of the kernel function in use by this object.  Since the
              linear kernels don't have any parameters this function just returns
              kernel_type()
    !*/

    bool learns_nonnegative_weights (
    ) const;
    /*!
        ensures
            - The output of training is a weight vector and a bias value.  These two
              things define the resulting decision function.  That is, the decision
              function simply takes the dot product between the learned weight vector
              and a test sample, then subtracts the bias value.  Therefore, if
              learns_nonnegative_weights() == true then the resulting learned weight
              vector will always have non-negative entries.  The bias value may still
              be negative though.
    !*/

    void set_learns_nonnegative_weights (
        bool value
    );
    /*!
        ensures
            - #learns_nonnegative_weights() == value
    !*/

    void set_c (
        scalar_type C_
    );
    /*!
        requires
            - C_ > 0
        ensures
            - #get_c() == C_
    !*/

    const scalar_type get_c (
    ) const;
    /*!
        ensures
            - returns the SVM regularization parameter.  It is the parameter that
              determines the trade off between trying to fit the training data exactly
              or allowing more errors but hopefully improving the generalization of the
              resulting classifier.  Larger values encourage exact fitting while
              smaller values of C may encourage better generalization.
    !*/

    const decision_function<kernel_type> train (
        const std::vector<sample_type>& samples,
        const std::vector<scalar_type>& targets
    ) const;
    /*!
        requires
            - is_learning_problem(samples,targets) == true
        ensures
            - performs support vector regression given the training samples and targets.
            - returns a decision_function F with the following properties:
                - F(new_sample) == predicted target value for new_sample
                - F.alpha.size() == 1
                - F.basis_vectors.size() == 1
                - F.alpha(0) == 1
    !*/
};
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_SVR_LINEAR_TrAINER_ABSTRACT_H__
dlib/test/CMakeLists.txt
View file @
0cdbbe85
...
...
@@ -124,15 +124,16 @@ set (tests
svm.cpp
svm_multiclass_linear.cpp
svm_struct.cpp
svr_linear_trainer.cpp
symmetric_matrix_cache.cpp
thread_pool.cpp
threads.cpp
timer.cpp
tokenizer.cpp
trust_region.cpp
vectorstream.cpp
tuple.cpp
type_safe_union.cpp
vectorstream.cpp
)
# create a variable called target_name and set it to the string "test"
...
...
dlib/test/makefile
View file @
0cdbbe85
...
...
@@ -139,6 +139,7 @@ SRC += svm_c_linear_dcd.cpp
SRC
+=
svm.cpp
SRC
+=
svm_multiclass_linear.cpp
SRC
+=
svm_struct.cpp
SRC
+=
svr_linear_trainer.cpp
SRC
+=
symmetric_matrix_cache.cpp
SRC
+=
thread_pool.cpp
SRC
+=
threads.cpp
...
...
dlib/test/svr_linear_trainer.cpp
0 → 100644
View file @
0cdbbe85
// Copyright (C) 2013 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#include <dlib/matrix.h>
#include <sstream>
#include <string>
#include <ctime>
#include <vector>
#include <dlib/statistics.h>
#include "tester.h"
#include <dlib/svm.h>
namespace
{
using
namespace
test
;
using
namespace
dlib
;
using
namespace
std
;
// Logger used by all the tests in this translation unit.
logger dlog("test.svr_linear_trainer");

// Column-vector sample used as the default dense sample type.
typedef matrix<double, 0, 1> sample_type;

// NOTE(review): this sparse typedef does not appear to be referenced in the
// visible tests (test1 declares its own sparse type with unsigned long keys);
// confirm before removing.
typedef std::vector<std::pair<unsigned int, double> > sparse_sample_type;
// ----------------------------------------------------------------------------------------
// The classic sinc function, sin(x)/x, with the removable singularity at
// x == 0 filled in by its limit value of 1.
double sinc(double x)
{
    return (x == 0) ? 1.0 : std::sin(x)/x;
}
template
<
typename
scalar_type
>
void
test1
()
{
typedef
matrix
<
scalar_type
,
0
,
1
>
sample_type
;
typedef
radial_basis_kernel
<
sample_type
>
kernel_type
;
print_spinner
();
std
::
vector
<
sample_type
>
samples
;
std
::
vector
<
scalar_type
>
targets
;
// The first thing we do is pick a few training points from the sinc() function.
sample_type
m
(
1
);
for
(
scalar_type
x
=
-
10
;
x
<=
4
;
x
+=
1
)
{
m
(
0
)
=
x
;
samples
.
push_back
(
m
);
targets
.
push_back
(
sinc
(
x
)
+
1.1
);
}
randomize_samples
(
samples
,
targets
);
empirical_kernel_map
<
kernel_type
>
ekm
;
ekm
.
load
(
kernel_type
(
0.1
),
samples
);
for
(
unsigned
long
i
=
0
;
i
<
samples
.
size
();
++
i
)
samples
[
i
]
=
ekm
.
project
(
samples
[
i
]);
svr_linear_trainer
<
linear_kernel
<
sample_type
>
>
linear_trainer
;
linear_trainer
.
set_c
(
30
);
linear_trainer
.
set_epsilon_insensitivity
(
0.001
);
matrix
<
double
>
res
=
cross_validate_regression_trainer
(
linear_trainer
,
samples
,
targets
,
5
);
dlog
<<
LINFO
<<
"MSE and R-Squared: "
<<
res
;
DLIB_TEST
(
res
(
0
)
<
1e-4
);
DLIB_TEST
(
res
(
1
)
>
0.99
);
dlib
::
rand
rnd
;
samples
.
clear
();
targets
.
clear
();
std
::
vector
<
scalar_type
>
noisefree_targets
;
for
(
scalar_type
x
=
0
;
x
<=
5
;
x
+=
0.1
)
{
m
(
0
)
=
x
;
samples
.
push_back
(
matrix_cast
<
scalar_type
>
(
linpiece
(
m
,
linspace
(
0
,
5
,
20
))));
targets
.
push_back
(
x
*
x
+
rnd
.
get_random_gaussian
());
noisefree_targets
.
push_back
(
x
*
x
);
}
linear_trainer
.
set_learns_nonnegative_weights
(
true
);
linear_trainer
.
set_epsilon_insensitivity
(
1.0
);
decision_function
<
linear_kernel
<
sample_type
>
>
df2
=
linear_trainer
.
train
(
samples
,
targets
);
print_spinner
();
res
=
test_regression_function
(
df2
,
samples
,
noisefree_targets
);
dlog
<<
LINFO
<<
"MSE and R-Squared: "
<<
res
;
DLIB_TEST
(
res
(
0
)
<
0.15
);
DLIB_TEST
(
res
(
1
)
>
0.98
);
DLIB_TEST
(
df2
.
basis_vectors
.
size
()
==
1
);
DLIB_TEST
(
max
(
df2
.
basis_vectors
(
0
))
>=
0
);
linear_trainer
.
force_last_weight_to_1
(
true
);
df2
=
linear_trainer
.
train
(
samples
,
targets
);
DLIB_TEST
(
std
::
abs
(
df2
.
basis_vectors
(
0
)(
samples
[
0
].
size
()
-
1
)
-
1.0
)
<
1e-14
);
res
=
test_regression_function
(
df2
,
samples
,
noisefree_targets
);
dlog
<<
LINFO
<<
"MSE and R-Squared: "
<<
res
;
DLIB_TEST
(
res
(
0
)
<
0.20
);
DLIB_TEST
(
res
(
1
)
>
0.98
);
// convert into sparse vectors and try it out
typedef
std
::
vector
<
std
::
pair
<
unsigned
long
,
scalar_type
>
>
sparse_samp
;
std
::
vector
<
sparse_samp
>
ssamples
;
for
(
unsigned
long
i
=
0
;
i
<
samples
.
size
();
++
i
)
{
sparse_samp
s
;
for
(
long
j
=
0
;
j
<
samples
[
i
].
size
();
++
j
)
s
.
push_back
(
make_pair
(
j
,
samples
[
i
](
j
)));
ssamples
.
push_back
(
s
);
}
svr_linear_trainer
<
sparse_linear_kernel
<
sparse_samp
>
>
strainer
;
strainer
.
set_learns_nonnegative_weights
(
true
);
strainer
.
set_epsilon_insensitivity
(
1.0
);
strainer
.
set_c
(
30
);
decision_function
<
sparse_linear_kernel
<
sparse_samp
>
>
df
;
df
=
strainer
.
train
(
ssamples
,
targets
);
res
=
test_regression_function
(
df
,
ssamples
,
noisefree_targets
);
dlog
<<
LINFO
<<
"MSE and R-Squared: "
<<
res
;
DLIB_TEST
(
res
(
0
)
<
0.15
);
DLIB_TEST
(
res
(
1
)
>
0.98
);
DLIB_TEST
(
df2
.
basis_vectors
.
size
()
==
1
);
DLIB_TEST
(
max
(
sparse_to_dense
(
df2
.
basis_vectors
(
0
)))
>=
0
);
}
// ----------------------------------------------------------------------------------------
// Registers this test suite with dlib's test harness; the harness invokes
// perform_test() when the "test_svr_linear_trainer" test is run.
class tester_svr_linear_trainer : public tester
{
public:
    tester_svr_linear_trainer (
    ) :
        tester("test_svr_linear_trainer",
               "Runs tests on the svr_linear_trainer.")
    {}

    void perform_test (
    )
    {
        // Run the whole suite once per scalar type.
        dlog << LINFO << "TEST double";
        test1<double>();

        dlog << LINFO << "TEST float";
        test1<float>();
    }
} a;
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment