Commit 40e8ce17 authored Feb 18, 2016 by Davis King
merged; parents 1d5c7ac0, ce517cfa
Showing 3 changed files with 123 additions and 8 deletions (+123, -8):

    dlib/svm/svm_c_linear_dcd_trainer.h             +43  -8
    dlib/svm/svm_c_linear_dcd_trainer_abstract.h    +19  -0
    dlib/test/svm_c_linear_dcd.cpp                  +61  -0
dlib/svm/svm_c_linear_dcd_trainer.h

@@ -47,7 +47,8 @@ namespace dlib
             verbose(false),
             have_bias(true),
             last_weight_1(false),
-            do_shrinking(true)
+            do_shrinking(true),
+            do_svm_l2(false)
         {
         }

@@ -61,7 +62,8 @@ namespace dlib
             verbose(false),
             have_bias(true),
             last_weight_1(false),
-            do_shrinking(true)
+            do_shrinking(true),
+            do_svm_l2(false)
         {
             // make sure requires clause is not broken
             DLIB_ASSERT(0 < C_,

@@ -104,6 +106,13 @@ namespace dlib
             bool enabled
         ) { do_shrinking = enabled; }

+        bool solving_svm_l2_problem (
+        ) const { return do_svm_l2; }
+
+        void solve_svm_l2_problem (
+            bool enabled
+        ) { do_svm_l2 = enabled; }
+
         void be_verbose (
         )
         {

@@ -219,12 +228,17 @@ namespace dlib
         private:

             template <
-                typename in_sample_vector_type
+                typename in_sample_vector_type,
+                typename in_scalar_vector_type
                 >
             void init (
                 const in_sample_vector_type& x,
+                const in_scalar_vector_type& y,
                 bool have_bias_,
-                bool last_weight_1_
+                bool last_weight_1_,
+                bool do_svm_l2_,
+                scalar_type Cpos,
+                scalar_type Cneg
             )
             {
                 const long new_dims = max_index_plus_one(x);

@@ -337,6 +351,14 @@ namespace dlib
                     {
                         index.push_back(i);
                     }
+
+                    if (do_svm_l2_)
+                    {
+                        if (y(i) > 0)
+                            Q.back() += 1/(2*Cpos);
+                        else
+                            Q.back() += 1/(2*Cneg);
+                    }
                 }

                 if (last_weight_1)

@@ -490,18 +512,22 @@ namespace dlib
             }
 #endif

-            state.init(x,have_bias,last_weight_1);
+            state.init(x,y,have_bias,last_weight_1,do_svm_l2,Cpos,Cneg);

             std::vector<scalar_type>& alpha = state.alpha;
             scalar_vector_type& w = state.w;
             std::vector<long>& index = state.index;
             const long dims = state.dims;

             unsigned long active_size = index.size();

             scalar_type PG_max_prev = std::numeric_limits<scalar_type>::infinity();
             scalar_type PG_min_prev = -std::numeric_limits<scalar_type>::infinity();

+            const scalar_type Dii_pos = 1/(2*Cpos);
+            const scalar_type Dii_neg = 1/(2*Cneg);
+
             // main loop
             for (unsigned long iter = 0; iter < max_iterations; ++iter)
             {

@@ -521,8 +547,16 @@ namespace dlib
                 {
                     const long i = index[ii];

-                    const scalar_type G = y(i)*dot(w, x(i)) - 1;
+                    scalar_type G = y(i)*dot(w, x(i)) - 1;
+                    if (do_svm_l2)
+                    {
+                        if (y(i) > 0)
+                            G += Dii_pos*alpha[i];
+                        else
+                            G += Dii_neg*alpha[i];
+                    }
                     const scalar_type C = (y(i) > 0) ? Cpos : Cneg;
+                    const scalar_type U = do_svm_l2 ? std::numeric_limits<scalar_type>::infinity() : C;

                     scalar_type PG = 0;
                     if (alpha[i] == 0)

@@ -539,7 +573,7 @@ namespace dlib
                         if (G < 0)
                             PG = G;
                     }
-                    else if (alpha[i] == C)
+                    else if (alpha[i] == U)
                     {
                         if (G < PG_min_prev)
                         {

@@ -567,7 +601,7 @@ namespace dlib
                     if (std::abs(PG) > 1e-12)
                     {
                         const scalar_type alpha_old = alpha[i];
-                        alpha[i] = std::min(std::max(alpha[i] - G/state.Q[i], (scalar_type)0.0), C);
+                        alpha[i] = std::min(std::max(alpha[i] - G/state.Q[i], (scalar_type)0.0), U);
                         const scalar_type delta = (alpha[i]-alpha_old)*y(i);
                         add_to(w, x(i), delta);
                         if (have_bias && !last_weight_1)

@@ -660,6 +694,7 @@ namespace dlib
         bool have_bias;   // having a bias means we pretend all x vectors have an extra element which is always -1.
         bool last_weight_1;
         bool do_shrinking;
+        bool do_svm_l2;

     }; // end of class svm_c_linear_dcd_trainer
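For context (not part of the commit): the additions above match the L2-SVM case of the dual coordinate descent method described by Hsieh et al. in "A Dual Coordinate Descent Method for Large-scale Linear SVM" (2008). The hinge-loss and squared-hinge-loss problems share one dual form; writing C for the per-class penalty (Cpos or Cneg), a sketch of that dual is:

    \min_{\alpha}\ \tfrac{1}{2}\,\alpha^{T}\bar{Q}\,\alpha - \mathbf{1}^{T}\alpha
    \quad\text{subject to}\quad 0 \le \alpha_i \le U,
    \qquad \bar{Q}_{ij} = y_i\, y_j\, x_i^{T} x_j + D_{ij}

with D_{ii} = 0 and U = C for the hinge loss, versus D_{ii} = \tfrac{1}{2C} and U = \infty for the squared hinge loss, and per-coordinate gradient G_i = y_i\, w^{T} x_i - 1 + D_{ii}\,\alpha_i. That is exactly what the new Dii_pos/Dii_neg diagonal terms, the G += Dii*alpha[i] adjustment, and the relaxed upper bound U implement.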
dlib/svm/svm_c_linear_dcd_trainer_abstract.h

@@ -67,6 +67,7 @@ namespace dlib
                 - #forces_last_weight_to_1() == false
                 - #includes_bias() == true
                 - #shrinking_enabled() == true
+                - #solving_svm_l2_problem() == false
         !*/

         explicit svm_c_linear_dcd_trainer (

@@ -86,6 +87,7 @@ namespace dlib
                 - #forces_last_weight_to_1() == false
                 - #includes_bias() == true
                 - #shrinking_enabled() == true
+                - #solving_svm_l2_problem() == false
         !*/

         bool includes_bias (

@@ -140,6 +142,23 @@ namespace dlib
                 - #shrinking_enabled() == enabled
         !*/

+        bool solving_svm_l2_problem (
+        ) const;
+        /*!
+            ensures
+                - returns true if this solver will solve the L2 version of the SVM
+                  objective function.  That is, if solving_svm_l2_problem()==true then this
+                  object, rather than using the hinge loss, uses the squared hinge loss.
+        !*/
+
+        void solve_svm_l2_problem (
+            bool enabled
+        );
+        /*!
+            ensures
+                - #solving_svm_l2_problem() == enabled
+        !*/
+
         void be_verbose (
         );
         /*!
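A minimal usage sketch of the new option (not from the repository), based on the interface documented above and the same sparse kernel types used by the new test; the toy data here is made up purely for illustration:

#include <dlib/svm.h>
#include <map>
#include <vector>

int main()
{
    using namespace dlib;

    // Sparse samples, matching the types used in the new test below.
    typedef std::map<unsigned long, double> sample_type;
    typedef sparse_linear_kernel<sample_type> kernel_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;

    // A tiny, linearly separable toy set: positive samples put weight on
    // feature 0, negative samples on feature 1.
    for (int i = 0; i < 20; ++i)
    {
        sample_type s;
        if (i % 2 == 0) { s[0] = 1.0; labels.push_back(+1); }
        else            { s[1] = 1.0; labels.push_back(-1); }
        samples.push_back(s);
    }

    svm_c_linear_dcd_trainer<kernel_type> trainer;
    trainer.set_c(10);
    trainer.set_epsilon(1e-5);

    // New in this commit: use the squared hinge loss (the "L2" SVM problem)
    // instead of the default hinge loss.
    trainer.solve_svm_l2_problem(true);

    decision_function<kernel_type> df = trainer.train(samples, labels);

    sample_type test;
    test[0] = 1.0;
    return (df(test) > 0) ? 0 : 1;   // a positive score is expected here
}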
dlib/test/svm_c_linear_dcd.cpp

@@ -440,6 +440,65 @@ namespace

 // ----------------------------------------------------------------------------------------

+    void test_l2_version ()
+    {
+        typedef std::map<unsigned long,double> sample_type;
+        typedef sparse_linear_kernel<sample_type> kernel_type;
+
+        svm_c_linear_dcd_trainer<kernel_type> linear_trainer;
+        linear_trainer.set_c(10);
+        linear_trainer.set_epsilon(1e-5);
+
+        std::vector<sample_type> samples;
+        std::vector<double> labels;
+
+        // make an instance of a sample vector so we can use it below
+        sample_type sample;
+
+        // Now let's go into a loop and randomly generate 10000 samples.
+        double label = +1;
+        for (int i = 0; i < 1000; ++i)
+        {
+            // flip this flag
+            label *= -1;
+
+            sample.clear();
+
+            // now make a random sparse sample with at most 10 non-zero elements
+            for (int j = 0; j < 10; ++j)
+            {
+                int idx = std::rand()%100;
+                double value = static_cast<double>(std::rand())/RAND_MAX;
+
+                sample[idx] = label*value;
+            }
+
+            // Also save the samples we are generating so we can let the svm_c_linear_trainer
+            // learn from them below.
+            samples.push_back(sample);
+            labels.push_back(label);
+        }
+
+        decision_function<kernel_type> df = linear_trainer.train(samples, labels);
+
+        sample.clear();
+        sample[4] = 0.3;
+        sample[10] = 0.9;
+        DLIB_TEST(df(sample) > 0);
+
+        sample.clear();
+        sample[83] = -0.3;
+        sample[26] = -0.9;
+        sample[58] = -0.7;
+        DLIB_TEST(df(sample) < 0);
+
+        sample.clear();
+        sample[0] = -0.2;
+        sample[9] = -0.8;
+        DLIB_TEST(df(sample) < 0);
+    }
+
     class tester_svm_c_linear_dcd : public tester
     {
     public:

@@ -474,6 +533,8 @@ namespace
             print_spinner();
             test_sparse_1_sample(-1);
             print_spinner();
+
+            test_l2_version();
         }
     } a;