Commit bbeac285 authored by Evgeniy Fominov, committed by Davis E. King

Shape predictor trainer optimizations (#126)

* Shape predictor trainer optimizations

* Fixed a performance leak in single-thread mode & added VS2010 support
parent 6eb5bd80
@@ -148,6 +148,7 @@ namespace dlib
- #get_num_test_splits() == 20
- #get_feature_pool_region_padding() == 0
- #get_random_seed() == ""
- #get_num_threads() == 0
- This object will not be verbose
!*/
@@ -367,6 +368,26 @@ namespace dlib
- #get_num_test_splits() == num
!*/
unsigned long get_num_threads (
) const;
/*!
ensures
- When running the training process, some parts of it can be made parallel
using CPU threads via dlib's parallel_for() extension and an internally
created thread_pool. When get_num_threads() == 0, the trainer will not
create any threads and all processing will be done in the calling thread.
!*/
void set_num_threads (
unsigned long num
);
/*!
requires
- num >= 0
ensures
- #get_num_threads() == num
!*/
void be_verbose (
);
/*!
......
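For illustration, a minimal sketch of the thread-count API documented above. shape_predictor_trainer, set_num_threads(), and get_num_threads() come from the diff; the value 4 is just an arbitrary example.

#include <dlib/image_processing.h>

int main()
{
    dlib::shape_predictor_trainer trainer;

    // By default no extra threads are created and everything runs in the
    // calling thread (get_num_threads() == 0, as documented above).
    // Requesting e.g. 4 threads lets the trainer parallelize the parts of
    // training that support it via parallel_for() and an internal thread_pool.
    trainer.set_num_threads(4);

    return trainer.get_num_threads() == 4 ? 0 : 1;
}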
@@ -39,7 +39,7 @@ std::vector<std::vector<double> > get_interocular_distances (
// ----------------------------------------------------------------------------------------
int main(int argc, char** argv)
{
try
{
// In this example we are going to train a shape_predictor based on the
@@ -108,6 +108,9 @@ int main(int argc, char** argv)
trainer.set_nu(0.05);
trainer.set_tree_depth(2);
// Some parts of the training process can be parallelized.
// The trainer will use this many threads when possible.
trainer.set_num_threads(2);
// Tell the trainer to print status messages to the console so we can
// see how long the training will take.
......
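Below is a condensed, self-contained sketch of how set_num_threads(2) fits into a full training run, following the pattern of dlib's shape predictor training example. The XML dataset path and the output file name are placeholders; the other trainer settings mirror the ones shown in the diff plus dlib's usual oversampling setting.

#include <dlib/image_processing.h>
#include <dlib/data_io.h>
#include <iostream>

using namespace dlib;

int main()
{
    try
    {
        // Landmark training data in imglab XML format (path is illustrative).
        dlib::array<array2d<unsigned char> > images_train;
        std::vector<std::vector<full_object_detection> > faces_train;
        load_image_dataset(images_train, faces_train, "training_with_face_landmarks.xml");

        shape_predictor_trainer trainer;
        trainer.set_oversampling_amount(300);
        trainer.set_nu(0.05);
        trainer.set_tree_depth(2);
        // Use 2 CPU threads for the parallelizable parts of training.
        trainer.set_num_threads(2);
        trainer.be_verbose();

        // Train the model and save it to disk.
        shape_predictor sp = trainer.train(images_train, faces_train);
        serialize("sp.dat") << sp;
    }
    catch (std::exception& e)
    {
        std::cout << e.what() << std::endl;
    }
    return 0;
}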