Commit d73f58ae authored by Davis King

Added running_gradient

parent 6124442f
// Copyright (C) 2016 Davis E. King (davis@dlib.net)
// License: Boost Software License. See LICENSE.txt for the full license.
#ifndef DLIB_RuNNING_GRADIENT_Hh_
#define DLIB_RuNNING_GRADIENT_Hh_

#include "running_gradient_abstract.h"
#include "../algs.h"
#include <cmath>
#include "../matrix.h"

namespace dlib
{
    class running_gradient
    {
    public:

        running_gradient (
        )
        {
            clear();
        }

        void clear(
        )
        {
            n = 0;
            R = identity_matrix<double>(2)*1e6;
            w = 0;
            residual_squared = 0;
        }

        double current_n (
        ) const
        {
            return n;
        }

        void add(
            double y
        )
        {
            matrix<double,2,1> x;
            x = n, 1;
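            // x is the regressor for this sample: the model fit below predicts
            // y as w(0)*n + w(1), so w(0) ends up being the slope of the fitted
            // line and w(1) its intercept.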

            // Do recursive least squares computations
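            // R approximates inv(X'*X) over all samples seen so far (it starts at
            // 1e6*I in clear(), which acts like a very weak ridge prior), and the
            // update of R below is a Sherman-Morrison rank-1 update.  temp is its
            // 1 + x'*R*x normalization factor.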
            const double temp = 1 + trans(x)*R*x;
            matrix<double,2,1> tmp = R*x;
            R = R - (tmp*trans(tmp))/temp;
            // R should always be symmetric.  This line improves the numeric stability of this algorithm.
            R = 0.5*(R + trans(R));
            w = w + R*x*(y - trans(x)*w);

            // Also, recursively keep track of the residual error between the given value
            // and what our linear predictor outputs.
            residual_squared = residual_squared + std::pow((y - trans(x)*w),2.0)*temp;
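            // Since w was just updated, y - trans(x)*w is the a posteriori error e.
            // The a priori error is e*temp, so multiplying the squared a posteriori
            // error by temp gives the product of the two errors, which is the standard
            // recursive update for the residual sum of squares.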
            ++n;
        }

        double gradient (
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(current_n() > 1,
                "\t double running_gradient::gradient()"
                << "\n\t You must add more values into this object before calling this function."
                << "\n\t this: " << this
            );

            return w(0);
        }

        double standard_error (
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(current_n() > 2,
                "\t double running_gradient::standard_error()"
                << "\n\t You must add more values into this object before calling this function."
                << "\n\t this: " << this
            );

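            // For x values 0,1,...,n-1 the variance of the least squares slope is
            // sigma^2 / sum((x_i - mean(x))^2), and sum((x_i - mean(x))^2) = (n^3-n)/12.
            // s estimates sigma^2 from the residuals (with n-2 degrees of freedom, since
            // a slope and an intercept were fit), which gives the formula below.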
            const double s = residual_squared/(n-2);
            const double adjust = 12.0/(std::pow(current_n(),3.0) - current_n());
            return std::sqrt(s*adjust);
        }

        double probability_gradient_less_than (
            double thresh
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(current_n() > 2,
                "\t double running_gradient::probability_gradient_less_than()"
                << "\n\t You must add more values into this object before calling this function."
                << "\n\t this: " << this
            );

            return normal_cdf(thresh, gradient(), standard_error());
        }

        double probability_gradient_greater_than (
            double thresh
        ) const
        {
            // make sure requires clause is not broken
            DLIB_ASSERT(current_n() > 2,
                "\t double running_gradient::probability_gradient_greater_than()"
                << "\n\t You must add more values into this object before calling this function."
                << "\n\t this: " << this
            );

            return 1-probability_gradient_less_than(thresh);
        }

    private:

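        // The CDF of a Gaussian with the given mean and standard deviation, computed
        // via the complementary error function from <cmath>.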
        static double normal_cdf(double value, double mean, double stddev)
        {
            value = (value-mean)/stddev;
            return 0.5 * std::erfc(-value / std::sqrt(2.0));
        }

        double n;
        matrix<double,2,2> R;
        matrix<double,2,1> w;
        double residual_squared;
    };
}

#endif // DLIB_RuNNING_GRADIENT_Hh_

// Copyright (C) 2016 Davis E. King (davis@dlib.net)
// License: Boost Software License. See LICENSE.txt for the full license.
#undef DLIB_RuNNING_GRADIENT_ABSTRACT_Hh_
#ifdef DLIB_RuNNING_GRADIENT_ABSTRACT_Hh_
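// Note that the #undef followed by #ifdef means this file is never actually
// compiled; like the other *_abstract.h files in dlib it exists purely as
// documentation.
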
namespace dlib
{
    class running_gradient
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object is a tool for estimating if a noisy sequence of numbers is
                trending up or down and by how much.  It does this by finding the least
                squares fit of a line to the data and then allows you to perform a
                statistical test on the slope of that line.
        !*/

    public:

        running_gradient (
        );
        /*!
            ensures
                - #current_n() == 0
        !*/

        void clear(
        );
        /*!
            ensures
                - #current_n() == 0
                - this object has its initial value
                - clears all memory of any previous data points
        !*/

        double current_n (
        ) const;
        /*!
            ensures
                - returns the number of values given to this object by add().
        !*/

        void add(
            double y
        );
        /*!
            ensures
                - Updates the gradient() and standard_error() estimates in this object
                  based on the new y value.
                - #current_n() == current_n() + 1
        !*/

        double gradient (
        ) const;
        /*!
            requires
                - current_n() > 1
            ensures
                - If we consider the values given to add() as time series data, we can
                  estimate the rate-of-change of those values.  That is, how much,
                  typically, do those values change from sample to sample?  The
                  gradient() function returns the current estimate.  It does this by
                  finding the least squares fit of a line to the data given to add()
                  and returning the slope of this line.
        !*/

        double standard_error (
        ) const;
        /*!
            requires
                - current_n() > 2
            ensures
                - returns the standard deviation of the estimate of gradient().
        !*/

        double probability_gradient_less_than (
            double thresh
        ) const;
        /*!
            requires
                - current_n() > 2
            ensures
                - If we can assume the values given to add() are linearly related to each
                  other and corrupted by Gaussian additive noise then our estimate of
                  gradient() is a random variable with a mean value of gradient() and a
                  standard deviation of standard_error().  This lets us compute the
                  probability that the true gradient of the data is less than thresh,
                  which is what this function returns.
        !*/

        double probability_gradient_greater_than (
            double thresh
        ) const;
        /*!
            requires
                - current_n() > 2
            ensures
                - returns 1-probability_gradient_less_than(thresh)
        !*/
    };
}

#endif // DLIB_RuNNING_GRADIENT_ABSTRACT_Hh_
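
A minimal usage sketch (not part of the commit): assuming the new header ends up at
dlib/statistics/running_gradient.h, which its "../algs.h" style includes suggest, the
class would be driven like this.  The noisy upward-trending data is made up for
illustration.

#include <cstdlib>
#include <iostream>
#include <dlib/statistics/running_gradient.h>

int main()
{
    dlib::running_gradient g;

    // Feed in a noisy sequence that trends upward at a rate of about 0.5 per sample.
    for (int i = 0; i < 100; ++i)
        g.add(0.5*i + (std::rand()%100)/100.0);

    std::cout << "estimated slope: " << g.gradient() << std::endl;
    std::cout << "standard error:  " << g.standard_error() << std::endl;
    std::cout << "P(slope > 0.25): " << g.probability_gradient_greater_than(0.25) << std::endl;
    return 0;
}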