Commit a5b2454c authored by Davis King

Removed the dlib::sparse_vector namespace. I put everything from this
namespace into the normal dlib:: namespace so that code which works
with both sparse and dense vectors is more cohesive.
parent 2f2aecc9
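
For context, here is a minimal sketch (written for this note, not code from the commit; it assumes the <dlib/matrix.h> and <dlib/svm.h> headers) of the kind of generic code this change enables. The same unqualified calls now resolve for dense dlib::matrix vectors and for std::map-style sparse vectors alike:

    #include <map>
    #include <dlib/matrix.h>
    #include <dlib/svm.h>   // brings in the (formerly sparse_vector::) helpers

    // One template works for both sample kinds because dot() and
    // length_squared() now live directly in namespace dlib.
    template <typename sample_type>
    double squared_distance (const sample_type& a, const sample_type& b)
    {
        return dlib::length_squared(a) - 2*dlib::dot(a,b) + dlib::length_squared(b);
    }

    int main()
    {
        dlib::matrix<double,3,1> d1, d2;        // dense vectors
        d1 = 1,2,3;
        d2 = 4,5,6;

        std::map<unsigned long,double> s1, s2;  // sparse vectors
        s1[0] = 1; s1[2] = 3;
        s2[1] = 5;

        const double dd = squared_distance(d1,d2);  // uses the dense overloads
        const double ss = squared_distance(s1,s2);  // uses the sparse overloads
        return (dd > 0 && ss > 0) ? 0 : 1;
    }
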
@@ -298,7 +298,7 @@ namespace dlib
 // figure out how many elements we need in our dense vectors.
-const unsigned long max_dim = sparse_vector::max_index_plus_one(samples);
+const unsigned long max_dim = max_index_plus_one(samples);
 // now turn all the samples into dense samples
......
@@ -415,7 +415,8 @@ namespace dlib
 template <
 typename vector_type
 >
-unsigned long max_index_plus_one (
+typename enable_if<is_same_type<sample_pair, typename vector_type::value_type>,unsigned long>::type
+max_index_plus_one (
 const vector_type& pairs
 )
 {
......
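
Why this hunk grows by a line: with everything merged into dlib::, this graph-utils overload of max_index_plus_one (the one taking a vector of sample_pair objects) would otherwise collide with the sparse-vector overloads of the same name. My reading is that the enable_if return type makes this overload participate only when the container's value_type is sample_pair. A stripped-down sketch of the mechanism (dlib's real enable_if and is_same_type live in the library; everything below is simplified for illustration):

    #include <algorithm>

    // Minimal stand-ins for the library's metafunctions.
    template <bool B, typename T = void> struct enable_if_c {};
    template <typename T> struct enable_if_c<true,T> { typedef T type; };
    template <typename Cond, typename T = void>
    struct enable_if : public enable_if_c<Cond::value,T> {};

    template <typename T, typename U> struct is_same_type { static const bool value = false; };
    template <typename T> struct is_same_type<T,T> { static const bool value = true; };

    struct sample_pair { unsigned long index1, index2; }; // simplified

    // The return type only exists for containers of sample_pair, so for any
    // other vector_type this overload silently drops out of overload
    // resolution instead of clashing with the sparse-vector versions.
    template <typename vector_type>
    typename enable_if<is_same_type<sample_pair, typename vector_type::value_type>, unsigned long>::type
    max_index_plus_one (const vector_type& pairs)
    {
        unsigned long result = 0;
        for (unsigned long i = 0; i < pairs.size(); ++i)
            result = std::max(result, std::max(pairs[i].index1, pairs[i].index2) + 1);
        return result;
    }
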
@@ -201,7 +201,7 @@ namespace dlib
 // add an element into the stored data sequence
 dh_temp.s = x - prev_x;
 dh_temp.y = funct_derivative - prev_derivative;
-double temp = dlib::dot(dh_temp.s, dh_temp.y);
+double temp = dot(dh_temp.s, dh_temp.y);
 // only accept this bit of data if temp isn't zero
 if (std::abs(temp) > std::numeric_limits<double>::epsilon())
 {
......
@@ -105,9 +105,6 @@ namespace dlib
 result_type& assignment
 ) const
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 assignment.clear();
 matrix<double> cost;
......
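
A note on the deleted using-declaration pairs seen in this and several later hunks (my gloss, not commit text): before this change, generic code had to pull both overload sets into scope so that an unqualified call could match whichever vector kind it was handed, roughly like this:

    // Pre-commit workaround (sketch):
    template <typename sample_type>
    double generic_dot (const sample_type& a, const sample_type& b)
    {
        using dlib::sparse_vector::dot;  // sparse overloads
        using dlib::dot;                 // dense (matrix) overloads
        return dot(a,b);                 // resolution picks the matching one
    }

After the merge every overload lives in namespace dlib, so the pairs of using-declarations are dead weight and are simply deleted.
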
@@ -779,8 +779,6 @@ namespace dlib
 // Rather than doing something like, best_idx = index_of_max(weights*x-b)
 // we do the following somewhat more complex thing because this supports
 // both sparse and dense samples.
-using dlib::sparse_vector::dot;
-using dlib::dot;
 scalar_type best_val = dot(rowm(weights,0),x) - b(0);
 unsigned long best_idx = 0;
......
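
For context on the comment in the hunk above: index_of_max(weights*x - b) only compiles when x is a dense matrix, so the code instead scores each row through dot(), which has both dense and sparse overloads. A sketch of the pattern (simplified; weights, b, and x are assumed to be in scope):

    scalar_type best_val = dot(rowm(weights,0), x) - b(0);
    unsigned long best_idx = 0;
    // Manual argmax over the rows of the weight matrix so that dot() can
    // dispatch on whatever sample type x happens to be.
    for (long r = 1; r < weights.nr(); ++r)
    {
        const scalar_type val = dot(rowm(weights,r), x) - b(r);
        if (val > best_val)
        {
            best_val = val;
            best_idx = r;
        }
    }
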
@@ -57,8 +57,6 @@ namespace dlib
 result_type& labels
 ) const
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 labels.clear();
......
@@ -744,14 +744,14 @@ namespace dlib
 << "\n\tthis: " << this
 );
-return sparse_vector::distance(alpha,w , x.alpha,x.w);
+return distance(alpha,w , x.alpha,x.w);
 }
 scalar_type inner_product (
 const sample_type& x
 ) const
 {
-return alpha*sparse_vector::dot(w,x);
+return alpha*dot(w,x);
 }
 scalar_type inner_product (
@@ -765,20 +765,20 @@ namespace dlib
 << "\n\tthis: " << this
 );
-return alpha*x.alpha*sparse_vector::dot(w,x.w);
+return alpha*x.alpha*dot(w,x.w);
 }
 scalar_type squared_norm (
 ) const
 {
-return alpha*alpha* sparse_vector::length_squared(w);
+return alpha*alpha*length_squared(w);
 }
 scalar_type operator() (
 const sample_type& x
 ) const
 {
-return sparse_vector::distance(static_cast<scalar_type>(1), x, alpha, w);
+return distance(static_cast<scalar_type>(1), x, alpha, w);
 }
 scalar_type test_and_train (
@@ -1032,7 +1032,7 @@ namespace dlib
 if (samples_seen > 0)
 {
-scalar_type temp1 = sparse_vector::distance_squared(alpha,w , x.alpha,x.w);
+scalar_type temp1 = distance_squared(alpha,w , x.alpha,x.w);
 scalar_type temp2 = alpha*w_extra - x.alpha*x.w_extra;
 return std::sqrt(temp1 + temp2*temp2);
 }
@@ -1047,7 +1047,7 @@ namespace dlib
 ) const
 {
 if (samples_seen > 0)
-return alpha*(sparse_vector::dot(w,x) + w_extra*x_extra);
+return alpha*(dot(w,x) + w_extra*x_extra);
 else
 return 0;
 }
@@ -1064,7 +1064,7 @@ namespace dlib
 );
 if (samples_seen > 0 && x.samples_seen > 0)
-return alpha*x.alpha*(sparse_vector::dot(w,x.w) + w_extra*x.w_extra);
+return alpha*x.alpha*(dot(w,x.w) + w_extra*x.w_extra);
 else
 return 0;
 }
@@ -1073,7 +1073,7 @@ namespace dlib
 ) const
 {
 if (samples_seen > 0)
-return alpha*alpha*(sparse_vector::length_squared(w) + w_extra*w_extra);
+return alpha*alpha*(length_squared(w) + w_extra*w_extra);
 else
 return 0;
 }
@@ -1084,13 +1084,13 @@ namespace dlib
 {
 if (samples_seen > 0)
 {
-scalar_type temp1 = sparse_vector::distance_squared(1,x,alpha,w);
+scalar_type temp1 = distance_squared(1,x,alpha,w);
 scalar_type temp2 = x_extra - alpha*w_extra;
 return std::sqrt(temp1 + temp2*temp2);
 }
 else
 {
-return std::sqrt(sparse_vector::length_squared(x) + x_extra*x_extra);
+return std::sqrt(length_squared(x) + x_extra*x_extra);
 }
 }
@@ -1229,7 +1229,7 @@ namespace dlib
 temp_basis_vectors.set_size(1);
 temp_alpha.set_size(1);
 temp_basis_vectors(0) = sample_type(w.begin(), w.end());
-sparse_vector::scale_by(temp_basis_vectors(0), scale);
+dlib::scale_by(temp_basis_vectors(0), scale);
 temp_alpha(0) = alpha/scale;
 }
 else
@@ -1239,7 +1239,7 @@ namespace dlib
 temp_basis_vectors.set_size(2);
 temp_alpha.set_size(2);
 temp_basis_vectors(0) = sample_type(w.begin(), w.end());
-sparse_vector::scale_by(temp_basis_vectors(0), 2);
+dlib::scale_by(temp_basis_vectors(0), 2);
 temp_alpha(0) = alpha;
 temp_basis_vectors(1) = sample_type(w.begin(), w.end());
 temp_alpha(1) = -alpha;
......
@@ -39,7 +39,7 @@ namespace dlib
 const sample_type& b
 ) const
 {
-const scalar_type d = sparse_vector::distance_squared(a,b);
+const scalar_type d = distance_squared(a,b);
 return std::exp(-gamma*d);
 }
@@ -123,7 +123,7 @@ namespace dlib
 const sample_type& b
 ) const
 {
-return std::pow(gamma*(sparse_vector::dot(a,b)) + coef, degree);
+return std::pow(gamma*(dot(a,b)) + coef, degree);
 }
 sparse_polynomial_kernel& operator= (
@@ -211,7 +211,7 @@ namespace dlib
 const sample_type& b
 ) const
 {
-return std::tanh(gamma*(sparse_vector::dot(a,b)) + coef);
+return std::tanh(gamma*(dot(a,b)) + coef);
 }
 sparse_sigmoid_kernel& operator= (
@@ -284,7 +284,7 @@ namespace dlib
 const sample_type& b
 ) const
 {
-return sparse_vector::dot(a,b);
+return dot(a,b);
 }
 bool operator== (
......
This diff is collapsed.
This diff is collapsed.
@@ -149,9 +149,6 @@ namespace dlib
 feature_vector_type& psi
 ) const
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 matrix<double> cost;
 unsigned long size;
 if (force_assignment)
......
@@ -184,7 +184,7 @@ namespace dlib
 {
 cache[i].get_truth_joint_feature_vector_cached(ftemp);
-sparse_vector::subtract_from(psi_true, ftemp);
+subtract_from(psi_true, ftemp);
 }
 }
@@ -249,7 +249,7 @@ namespace dlib
 auto_mutex lock(self.accum_mutex);
 data.loss += loss;
-sparse_vector::add_to(data.subgradient, ftemp);
+add_to(data.subgradient, ftemp);
 }
 else
 {
@@ -268,12 +268,12 @@ namespace dlib
 loss_temp,
 ftemp);
 loss += loss_temp;
-sparse_vector::add_to(faccum, ftemp);
+add_to(faccum, ftemp);
 }
 auto_mutex lock(self.accum_mutex);
 data.loss += loss;
-sparse_vector::add_to(data.subgradient, faccum);
+add_to(data.subgradient, faccum);
 }
 }
......
@@ -59,9 +59,6 @@ namespace dlib
 - All vectors have non-zero size. That is, they have more than 0 dimensions.
 !*/
 {
-using namespace dlib::sparse_vector;
-using namespace dlib;
 if (!is_learning_problem(samples, labels))
 return false;
@@ -171,8 +168,6 @@ namespace dlib
 "\t structural_svm_graph_labeling_problem::structural_svm_graph_labeling_problem()"
 << "\n\t invalid inputs were given to this function");
-using namespace dlib::sparse_vector;
 // figure out how many dimensions are in the node and edge vectors.
 node_dims = 0;
@@ -321,9 +316,6 @@ namespace dlib
 feature_vector_type& psi
 ) const
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 const sample_type& samp = samples[idx];
 // setup the potts graph based on samples[idx] and current_solution.
......
@@ -75,9 +75,6 @@ namespace dlib
 unsigned long best_idx = 0;
-using sparse_vector::dot;
-using dlib::dot;
 const scalar_type dot_true_psi = dot(true_psi, current_solution);
 // figure out which element in the cache is the best (i.e. has the biggest risk)
@@ -338,7 +335,7 @@ namespace dlib
 {
 cache[i].get_truth_joint_feature_vector_cached(ftemp);
-sparse_vector::subtract_from(psi_true, ftemp);
+subtract_from(psi_true, ftemp);
 }
 }
@@ -364,7 +361,7 @@ namespace dlib
 scalar_type loss;
 separation_oracle_cached(i, w, loss, ftemp);
 total_loss += loss;
-sparse_vector::add_to(subgradient, ftemp);
+add_to(subgradient, ftemp);
 }
 }
......
@@ -64,7 +64,7 @@ namespace dlib
 auto_mutex lock(self.accum_mutex);
 total_loss += loss;
-sparse_vector::add_to(subgradient, ftemp);
+add_to(subgradient, ftemp);
 }
 else
 {
@@ -79,12 +79,12 @@ namespace dlib
 scalar_type loss_temp;
 self.separation_oracle_cached(i, w, loss_temp, ftemp);
 loss += loss_temp;
-sparse_vector::add_to(faccum, ftemp);
+add_to(faccum, ftemp);
 }
 auto_mutex lock(self.accum_mutex);
 total_loss += loss;
-sparse_vector::add_to(subgradient, faccum);
+add_to(subgradient, faccum);
 }
 }
......
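
The last few hunks all touch the same threaded-reduction pattern: each worker accumulates its separation-oracle results into a thread-local faccum and takes accum_mutex exactly once to merge, rather than locking per sample. Roughly (a sketch only; thread_begin, thread_end, and the surrounding members such as self, w, total_loss, and subgradient are assumed to be in scope):

    feature_vector_type ftemp;
    matrix<scalar_type,0,1> faccum(subgradient.size());
    faccum = 0;
    scalar_type loss = 0;
    for (long i = thread_begin; i < thread_end; ++i)
    {
        scalar_type loss_temp;
        self.separation_oracle_cached(i, w, loss_temp, ftemp);
        loss += loss_temp;
        add_to(faccum, ftemp);         // thread-local: no lock needed
    }
    auto_mutex lock(self.accum_mutex); // one lock per worker, not per sample
    total_loss += loss;
    add_to(subgradient, faccum);
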
@@ -68,7 +68,7 @@ namespace dlib
 ) const
 {
 // plus 1 for the bias term
-return sparse_vector::max_index_plus_one(samples) + 1;
+return max_index_plus_one(samples) + 1;
 }
 virtual bool optimization_status (
@@ -138,13 +138,13 @@ namespace dlib
 {
 if (labels(i) > 0)
 {
-sparse_vector::subtract_from(subgradient, samples(i), Cpos);
+subtract_from(subgradient, samples(i), Cpos);
 subgradient(subgradient.size()-1) += Cpos;
 }
 else
 {
-sparse_vector::add_to(subgradient, samples(i), Cneg);
+add_to(subgradient, samples(i), Cneg);
 subgradient(subgradient.size()-1) -= Cneg;
 }
@@ -171,8 +171,6 @@ namespace dlib
 - for all i: #dot_prods[i] == dot(colm(#w,0,w.size()-1), samples(i)) - #w(w.size()-1)
 !*/
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 // The reason for using w_size_m1 and not just w.size()-1 is because
 // doing it this way avoids an inane warning from gcc that can occur in some cases.
 const long w_size_m1 = w.size()-1;
@@ -558,8 +556,8 @@ namespace dlib
 // sparse vector container so we need to use this special kind of copy to handle that case.
 // As an aside, the reason for using max_index_plus_one() and not just w.size()-1 is because
 // doing it this way avoids an inane warning from gcc that can occur in some cases.
-const long out_size = sparse_vector::max_index_plus_one(x);
-sparse_vector::assign(df.basis_vectors(0), matrix_cast<scalar_type>(colm(w, 0, out_size)));
+const long out_size = max_index_plus_one(x);
+assign(df.basis_vectors(0), matrix_cast<scalar_type>(colm(w, 0, out_size)));
 df.alpha.set_size(1);
 df.alpha(0) = 1;
......
@@ -50,7 +50,7 @@ namespace dlib
 samples(samples_),
 labels(labels_),
 distinct_labels(select_all_distinct_labels(labels_)),
-dims(sparse_vector::max_index_plus_one(samples_)+1) // +1 for the bias
+dims(max_index_plus_one(samples_)+1) // +1 for the bias
 {}
 virtual long get_num_dimensions (
@@ -70,7 +70,7 @@ namespace dlib
 feature_vector_type& psi
 ) const
 {
-sparse_vector::assign(psi, samples[idx]);
+assign(psi, samples[idx]);
 // Add a constant -1 to account for the bias term.
 psi.push_back(std::make_pair(dims-1,static_cast<scalar_type>(-1)));
@@ -94,8 +94,6 @@ namespace dlib
 // LOSS(idx,y) + F(x,y). Note that y in this case is given by distinct_labels[i].
 for (unsigned long i = 0; i < distinct_labels.size(); ++i)
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 // Compute the F(x,y) part:
 // perform: temp == dot(relevant part of current solution, samples[idx]) - current_bias
 scalar_type temp = dot(rowm(current_solution, range(i*dims, (i+1)*dims-2)), samples[idx]) - current_solution((i+1)*dims-1);
@@ -112,7 +110,7 @@ namespace dlib
 }
 }
-sparse_vector::assign(psi, samples[idx]);
+assign(psi, samples[idx]);
 // add a constant -1 to account for the bias term
 psi.push_back(std::make_pair(dims-1,static_cast<scalar_type>(-1)));
@@ -287,7 +285,7 @@ namespace dlib
 trained_function_type df;
-const long dims = sparse_vector::max_index_plus_one(all_samples);
+const long dims = max_index_plus_one(all_samples);
 df.labels = select_all_distinct_labels(all_labels);
 df.weights = colm(reshape(weights, df.labels.size(), dims+1), range(0,dims-1));
 df.b = colm(reshape(weights, df.labels.size(), dims+1), dims);
......
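
A worked example of the reshape/colm unpacking in the hunk above (hypothetical numbers, written for this note): the solver returns one flat column vector holding, per label, dims weight entries followed by one bias entry.

    // Say there are 3 labels and dims == 2, so weights has 3*(2+1) == 9 entries:
    dlib::matrix<double,9,1> weights;
    weights = 1,2, 10,    // label 0: w = (1,2),  bias 10
              3,4, 20,    // label 1: w = (3,4),  bias 20
              5,6, 30;    // label 2: w = (5,6),  bias 30

    // One row per label, dims+1 columns; dlib::reshape is row-major.
    const dlib::matrix<double> m = dlib::reshape(weights, 3, 3);
    const dlib::matrix<double> W = dlib::colm(m, dlib::range(0,1)); // 3x2 per-class weights
    const dlib::matrix<double> b = dlib::colm(m, 2);                // 3x1 per-class biases
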
@@ -50,18 +50,18 @@ namespace
 DLIB_TEST(samples.size() == 150);
 DLIB_TEST(labels.size() == 150);
-DLIB_TEST(sparse_vector::max_index_plus_one(samples) == 5);
+DLIB_TEST(max_index_plus_one(samples) == 5);
 fix_nonzero_indexing(samples);
-DLIB_TEST(sparse_vector::max_index_plus_one(samples) == 4);
+DLIB_TEST(max_index_plus_one(samples) == 4);
 load_libsvm_formatted_data("iris.scale2",samples, labels);
 DLIB_TEST(samples.size() == 150);
 DLIB_TEST(labels.size() == 150);
-DLIB_TEST(sparse_vector::max_index_plus_one(samples) == 5);
+DLIB_TEST(max_index_plus_one(samples) == 5);
 fix_nonzero_indexing(samples);
-DLIB_TEST(sparse_vector::max_index_plus_one(samples) == 4);
+DLIB_TEST(max_index_plus_one(samples) == 4);
 one_vs_one_trainer<any_trainer<sample_type,scalar_type>,scalar_type> trainer;
@@ -85,7 +85,7 @@ namespace
 std::vector<dsample_type> dsamples = sparse_to_dense(samples);
 DLIB_TEST(dsamples.size() == 150);
 DLIB_TEST(dsamples[0].size() == 4);
-DLIB_TEST(sparse_vector::max_index_plus_one(dsamples) == 4);
+DLIB_TEST(max_index_plus_one(dsamples) == 4);
 one_vs_one_trainer<any_trainer<dsample_type,scalar_type>,scalar_type> trainer;
......
@@ -42,7 +42,6 @@ namespace
 const sample_type& b
 ) const
 {
-using namespace sparse_vector;
 return dot(a,b);
 }
@@ -365,7 +364,6 @@ namespace
 - tests the kcentroid object with the given kernel
 !*/
 {
-using namespace dlib::sparse_vector;
 // Here we declare that our samples will be 2 dimensional column vectors.
 typedef typename kernel_type::sample_type sample_type;
@@ -439,7 +437,7 @@ namespace
 temp[3] = 4;
 temp[4] = 5;
 dlog << LDEBUG << "AAAA 3.4" ;
-double junk = sparse_vector::distance(temp2,temp);
+double junk = dlib::distance(temp2,temp);
 dlog << LDEBUG << "AAAA 3.5" ;
 DLIB_TEST(approx_equal(test(temp), junk) );
@@ -462,7 +460,7 @@ namespace
 temp[2] = 3;
 temp[3] = 4;
 temp[4] = 5;
-DLIB_TEST(approx_equal(test(temp), sparse_vector::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test(temp), dlib::distance(temp2,temp)));
 // make test store the -1*point(0,1,0,3,-1)
@@ -483,7 +481,7 @@ namespace
 temp[2] = -3;
 temp[3] = 4;
 temp[4] = 5;
-DLIB_TEST(approx_equal(test(temp), sparse_vector::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test(temp), dlib::distance(temp2,temp)));
@@ -500,8 +498,8 @@ namespace
 temp[2] = -3;
 temp[3] = 4;
 temp[4] = 5;
-DLIB_TEST(approx_equal(test(temp), sparse_vector::distance(temp2,temp)));
-DLIB_TEST(approx_equal(test.get_distance_function()(temp), sparse_vector::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test(temp), dlib::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test.get_distance_function()(temp), dlib::distance(temp2,temp)));
 dlog << LDEBUG << "AAAA 6" ;
@@ -522,8 +520,8 @@ namespace
 temp[2] = -3;
 temp[3] = 4;
 temp[4] = 5;
-DLIB_TEST(approx_equal(test(temp), sparse_vector::distance(temp2,temp)));
-DLIB_TEST(approx_equal(test.get_distance_function()(temp), sparse_vector::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test(temp), dlib::distance(temp2,temp)));
+DLIB_TEST(approx_equal(test.get_distance_function()(temp), dlib::distance(temp2,temp)));
 DLIB_TEST(approx_equal(test(test), 0));
 DLIB_TEST(approx_equal(test.get_distance_function()(test.get_distance_function()), 0));
@@ -545,7 +543,6 @@ namespace
 - tests the kcentroid object with the given kernel
 !*/
 {
-using namespace sparse_vector;
 // Here we declare that our samples will be 2 dimensional column vectors.
 typedef typename kernel_type::sample_type sample_type;
......
@@ -14,7 +14,6 @@ namespace
 using namespace test;
 using namespace dlib;
 using namespace std;
-using namespace dlib::sparse_vector;
 dlib::logger dlog("test.sparse_vector");
......
@@ -135,30 +135,30 @@ namespace
 // Now test some of the sparse helper functions
-DLIB_TEST(sparse_vector::max_index_plus_one(samples) == 2);
-DLIB_TEST(sparse_vector::max_index_plus_one(samples[0]) == 2);
+DLIB_TEST(max_index_plus_one(samples) == 2);
+DLIB_TEST(max_index_plus_one(samples[0]) == 2);
 matrix<double,3,1> m;
 m = 1;
-sparse_vector::add_to(m, samples[3]);
+add_to(m, samples[3]);
 DLIB_TEST(m(0) == 1 + samples[3][0].second);
 DLIB_TEST(m(1) == 1 + samples[3][1].second);
 DLIB_TEST(m(2) == 1);
 m = 1;
-sparse_vector::subtract_from(m, samples[3]);
+subtract_from(m, samples[3]);
 DLIB_TEST(m(0) == 1 - samples[3][0].second);
 DLIB_TEST(m(1) == 1 - samples[3][1].second);
 DLIB_TEST(m(2) == 1);
 m = 1;
-sparse_vector::add_to(m, samples[3], 2);
+add_to(m, samples[3], 2);
 DLIB_TEST(m(0) == 1 + 2*samples[3][0].second);
 DLIB_TEST(m(1) == 1 + 2*samples[3][1].second);
 DLIB_TEST(m(2) == 1);
 m = 1;
-sparse_vector::subtract_from(m, samples[3], 2);
+subtract_from(m, samples[3], 2);
 DLIB_TEST(m(0) == 1 - 2*samples[3][0].second);
 DLIB_TEST(m(1) == 1 - 2*samples[3][1].second);
 DLIB_TEST(m(2) == 1);
@@ -227,7 +227,6 @@ namespace
 sv[0] = 1;
 sv[3] = 1;
-using namespace sparse_vector;
 DLIB_TEST(dot(sv,dv) == 5);
 DLIB_TEST(dot(dv,sv) == 5);
@@ -249,7 +248,6 @@ namespace
 sv[0] = 1;
 sv[3] = 1;
-using namespace sparse_vector;
 assign(dv2, dv);
......
@@ -64,7 +64,7 @@ namespace
 feature_vector_type& psi
 ) const
 {
-sparse_vector::assign(psi, samples[idx]);
+assign(psi, samples[idx]);
 // Add a constant -1 to account for the bias term.
 psi.push_back(std::make_pair(dims-1,static_cast<scalar_type>(-1)));
@@ -88,8 +88,6 @@ namespace
 // LOSS(idx,y) + F(x,y). Note that y in this case is given by distinct_labels[i].
 for (unsigned long i = 0; i < distinct_labels.size(); ++i)
 {
-using dlib::sparse_vector::dot;
-using dlib::dot;
 // Compute the F(x,y) part:
 // perform: temp == dot(relevant part of current solution, samples[idx]) - current_bias
 scalar_type temp = dot(rowm(current_solution, range(i*dims, (i+1)*dims-2)), samples[idx]) - current_solution((i+1)*dims-1);
@@ -106,7 +104,7 @@ namespace
 }
 }
-sparse_vector::assign(psi, samples[idx]);
+assign(psi, samples[idx]);
 // add a constant -1 to account for the bias term
 psi.push_back(std::make_pair(dims-1,static_cast<scalar_type>(-1)));
@@ -221,7 +219,7 @@ namespace
 trained_function_type df;
-const long dims = sparse_vector::max_index_plus_one(all_samples);
+const long dims = max_index_plus_one(all_samples);
 df.labels = select_all_distinct_labels(all_labels);
 df.weights = colm(reshape(weights, df.labels.size(), dims+1), range(0,dims-1));
 df.b = colm(reshape(weights, df.labels.size(), dims+1), dims);
@@ -302,7 +300,7 @@ namespace
 trained_function_type df;
-const long dims = sparse_vector::max_index_plus_one(all_samples);
+const long dims = max_index_plus_one(all_samples);
 df.labels = select_all_distinct_labels(all_labels);
 df.weights = colm(reshape(weights, df.labels.size(), dims+1), range(0,dims-1));
 df.b = colm(reshape(weights, df.labels.size(), dims+1), dims);
@@ -383,7 +381,7 @@ namespace
 trained_function_type df;
-const long dims = sparse_vector::max_index_plus_one(all_samples);
+const long dims = max_index_plus_one(all_samples);
 df.labels = select_all_distinct_labels(all_labels);
 df.weights = colm(reshape(weights, df.labels.size(), dims+1), range(0,dims-1));
 df.b = colm(reshape(weights, df.labels.size(), dims+1), dims);
@@ -464,7 +462,7 @@ namespace
 trained_function_type df;
-const long dims = sparse_vector::max_index_plus_one(all_samples);
+const long dims = max_index_plus_one(all_samples);
 df.labels = select_all_distinct_labels(all_labels);
 df.weights = colm(reshape(weights, df.labels.size(), dims+1), range(0,dims-1));
 df.b = colm(reshape(weights, df.labels.size(), dims+1), dims);
......