Commit d7df21a8 authored by Davis King

switched examples over to the new mat() method.

parent a5d30218
@@ -177,8 +177,8 @@ int main()
     {
         // Predict the assignments for the LHS and RHS in samples[i].
         std::vector<long> predicted_assignments = assigner(samples[i]);
-        cout << "true labels: " << trans(vector_to_matrix(labels[i]));
-        cout << "predicted labels: " << trans(vector_to_matrix(predicted_assignments)) << endl;
+        cout << "true labels: " << trans(mat(labels[i]));
+        cout << "predicted labels: " << trans(mat(predicted_assignments)) << endl;
     }

     // We can also use this tool to compute the percentage of assignments predicted correctly.
...
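(For reference, mat() simply wraps a std::vector in a dlib matrix expression so it can be printed or manipulated like a column vector. The standalone sketch below, which is not part of the committed examples and uses made-up label values, shows the trans(mat(...)) idiom from the hunk above.)

#include <dlib/matrix.h>
#include <iostream>
#include <vector>

int main()
{
    using namespace dlib;

    // A plain std::vector of labels (made-up values).
    std::vector<long> labels = {1, -1, 1, 1};

    // mat() views the vector as a column vector; trans() turns it into a
    // row so it prints on a single line, like the example output above.
    std::cout << "labels: " << trans(mat(labels));

    return 0;
}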
@@ -118,9 +118,9 @@ public:
     }

     // divide by number of +1 samples
-    positive_center /= sum(vector_to_matrix(labels) == +1);
+    positive_center /= sum(mat(labels) == +1);
     // divide by number of -1 samples
-    negative_center /= sum(vector_to_matrix(labels) == -1);
+    negative_center /= sum(mat(labels) == -1);

     custom_decision_function df;
     df.positive_center = positive_center;
...
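(A side note on the sum(mat(labels) == +1) idiom used above: comparing a dlib matrix expression against a scalar yields a matrix of 1s and 0s, so sum() counts the matching labels. A minimal standalone sketch, with made-up label values:)

#include <dlib/matrix.h>
#include <iostream>
#include <vector>

int main()
{
    using namespace dlib;

    // Made-up +1/-1 labels.
    std::vector<double> labels = {+1, -1, +1, +1, -1};

    // Element-wise comparison gives a 0/1 matrix; sum() counts the 1s.
    std::cout << "number of +1 labels: " << sum(mat(labels) == +1) << std::endl;
    std::cout << "number of -1 labels: " << sum(mat(labels) == -1) << std::endl;

    return 0;
}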
@@ -65,8 +65,8 @@ int main()
     }

     cout << "samples generated: " << samples.size() << endl;
-    cout << " number of +1 samples: " << sum(vector_to_matrix(labels) > 0) << endl;
-    cout << " number of -1 samples: " << sum(vector_to_matrix(labels) < 0) << endl;
+    cout << " number of +1 samples: " << sum(mat(labels) > 0) << endl;
+    cout << " number of -1 samples: " << sum(mat(labels) < 0) << endl;

     // Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
     // This is generally a good idea since it often heads off numerical stability problems and also
...
@@ -384,7 +384,7 @@ void custom_matrix_expressions_example(
         As an aside, note that dlib contains functions equivalent to the ones we
         defined above. They are:
             - dlib::trans()
-            - dlib::vector_to_matrix()
+            - dlib::mat() (converts things into matrices)
             - operator+ (e.g. you can say my_mat + 1)
...
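(A quick illustration of the note above: dlib::mat() converts a std::vector, and several other container types, into a matrix expression, after which trans() and operator+ apply as usual. This sketch is illustrative only and uses made-up values.)

#include <dlib/matrix.h>
#include <iostream>
#include <vector>

int main()
{
    using namespace dlib;

    std::vector<int> v = {1, 2, 3};

    // mat() gives a matrix view of v; trans() and operator+ then behave
    // like the hand-rolled versions discussed in the example.
    matrix<int> m = trans(mat(v)) + 1;

    std::cout << m;   // prints the row vector: 2 3 4

    return 0;
}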
@@ -79,8 +79,8 @@ int main()
     // Here we normalize all the samples by subtracting their mean and dividing by their standard deviation.
     // This is generally a good idea since it often heads off numerical stability problems and also
     // prevents one large feature from smothering others.
-    const sample_type m(mean(vector_to_matrix(samples)));  // compute a mean vector
-    const sample_type sd(reciprocal(sqrt(variance(vector_to_matrix(samples)))));  // compute a standard deviation vector
+    const sample_type m(mean(mat(samples)));  // compute a mean vector
+    const sample_type sd(reciprocal(stddev(mat(samples))));  // compute a standard deviation vector

     // now normalize each sample
     for (unsigned long i = 0; i < samples.size(); ++i)
         samples[i] = pointwise_multiply(samples[i] - m, sd);
...
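(The normalization idiom above works for any std::vector of fixed-size dlib column vectors. The standalone sketch below uses made-up 3-dimensional samples and is not part of the committed example.)

#include <dlib/matrix.h>
#include <iostream>
#include <vector>

int main()
{
    using namespace dlib;
    typedef matrix<double, 3, 1> sample_type;

    // Three made-up samples.
    std::vector<sample_type> samples(3);
    samples[0] = 1, 2, 3;
    samples[1] = 4, 5, 6;
    samples[2] = 7, 8, 9;

    // mat(samples) views the std::vector as a column vector of samples, so
    // mean() and stddev() reduce across the samples element-by-element.
    const sample_type m(mean(mat(samples)));
    const sample_type sd(reciprocal(stddev(mat(samples))));

    // Normalize each sample: subtract the mean, then scale by 1/stddev.
    for (unsigned long i = 0; i < samples.size(); ++i)
        samples[i] = pointwise_multiply(samples[i] - m, sd);

    std::cout << trans(samples[0]);

    return 0;
}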
@@ -228,8 +228,8 @@ int main()
     // print out some of the randomly sampled sequences
     for (int i = 0; i < 10; ++i)
     {
-        cout << "hidden states: " << trans(vector_to_matrix(labels[i]));
-        cout << "observed states: " << trans(vector_to_matrix(samples[i]));
+        cout << "hidden states: " << trans(mat(labels[i]));
+        cout << "observed states: " << trans(mat(samples[i]));
         cout << "******************************" << endl;
     }
@@ -251,8 +251,8 @@ int main()
     // Test the learned labeler on one of the training samples. In this
     // case it will give the correct sequence of labels.
     std::vector<unsigned long> predicted_labels = labeler(samples[0]);
-    cout << "true hidden states: " << trans(vector_to_matrix(labels[0]));
-    cout << "predicted hidden states: " << trans(vector_to_matrix(predicted_labels));
+    cout << "true hidden states: " << trans(mat(labels[0]));
+    cout << "predicted hidden states: " << trans(mat(predicted_labels));
...
@@ -160,8 +160,8 @@ int main()
         }
     }

     cout << "samples generated: " << samples.size() << endl;
-    cout << " number of +1 samples: " << sum(vector_to_matrix(labels) > 0) << endl;
-    cout << " number of -1 samples: " << sum(vector_to_matrix(labels) < 0) << endl;
+    cout << " number of +1 samples: " << sum(mat(labels) > 0) << endl;
+    cout << " number of -1 samples: " << sum(mat(labels) < 0) << endl;

     // A valid kernel must always give rise to kernel matrices which are symmetric
...