Commit 20b9e4c5 authored by Davis King's avatar Davis King

Fixed bug in minibatch creation.

parent 11f47957
...@@ -1346,6 +1346,8 @@ namespace dlib ...@@ -1346,6 +1346,8 @@ namespace dlib
- data.size() == labels.size() - data.size() == labels.size()
!*/ !*/
{ {
DLIB_CASSERT(data.size() == labels.size(), "");
const int batch_size = 11; const int batch_size = 11;
for (int iter = 0; iter < 300; ++iter) for (int iter = 0; iter < 300; ++iter)
{ {
...@@ -1354,7 +1356,7 @@ namespace dlib ...@@ -1354,7 +1356,7 @@ namespace dlib
// TODO, move the contents of update() here and do the alternating tensor // TODO, move the contents of update() here and do the alternating tensor
// loading thing to hide GPU transfer latency. // loading thing to hide GPU transfer latency.
std::cout << "loss: "<<net.update(data.begin()+i, std::cout << "loss: "<<net.update(data.begin()+i,
data.begin()+std::min(i+batch_size,i+data.size()-1), data.begin()+std::min(i+batch_size,data.size()),
labels.begin()+i, labels.begin()+i,
solvers) << std::endl; solvers) << std::endl;
} }
...@@ -1382,7 +1384,7 @@ namespace dlib ...@@ -1382,7 +1384,7 @@ namespace dlib
// TODO, move the contents of update() here and do the alternating tensor // TODO, move the contents of update() here and do the alternating tensor
// loading thing to hide GPU transfer latency. // loading thing to hide GPU transfer latency.
std::cout << "loss: "<<net.update(data.begin()+i, std::cout << "loss: "<<net.update(data.begin()+i,
data.begin()+std::min(i+batch_size,i+data.size()-1), data.begin()+std::min(i+batch_size,data.size()),
solvers) << std::endl; solvers) << std::endl;
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment