#include <dlib/dnn.h>
#include <dlib/image_io.h>
#include <dlib/misc_api.h>

using namespace dlib;
using namespace std;

// ----------------------------------------------------------------------------------------

// Scans dir and returns the image files grouped by identity: one inner vector of
// file names for each subdirectory.
std::vector<std::vector<string>> load_objects_list (
    const string& dir
)
{
    std::vector<std::vector<string>> objects;
    for (auto subdir : directory(dir).get_dirs())
    {
        std::vector<string> imgs;
        for (auto img : subdir.get_files())
            imgs.push_back(img);

        objects.push_back(imgs);
    }
    return objects;
}

void load_mini_batch (
    const size_t num_ids,        // how many different identities to include
    const size_t samples_per_id, // how many images to select per identity
    dlib::rand& rnd,
    const std::vector<std::vector<string>>& objs,
    std::vector<matrix<rgb_pixel>>& images,
    std::vector<unsigned long>& labels
)
{
    images.clear();
    labels.clear();

    matrix<rgb_pixel> image;
    for (size_t i = 0; i < num_ids; ++i)
    {
        const size_t id = rnd.get_random_32bit_number()%objs.size();
        for (size_t j = 0; j < samples_per_id; ++j)
        {
            const auto& obj = objs[id][rnd.get_random_32bit_number()%objs[id].size()];
            load_image(image, obj);
            images.push_back(std::move(image));
            labels.push_back(id);
        }
    }

    // You might want to do some data augmentation at this point.  Here we do some
    // simple color augmentation.
    for (auto&& crop : images)
        disturb_colors(crop,rnd);

    // All the images going into a mini-batch have to be the same size.  And really, all
    // the images in your entire training dataset should be the same size for what we are
    // doing to make the most sense.
    DLIB_CASSERT(images.size() > 0);
    for (auto&& img : images)
    {
        DLIB_CASSERT(img.nr() == images[0].nr() && img.nc() == images[0].nc(),
            "All the images in a single mini-batch must be the same size.");
    }
}

// ----------------------------------------------------------------------------------------

template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>