Commit 7c991b18 authored by Davis King

merged

parents 1569d590 c1da9dc9
@@ -1511,7 +1511,7 @@ namespace dlib
         // ==================================================================
         // first validate the way the parameter gradients are computed
-        for (long i = 0; i < params_grad.size(); ++i)
+        for (unsigned long i = 0; i < params_grad.size(); ++i)
         {
             layer_details_type l1(l);
...
@@ -35,7 +35,7 @@ namespace dlib
             DLIB_CASSERT(output_tensor.num_samples()%sample_expansion_factor == 0,"");
             const float* out_data = output_tensor.host();
-            for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
                 *iter++ = out_data[i];
             }
@@ -67,7 +67,7 @@ namespace dlib
             double loss = 0;
             const float* out_data = output_tensor.host();
             float* g = grad.host();
-            for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
                 const float y = *truth++;
                 DLIB_CASSERT(y == +1 || y == -1, "y: " << y);
...
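The loss-layer hunks above switch the loop index from unsigned long to long, while the test hunk earlier goes the other way for params_grad.size(); in both cases the point appears to be matching the index type to the signedness of the quantity it is compared against, which keeps compilers from emitting signed/unsigned comparison warnings. A minimal standalone sketch of the issue (not dlib code; the vector is just a stand-in):

#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> v(10);

    // Comparing a signed index with the unsigned value returned by size()
    // triggers a signed/unsigned comparison warning under -Wall/-Wextra:
    //     for (long i = 0; i < v.size(); ++i) ...
    // Using an index type that matches the compared quantity avoids it:
    for (std::size_t i = 0; i < v.size(); ++i)
        v[i] = static_cast<int>(i);

    return 0;
}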
@@ -123,7 +123,7 @@ namespace dlib
             {
                 running_stats<double> rs;
-                unsigned long j = 0;
+                size_t j = 0;
                 // Load two tensors worth of data at once so we can overlap the computation
                 // and data transfer between the host and the device.
@@ -140,7 +140,7 @@ namespace dlib
                     j += mini_batch_size;
                 }
-                unsigned long i = 0;
+                size_t i = 0;
                 using namespace std::chrono;
                 auto last_time = system_clock::now();
                 while (i < data.size())
@@ -211,7 +211,7 @@ namespace dlib
             for (unsigned long epoch_iteration = 0; epoch_iteration < num_epochs; ++epoch_iteration)
             {
                 running_stats<double> rs;
-                unsigned long j = 0;
+                size_t j = 0;
                 // Load two tensors worth of data at once so we can overlap the computation
                 // and data transfer between the host and the device.
@@ -228,7 +228,7 @@ namespace dlib
                     j += mini_batch_size;
                 }
-                unsigned long i = 0;
+                size_t i = 0;
                 using namespace std::chrono;
                 auto last_time = system_clock::now();
                 while (i < data.size())
@@ -318,7 +318,7 @@ namespace dlib
             }
             unsigned long num_epochs;
-            unsigned long mini_batch_size;
+            size_t mini_batch_size;
             bool verbose;
             net_type net;
...
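The comment kept as context in the trainer hunks, "Load two tensors worth of data at once so we can overlap the computation and data transfer between the host and the device", describes a double-buffering pattern: while the device works on the current mini-batch, the next one is already being prepared. A rough, self-contained sketch of that idea, with purely illustrative names (load_mini_batch, train_on_batch) rather than the trainer's real interface:

#include <cstddef>
#include <future>
#include <iostream>
#include <numeric>
#include <vector>

// Purely illustrative stand-in; the real trainer fills a dlib tensor and
// starts the host-to-device copy at this point.
std::vector<float> load_mini_batch(std::size_t offset, std::size_t mini_batch_size)
{
    return std::vector<float>(mini_batch_size, static_cast<float>(offset));
}

// Stand-in for running forward/backward on the device.
float train_on_batch(const std::vector<float>& batch)
{
    return std::accumulate(batch.begin(), batch.end(), 0.0f);
}

int main()
{
    const std::size_t data_size = 1024;
    const std::size_t mini_batch_size = 128;

    std::size_t j = 0;
    float total = 0;

    // Keep one batch "in flight": while train_on_batch() processes the
    // current batch, the next one is already being loaded.
    auto next = std::async(std::launch::async, load_mini_batch, j, mini_batch_size);
    while (j < data_size)
    {
        auto current = next.get();
        j += mini_batch_size;
        if (j < data_size)
            next = std::async(std::launch::async, load_mini_batch, j, mini_batch_size);
        total += train_on_batch(current);
    }

    std::cout << "processed " << j << " samples, checksum " << total << "\n";
    return 0;
}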
@@ -39,6 +39,7 @@ else()
    if (";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_rvalue_references;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_variadic_templates;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_lambdas;" AND
+       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_defaulted_move_initializers;" AND
        ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_auto_type;")
       set(COMPILER_CAN_DO_CPP_11 1)
       # Set which standard to use unless someone has already set it to something
...
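The added cxx_defaulted_move_initializers check corresponds to the C++11 ability to declare move constructors and move assignment operators as "= default"; a compiler lacking that feature would reject code along these lines (the type name is just an example, not anything from dlib):

#include <string>
#include <utility>

struct buffer
{
    std::string data;

    buffer() = default;
    // Defaulted move members are what the new CMake feature check tests for.
    buffer(buffer&&) = default;
    buffer& operator=(buffer&&) = default;
};

int main()
{
    buffer a;
    a.data = "hello";
    buffer b = std::move(a);   // uses the defaulted move constructor
    return b.data == "hello" ? 0 : 1;
}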