Commit c1da9dc9 authored by Davis King

Fixed some warnings and errors from Visual Studio 2015

parent 78109ac9
@@ -876,7 +876,7 @@ namespace dlib
         "The loss layer and input layer must agree on the sample_expansion_factor.");
-        add_loss_layer() = default;
+        add_loss_layer() {};
         add_loss_layer(const add_loss_layer&) = default;
         add_loss_layer(add_loss_layer&&) = default;
         add_loss_layer& operator=(add_loss_layer&&) = default;
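This first hunk swaps a defaulted default constructor for an empty user-provided one, presumably to sidestep a VS2015 problem with `= default` on this constructor. The two spellings are not equivalent: an empty body counts as user-provided, so the type loses trivial default construction. A minimal sketch of that difference, using hypothetical types (`defaulted` and `user_empty` are illustrations, not dlib code):

```cpp
#include <type_traits>

struct defaulted  { defaulted() = default; };  // compiler-generated: still trivial
struct user_empty { user_empty() {} };         // user-provided: no longer trivial

static_assert(std::is_trivially_default_constructible<defaulted>::value,
              "= default keeps trivial default construction");
static_assert(!std::is_trivially_default_constructible<user_empty>::value,
              "an empty body is user-provided, so triviality is lost");

int main() { return 0; }
```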
@@ -1478,7 +1478,7 @@ namespace dlib
     // ==================================================================
     // first validate the way the parameter gradients are computed
-        for (long i = 0; i < params_grad.size(); ++i)
+        for (unsigned long i = 0; i < params_grad.size(); ++i)
         {
             layer_details_type l1(l);
...
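The loop above compares its index against `params_grad.size()`. Assuming `size()` returns an unsigned type, as it does for standard containers, a signed index draws Visual Studio's C4018 signed/unsigned mismatch warning; making the index unsigned silences it. A minimal reproduction, with a `std::vector` standing in for the dlib tensor:

```cpp
#include <vector>

// The vector is only a stand-in for params_grad; vector::size() returns an
// unsigned type, which is what clashes with a signed loop index.
void scan(const std::vector<float>& params_grad)
{
    // for (long i = 0; i < params_grad.size(); ++i)  // C4018: signed/unsigned mismatch
    for (unsigned long i = 0; i < params_grad.size(); ++i)
        (void)params_grad[i];
}
```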
@@ -35,7 +35,7 @@ namespace dlib
         DLIB_CASSERT(output_tensor.num_samples()%sample_expansion_factor == 0,"");
         const float* out_data = output_tensor.host();
-        for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+        for (long i = 0; i < output_tensor.num_samples(); ++i)
         {
             *iter++ = out_data[i];
         }
@@ -67,7 +67,7 @@ namespace dlib
         double loss = 0;
         const float* out_data = output_tensor.host();
         float* g = grad.host();
-        for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)
+        for (long i = 0; i < output_tensor.num_samples(); ++i)
         {
             const float y = *truth++;
             DLIB_CASSERT(y == +1 || y == -1, "y: " << y);
...
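The two loss-layer hunks go the opposite direction because their bound is `output_tensor.num_samples()`, which dlib reports as a signed quantity; here the unsigned index was the mismatched side, so the fix makes the index a signed `long`. A sketch using a hypothetical stand-in type (not dlib's real tensor class):

```cpp
// tensor_like mimics a tensor whose sample count is reported as a signed long.
struct tensor_like
{
    long num_samples() const { return 4; }
};

void walk(const tensor_like& output_tensor, const float* out_data)
{
    // for (unsigned long i = 0; i < output_tensor.num_samples(); ++i)  // C4018 again
    for (long i = 0; i < output_tensor.num_samples(); ++i)              // types agree
        (void)out_data[i];
}
```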
@@ -123,7 +123,7 @@ namespace dlib
         {
             running_stats<double> rs;
-            unsigned long j = 0;
+            size_t j = 0;
             // Load two tensors worth of data at once so we can overlap the computation
             // and data transfer between the host and the device.
@@ -140,7 +140,7 @@ namespace dlib
                 j += mini_batch_size;
             }
-            unsigned long i = 0;
+            size_t i = 0;
             using namespace std::chrono;
             auto last_time = system_clock::now();
             while (i < data.size())
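The trainer hunks change counters that are compared against `data.size()` from `unsigned long` to `size_t`. The distinction matters on 64-bit Windows, where the LLP64 model makes `unsigned long` 32 bits while `size_t` is 64 bits, so narrowing a container size into `unsigned long` draws C4267 ("possible loss of data"). A small sketch of the pitfall:

```cpp
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> data(10);
    // unsigned long n = data.size();  // C4267 on x64 MSVC: 64-bit size_t -> 32 bits
    std::size_t n = data.size();       // always the right width for a size
    for (std::size_t i = 0; i < n; ++i)
        data[i] = static_cast<int>(i);
    return 0;
}
```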
@@ -211,7 +211,7 @@ namespace dlib
         for (unsigned long epoch_iteration = 0; epoch_iteration < num_epochs; ++epoch_iteration)
         {
             running_stats<double> rs;
-            unsigned long j = 0;
+            size_t j = 0;
             // Load two tensors worth of data at once so we can overlap the computation
             // and data transfer between the host and the device.
@@ -228,7 +228,7 @@ namespace dlib
                 j += mini_batch_size;
             }
-            unsigned long i = 0;
+            size_t i = 0;
             using namespace std::chrono;
             auto last_time = system_clock::now();
             while (i < data.size())
@@ -318,7 +318,7 @@ namespace dlib
         }
         unsigned long num_epochs;
-        unsigned long mini_batch_size;
+        size_t mini_batch_size;
         bool verbose;
         net_type net;
...
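The last hunk applies the same reasoning to the stored member: `mini_batch_size` is added to `size_t` counters and compared against container sizes, so storing it as `size_t` keeps every such expression warning-free. A hedged sketch with a hypothetical trainer-like struct (dlib's actual dnn_trainer is more involved):

```cpp
#include <cstddef>
#include <vector>

struct trainer_like
{
    std::size_t mini_batch_size = 128;  // matches the width of size_t counters

    void run(const std::vector<float>& data)
    {
        std::size_t i = 0;
        while (i < data.size())   // size_t vs size_t: no C4018
            i += mini_batch_size; // size_t += size_t: no C4267 narrowing
    }
};

int main() { return 0; }
```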