Commit 6c05ff45 authored by Davis King's avatar Davis King

Added CPU version of batch normalization functions

parent 141b384b
......@@ -135,6 +135,12 @@ if (NOT TARGET dlib)
data_io/image_dataset_metadata.cpp
data_io/mnist.cpp)
if (COMPILER_CAN_DO_CPP_11)
set(source_files ${source_files}
dnn/cpu_dlib.cpp
)
endif()
if (DLIB_ISO_CPP_ONLY)
add_library(dlib STATIC ${source_files} )
if (UNIX AND NOT DLIB_IN_PROJECT_BUILD)
......
......@@ -18,6 +18,11 @@
#include "../data_io/image_dataset_metadata.cpp"
#include "../data_io/mnist.cpp"
// Stuff that requires C++11
#if __cplusplus >= 201103
#include "../dnn/cpu_dlib.cpp"
#endif
#ifndef DLIB_ISO_CPP_ONLY
// Code that depends on OS specific APIs
......
......@@ -10,6 +10,7 @@
#include "dnn/core.h"
#include "dnn/solvers.h"
#include "dnn/trainer.h"
#include "dnn/cpu_dlib.h"
#endif // DLIB_DNn_
......
This diff is collapsed.
// Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#ifndef DLIB_DNN_CPU_H_
#define DLIB_DNN_CPU_H_
// This file contains CPU implementations of the GPU based functions in cuda_dlib.h
#include "tensor.h"
namespace dlib
{
namespace cpu
{
// -----------------------------------------------------------------------------------
// Applies an element-wise affine transform with scalar coefficients,
// presumably dest = A*src + B for every element of src — confirm against
// the definition in cpu_dlib.cpp.  dest is a resizable_tensor, so it is
// presumably resized to match src before being filled.
void affine_transform(
resizable_tensor& dest,
const tensor& src,
const float A,
const float B
);
// -----------------------------------------------------------------------------------
// Overload taking tensor-valued coefficients: presumably applies
// dest = A*src + B where A and B supply per-element (or broadcast
// per-channel) scale and bias values — confirm the exact broadcasting
// rules against the definition in cpu_dlib.cpp.
void affine_transform(
resizable_tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
);
// -----------------------------------------------------------------------------------
// Batch normalization, forward pass (fully-connected variant).
// Presumably normalizes src using statistics computed over the batch and
// then scales/shifts the result by gamma/beta, writing the normalized
// output to dest.  means and vars are outputs receiving the computed batch
// statistics so they can be reused by batch_normalize_gradient() in the
// backward pass.  NOTE(review): required tensor shapes and the epsilon
// used for numerical stability are not visible in this header — confirm
// against cpu_dlib.cpp.
void batch_normalize (
resizable_tensor& dest,
resizable_tensor& means,
resizable_tensor& vars,
const tensor& src,
const tensor& gamma,
const tensor& beta
);
// Batch normalization, backward pass (fully-connected variant).
// Given gradient_input (the gradient of the loss with respect to the
// forward output) and the means/vars saved by batch_normalize(),
// presumably accumulates gradients with respect to src, gamma, and beta
// into src_grad, gamma_grad, and beta_grad.  NOTE(review): whether the
// gradients are assigned or added (+=) is not visible here — confirm
// against cpu_dlib.cpp before relying on either behavior.
void batch_normalize_gradient (
const tensor& gradient_input,
const tensor& means,
const tensor& vars,
const tensor& src,
const tensor& gamma,
tensor& src_grad,
tensor& gamma_grad,
tensor& beta_grad
);
// Batch normalization, forward pass (convolutional variant).
// Same contract as batch_normalize() above, except statistics are
// presumably computed per channel across the batch and spatial dimensions
// (the usual convolutional batch-norm formulation) — confirm against
// cpu_dlib.cpp.  means and vars receive the computed statistics for use in
// the backward pass.
void batch_normalize_conv (
resizable_tensor& dest,
resizable_tensor& means,
resizable_tensor& vars,
const tensor& src,
const tensor& gamma,
const tensor& beta
);
// Batch normalization, backward pass (convolutional variant).
// Counterpart to batch_normalize_conv(): given gradient_input and the
// saved means/vars, presumably produces gradients with respect to src,
// gamma, and beta.  NOTE(review): assignment vs. accumulation into the
// *_grad outputs is not visible in this header — verify in cpu_dlib.cpp.
void batch_normalize_conv_gradient (
const tensor& gradient_input,
const tensor& means,
const tensor& vars,
const tensor& src,
const tensor& gamma,
tensor& src_grad,
tensor& gamma_grad,
tensor& beta_grad
);
// -----------------------------------------------------------------------------------
// Dropout regularization functor.  Presumably zeros a random subset of the
// elements of src (with probability drop_rate) during the forward pass and
// records which elements were kept in random_mask so the backward pass can
// route gradients only through surviving elements — confirm exact scaling
// behavior (e.g. inverted dropout) against the definitions in cpu_dlib.cpp.
// No data members are visible in this header; state (drop_rate, RNG seed)
// is presumably declared in the non-visible portion of the file or managed
// by the out-of-line definitions.
class dropout
{
public:
// not copyable
dropout(const dropout&) = delete;
dropout& operator=(const dropout&) = delete;
// but is movable
// NOTE(review): both move operations delegate to swap(), which is an
// empty TODO below — as written, moving appears to exchange no state.
// The delegating call dropout() resolves to dropout(0.5) via the
// default argument on the float constructor.
dropout(dropout&& item) : dropout() { swap(item); }
dropout& operator=(dropout&& item) { swap(item); return *this; }
// drop_rate is presumably the probability that an element is dropped;
// the (drop_rate, seed) overload presumably seeds the internal RNG for
// reproducible masks — confirm against cpu_dlib.cpp.
dropout(float drop_rate = 0.5);
dropout(float drop_rate, int seed);
// Exchanges state with item.  Currently unimplemented (TODO), which
// leaves the move operations above as no-ops.
void swap(dropout& item)
{
// TODO
}
// Forward pass: presumably fills random_mask with the keep/drop pattern
// and writes the masked copy of src into dest.
void operator() (
resizable_tensor& dest,
resizable_tensor& random_mask,
const tensor& src
);
// Backward pass: presumably accumulates gradient_input into grad only
// where random_mask indicates the element survived — confirm assign
// vs. += semantics in cpu_dlib.cpp.
void get_gradient(
const tensor& gradient_input,
const tensor& random_mask,
tensor& grad
);
};
// -----------------------------------------------------------------------------------
}
}
#endif // DLIB_DNN_CPU_H_
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment