Commit c1c9a59c authored by Davis King

Added upsample_ layer that upsamples a tensor using bilinear interpolation.

parent 19856946
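For context, here is a hedged sketch (not part of the commit) of how the new upsample alias could slot into a network definition. Everything other than upsample<> below is an illustrative placeholder.

    #include <dlib/dnn.h>
    using namespace dlib;

    // A stride-2 convolution halves the feature map; the new upsample<2,...>
    // layer brings it back to the original resolution by bilinear interpolation.
    // A loss layer would be stacked on top of this to form a trainable network.
    using toy_subnet = upsample<2,
                       relu<con<8,3,3,2,2,
                       input<matrix<float>>>>>;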
@@ -608,6 +608,108 @@ namespace dlib
>
using cont = add_layer<cont_<num_filters,nr,nc,stride_y,stride_x>, SUBNET>;
// ----------------------------------------------------------------------------------------
template <
int scale_y,
int scale_x
>
class upsample_
{
public:
static_assert(scale_y >= 1, "upsampling scale factor can't be less than 1.");
static_assert(scale_x >= 1, "upsampling scale factor can't be less than 1.");
upsample_()
{
}
template <typename SUBNET>
void setup (const SUBNET& /*sub*/)
{
}
template <typename SUBNET>
void forward(const SUBNET& sub, resizable_tensor& output)
{
output.set_size(
sub.get_output().num_samples(),
sub.get_output().k(),
scale_y*sub.get_output().nr(),
scale_x*sub.get_output().nc());
tt::resize_bilinear(output, sub.get_output());
}
template <typename SUBNET>
void backward(const tensor& gradient_input, SUBNET& sub, tensor& /*params_grad*/)
{
tt::resize_bilinear_gradient(sub.get_gradient_input(), gradient_input);
}
inline point map_input_to_output (point p) const
{
p.x() = p.x()*scale_x;
p.y() = p.y()*scale_y;
return p;
}
inline point map_output_to_input (point p) const
{
p.x() = p.x()/scale_x;
p.y() = p.y()/scale_y;
return p;
}
const tensor& get_layer_params() const { return params; }
tensor& get_layer_params() { return params; }
friend void serialize(const upsample_& , std::ostream& out)
{
serialize("upsample_", out);
serialize(scale_y, out);
serialize(scale_x, out);
}
friend void deserialize(upsample_& , std::istream& in)
{
std::string version;
deserialize(version, in);
if (version != "upsample_")
throw serialization_error("Unexpected version '"+version+"' found while deserializing dlib::upsample_.");
int _scale_y;
int _scale_x;
deserialize(_scale_y, in);
deserialize(_scale_x, in);
if (_scale_y != scale_y || _scale_x != scale_x)
throw serialization_error("Wrong scale found while deserializing dlib::upsample_");
}
friend std::ostream& operator<<(std::ostream& out, const upsample_& )
{
out << "upsample\t ("
<< "scale_y="<<scale_y
<< ", scale_x="<<scale_x
<< ")";
return out;
}
friend void to_xml(const upsample_& /*item*/, std::ostream& out)
{
out << "<upsample"
<< " scale_y='"<<scale_y<<"'"
<< " scale_x='"<<scale_x<<"'/>\n";
}
private:
resizable_tensor params;
};
template <
int scale,
typename SUBNET
>
using upsample = add_layer<upsample_<scale,scale>, SUBNET>;
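As a hedged aside (not part of the commit), the forward pass above boils down to the following standalone use of tt::resize_bilinear, which the layer delegates to; the tensor sizes are arbitrary example values.

    #include <dlib/dnn.h>
    using namespace dlib;

    int main()
    {
        // 1 sample, 3 channels, 10x20 feature maps, filled with a constant.
        resizable_tensor in(1, 3, 10, 20);
        in = 1;

        // What upsample_<2,2>::forward() does: allocate an output tensor with
        // nr and nc scaled up, then bilinearly interpolate each channel into it.
        resizable_tensor out;
        out.set_size(in.num_samples(), in.k(), 2*in.nr(), 2*in.nc());
        tt::resize_bilinear(out, in);   // out is now 1x3x20x40

        return 0;
    }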
// ----------------------------------------------------------------------------------------
template <
......
@@ -1092,6 +1092,60 @@ namespace dlib
>
using cont = add_layer<cont_<num_filters,nr,nc,stride_y,stride_x>, SUBNET>;
// ----------------------------------------------------------------------------------------
template <
int scale_y,
int scale_x
>
class upsample_
{
/*!
REQUIREMENTS ON TEMPLATE ARGUMENTS
All of them must be >= 1.
WHAT THIS OBJECT REPRESENTS
This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface
defined above. In particular, it allows you to upsample a layer using
bilinear interpolation. To be very specific, it upsamples each of the
channels in an input tensor. Therefore, if IN is the input tensor to this
layer and OUT the output tensor, then we will have:
- OUT.num_samples() == IN.num_samples()
- OUT.k() == IN.k()
- OUT.nr() == IN.nr()*scale_y
- OUT.nc() == IN.nc()*scale_x
- for all valid i,k: image_plane(OUT,i,k) is a copy of
image_plane(IN,i,k) that has been bilinearly interpolated to fit into
the shape of image_plane(OUT,i,k).
!*/
public:
upsample_(
);
/*!
ensures
- This object has no state, so the constructor does nothing aside from
providing default constructibility.
!*/
template <typename SUBNET> void setup (const SUBNET& sub);
template <typename SUBNET> void forward(const SUBNET& sub, resizable_tensor& output);
template <typename SUBNET> void backward(const tensor& gradient_input, SUBNET& sub, tensor& params_grad);
point map_input_to_output(point p) const;
point map_output_to_input(point p) const;
const tensor& get_layer_params() const;
tensor& get_layer_params();
/*!
These functions are implemented as described in the EXAMPLE_COMPUTATIONAL_LAYER_ interface.
!*/
};
template <
int scale,
typename SUBNET
>
using upsample = add_layer<upsample_<scale,scale>, SUBNET>;
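A small hedged illustration (not from the commit) of the coordinate maps declared above, using the formulas from the implementation in the first hunk:

    #include <dlib/dnn.h>
    using namespace dlib;

    int main()
    {
        upsample_<2,2> up;
        point a = up.map_input_to_output(point(3,5));   // a == point(6,10)
        point b = up.map_output_to_input(point(7,11));  // b == point(3,5): point
                                                        // holds long coordinates,
                                                        // so the division truncates
        return 0;
    }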
// ----------------------------------------------------------------------------------------
class dropout_
......
@@ -1449,6 +1449,30 @@ namespace
void test_layers()
{
{
print_spinner();
upsample_<1,1> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{
print_spinner();
upsample_<2,1> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{
print_spinner();
upsample_<2,2> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{
print_spinner();
upsample_<3,3> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{
print_spinner();
l2normalize_ l;
......