Commit 92e11581 authored by Davis King's avatar Davis King

Added label_connected_blobs_watershed()

parent 2673694c
......@@ -7,6 +7,9 @@
#include "../geometry.h"
#include <stack>
#include <vector>
#include "thresholding.h"
#include "assign_image.h"
#include <queue>
namespace dlib
{
......@@ -194,6 +197,150 @@ namespace dlib
return next;
}
// ----------------------------------------------------------------------------------------
template <
typename in_image_type,
typename out_image_type
>
unsigned long label_connected_blobs_watershed (
const in_image_type& img_,
out_image_type& labels_,
typename pixel_traits<typename image_traits<in_image_type>::pixel_type>::basic_pixel_type background_thresh,
const double smoothing = 0
)
{
// make sure requires clause is not broken
DLIB_ASSERT(is_same_object(img_, labels_) == false,
"\t unsigned long segment_image_watersheds()"
<< "\n\t The input images can't be the same object."
);
DLIB_ASSERT(smoothing >= 0);
COMPILE_TIME_ASSERT(is_unsigned_type<typename image_traits<out_image_type>::pixel_type>::value);
struct watershed_points
{
point p;
float score = 0;
unsigned int label = std::numeric_limits<unsigned int>::max();
bool is_seed() const { return label == std::numeric_limits<unsigned int>::max(); }
bool operator< (const watershed_points& rhs) const
{
// If two pixels have the same score then we take the one with the smallest
// label out of the priority queue first. We do this so that seed points
// that are downhill from some larger blob will be consumed by it if they
// haven't grown before the larger blob's flooding reaches them. Doing
// this helps a lot to avoid spuriously splitting blobs.
if (score == rhs.score)
{
return label > rhs.label;
}
return score < rhs.score;
}
};
const_image_view<in_image_type> img(img_);
image_view<out_image_type> labels(labels_);
labels.set_size(img.nr(), img.nc());
// Initially, all pixels have the background label of 0.
assign_all_pixels(labels, 0);
std::priority_queue<watershed_points> next;
// Note that we never blur the image values we use to check against the
// background_thresh. We do however blur, if smoothing!=0, the pixel values used
// to do the watershed.
in_image_type img2_;
if (smoothing != 0)
gaussian_blur(img_, img2_, smoothing);
const_image_view<in_image_type> img2view(img2_);
// point us at img2 if we are doing smoothing, otherwise point us at the input
// image.
const auto& img2 = smoothing!=0?img2view:img;
// first find all the local maxima
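// Each local maximum of the (possibly smoothed) image that is above the background
// threshold becomes a flooding source.  Seeds are pushed with the sentinel label
// max(), which is what is_seed() checks for.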
for (long r = 1; r+1 < img.nr(); ++r)
{
for (long c = 1; c+1 < img.nc(); ++c)
{
if (img[r][c] < background_thresh)
continue;
auto val = img2[r][c];
// if img2[r][c] isn't a local maximum then skip it
if (val < img2[r+1][c] ||
val < img2[r-1][c] ||
val < img2[r][c+1] ||
val < img2[r][c-1]
)
{
continue;
}
next.push({point(c,r), val, std::numeric_limits<unsigned int>::max()});
}
}
const rectangle area = get_rect(img);
unsigned int next_label = 1;
std::vector<point> neighbors;
neighbors_8 get_neighbors;
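// Flood outward from the seed points.  Each iteration pops the best scoring pixel,
// assigns a fresh label if it is a still unclaimed seed, and then labels and enqueues
// any unlabeled, non-background neighbors with that label.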
while(next.size() > 0)
{
auto p = next.top();
next.pop();
unsigned int label;
// If the next pixel is a seed of a new blob and is still labeled as a
// background pixel (i.e. it hasn't been flooded over by a neighboring blob and
// consumed by it) then we create a new label for this new blob.
if (p.is_seed() && labels[p.p.y()][p.p.x()] == 0)
label = next_label++;
else
label = p.label;
neighbors.clear();
get_neighbors(p.p, neighbors);
for (auto& n : neighbors)
{
if (!area.contains(n) || labels[n.y()][n.x()] != 0 || img[n.y()][n.x()] < background_thresh)
continue;
labels[n.y()][n.x()] = label;
next.push({n, img2[n.y()][n.x()], label});
}
}
return next_label;
}
template <
typename in_image_type,
typename out_image_type
>
unsigned long label_connected_blobs_watershed (
const in_image_type& img,
out_image_type& labels
)
{
return label_connected_blobs_watershed(img, labels, partition_pixels(img));
}
// ----------------------------------------------------------------------------------------
}
......
......@@ -209,6 +209,68 @@ namespace dlib
called with points outside the image.
!*/
// ----------------------------------------------------------------------------------------
template <
typename in_image_type,
typename out_image_type
>
unsigned long label_connected_blobs_watershed (
const in_image_type& img,
out_image_type& labels,
typename pixel_traits<typename image_traits<in_image_type>::pixel_type>::basic_pixel_type background_thresh,
const double smoothing = 0
);
/*!
requires
- in_image_type == an image object that implements the interface defined in
dlib/image_processing/generic_image.h
- out_image_type == an image object that implements the interface defined in
dlib/image_processing/generic_image.h
- in_image_type must contain a grayscale pixel type.
- out_image_type must contain an unsigned integer pixel type.
- is_same_object(img, labels) == false
- smoothing >= 0
ensures
- This routine performs a watershed segmentation of the given input image and
labels each resulting flooding region with a unique integer label. It does
this by marking the brightest pixels as sources of flooding and then flood
filling the image outward from those sources. Each flooded area is labeled
with the identity of its source pixel, and flooding stops when another flooded
area is reached or when pixels with values < background_thresh are encountered.
- The flooding will also overrun a source pixel if that source pixel has yet to
label any neighboring pixels. This behavior helps to mitigate spurious
splits of objects due to noise. You can further control this behavior by
setting the smoothing parameter. The flooding will take place on an image
that has been Gaussian blurred with a sigma==smoothing. So setting smoothing
to a larger number will in general cause more regions to be merged together.
Note that the smoothing parameter has no effect on the interpretation of
background_thresh since the decision of "background or not background" is
always made relative to the unsmoothed input image.
- #labels.nr() == img.nr()
- #labels.nc() == img.nc()
- for all valid r and c:
- if (img[r][c] < background_thresh) then
- #labels[r][c] == 0, (i.e. the pixel is labeled as background)
- else
- #labels[r][c] == an integer value indicating the identity of the segment
containing the pixel img[r][c].
- returns the number of labeled segments, including the background segment.
Therefore, the returned number is 1+(the max value in #labels).
!*/
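// For illustration only: a minimal usage sketch of the overload above. The image
// file name, threshold, and smoothing values below are hypothetical, not part of
// this commit.
//
//    dlib::array2d<unsigned char> img;
//    dlib::load_image(img, "blobs.png");
//    dlib::array2d<unsigned int> labels;
//    // Pixels darker than 30 count as background; smoothing==2 blurs the image
//    // used for flooding, merging nearby local maxima into single seeds.
//    unsigned long num_segments = dlib::label_connected_blobs_watershed(img, labels, 30, 2);
//    // num_segments includes the background, so blob labels run from 1 to num_segments-1.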
template <
typename in_image_type,
typename out_image_type
>
unsigned long label_connected_blobs_watershed (
const in_image_type& img,
out_image_type& labels
);
/*!
simply invokes: return label_connected_blobs_watershed(img, labels, partition_pixels(img));
!*/
// ----------------------------------------------------------------------------------------
}
......