% January 2010 %
% %
% %
-% Copyright 1999-2010 ImageMagick Studio LLC, a non-profit organization %
+% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
+#include "magick/utility.h"
\f
/*
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
- ExpandKernelInfo(KernelInfo *, const double),
+ ExpandMirrorKernelInfo(KernelInfo *),
+ ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
\f
return(kernel);
}
-
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
-% center as origin, this is no ssize_ter the case, and any rectangular kernel
+% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
%
% Input kernel definition strings can consist of any of three types.
%
-% "name:args"
+% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
-% "WxH[+X+Y][^@]:num, num, num ..."
+% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
-% If a '^' is included the kernel expanded with 90-degree rotations,
-% While a '@' will allow you to expand a 3x3 kernel using 45-degree
-% circular rotates.
-%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
-% operators A list is defined as a semi-colon seperated list kernels.
+% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
+% The special flags will expand a single kernel, into a list of rotated
+% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
+% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
+% The '<' also expands using 90-degree rotates, but giving a 180-degree
+% reflected kernel before the +/- 90-degree rotations, which can be important
+% for Thinning operations.
+%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
- /* clear flags - for Expanding kernal lists thorugh rotations */
+  /* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification */
kernel->values[i] = nan; /* do not include this value in kernel */
}
else {
- kernel->values[i] = StringToDouble(token);
+ kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
- ExpandKernelInfo(kernel, 45.0);
- else if ( (flags & MinimumValue) != 0 ) /* '^' symbol in kernel size */
- ExpandKernelInfo(kernel, 90.0);
+ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
+ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
+ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
+ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
+ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
- KernelInfo
- *kernel;
-
char
token[MaxTextExtent];
- ssize_t
- type;
-
const char
*p,
*end;
+ GeometryInfo
+ args;
+
+ KernelInfo
+ *kernel;
+
MagickStatusType
flags;
- GeometryInfo
- args;
+ ssize_t
+ type;
/* Parse special 'named' kernel */
GetMagickToken(kernel_string,&p,token);
- type=ParseMagickOption(MagickKernelOptions,MagickFalse,token);
+ type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *)NULL); /* not a valid named kernel */
/* special handling of missing values in input string */
switch( type ) {
- case RectangleKernel:
- if ( (flags & WidthValue) == 0 ) /* if no width then */
- args.rho = args.sigma; /* then width = height */
- if ( args.rho < 1.0 ) /* if width too small */
- args.rho = 3; /* then width = 3 */
- if ( args.sigma < 1.0 ) /* if height too small */
- args.sigma = args.rho; /* then height = width */
- if ( (flags & XValue) == 0 ) /* center offset if not defined */
- args.xi = (double)(((ssize_t)args.rho-1)/2);
- if ( (flags & YValue) == 0 )
- args.psi = (double)(((ssize_t)args.sigma-1)/2);
+ /* Shape Kernel Defaults */
+ case UnityKernel:
+ if ( (flags & WidthValue) == 0 )
+ args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
+ case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
- /* If no scale given (a 0 scale is valid! - set it to 1.0 */
if ( (flags & HeightValue) == 0 )
- args.sigma = 1.0;
+ args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
- args.xi = 1.0;
+ args.xi = 1.0; /* Default scale = 1.0, zero is valid */
+ break;
+ case RectangleKernel: /* Rectangle - set size defaults */
+ if ( (flags & WidthValue) == 0 ) /* if no width then */
+ args.rho = args.sigma; /* then width = height */
+ if ( args.rho < 1.0 ) /* if width too small */
+ args.rho = 3; /* then width = 3 */
+ if ( args.sigma < 1.0 ) /* if height too small */
+ args.sigma = args.rho; /* then height = width */
+ if ( (flags & XValue) == 0 ) /* center offset if not defined */
+ args.xi = (double)(((ssize_t)args.rho-1)/2);
+ if ( (flags & YValue) == 0 )
+ args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
+ /* Distance Kernel Defaults */
case ChebyshevKernel:
- case ManhattenKernel:
+ case ManhattanKernel:
+ case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
+ if ( kernel == (KernelInfo *) NULL )
+ return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
- ExpandKernelInfo(kernel, 45.0);
- else if ( (flags & MinimumValue) != 0 ) /* '^' symbol in kernel args */
- ExpandKernelInfo(kernel, 90.0);
+ ExpandRotateKernelInfo(kernel, 45.0);
+ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
+ ExpandRotateKernelInfo(kernel, 90.0);
+ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
+ ExpandMirrorKernelInfo(kernel);
}
return(kernel);
while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
- /* ignore extra or multiple ';' kernel seperators */
+ /* ignore extra or multiple ';' kernel separators */
if ( *token != ';' ) {
/* tokens starting with alpha is a Named kernel */
/* Error handling -- this is not proper error handling! */
if ( new_kernel == (KernelInfo *) NULL ) {
- fprintf(stderr, "Failed to parse kernel number #%lu\n",(unsigned long)
+ fprintf(stderr, "Failed to parse kernel number #%.20g\n",(double)
kernel_number);
if ( kernel != (KernelInfo *) NULL )
kernel=DestroyKernelInfo(kernel);
% Convolution Kernels
%
% Unity
-% the No-Op kernel, also requivelent to Gaussian of sigma zero.
-% Basically a 3x3 kernel of a 1 surrounded by zeros.
+%       A No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
%       Generate a two-dimensional gaussian kernel, as used by -gaussian.
% radius will be determined so as to produce the best minimal error
%       result, which is usually much larger than is normally needed.
%
-% DOG:{radius},{sigma1},{sigma2}
+% LoG:{radius},{sigma}
+%       "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
+% The supposed ideal edge detection, zero-summing kernel.
+%
+%       An alternative to this kernel is to use a "DoG" with a sigma ratio of
+% approx 1.6 (according to wikipedia).
+%
+% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
-% LOG:{radius},{sigma}
-% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
-% The supposed ideal edge detection, zero-summing kernel.
-%
-% An alturnative to this kernel is to use a "DOG" with a sigma ratio of
-% approx 1.6, which can also be applied as a 2 pass "DOB" (see below).
-%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
%       (currently restricted to orthogonal angles). If a 'radius' is given the
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
-% each other, is equivelent to a far larger "Gaussian" kernel with the
+% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
-% DOB:{radius},{sigma1},{sigma2}[,{angle}]
-% "Difference of Blurs" Kernel.
-% As "Blur" but with the 1D gaussian produced by 'sigma2' subtracted
-% from thethe 1D gaussian produced by 'sigma1'.
-% The result is a zero-summing kernel.
-%
-% This can be used to generate a faster "DOG" convolution, in the same
-% way "Blur" can.
-%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
-% Type 15 : 5x5 LOG (sigma approx 1.4)
-% Type 19 : 9x9 LOG (sigma approx 1.4)
+% Type 15 : 5x5 LoG (sigma approx 1.4)
+% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
-% -1, 0, 1
-% -2, 0,-2
-% -1, 0, 1
+% | -1, 0, 1 |
+% | -2, 0,-2 |
+% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
-% 0, 0, 0
-% -1, 1, 0
-% 0, 0, 0
+% | 0, 0, 0 |
+% | -1, 1, 0 |
+% | 0, 0, 0 |
+%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
-% -1, 0, 1
-% -1, 0, 1
-% -1, 0, 1
+% | -1, 0, 1 |
+% | -1, 0, 1 |
+% | -1, 0, 1 |
+%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
-% -1, 1, 1
-% -1,-2, 1
-% -1, 1, 1
+% | -1, 1, 1 |
+% | -1,-2, 1 |
+% | -1, 1, 1 |
+%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
-% -3,-3, 5
-% -3, 0, 5
-% -3,-3, 5
+% | -3,-3, 5 |
+% | -3, 0, 5 |
+% | -3,-3, 5 |
+%
+% FreiChen:{angle}
+% Frei-Chen Edge Detector is based on a kernel that is similar to
+% the Sobel Kernel, but is designed to be isotropic. That is it takes
+% into account the distance of the diagonal in the kernel.
+%
+% | 1, 0, -1 |
+% | sqrt(2), 0, -sqrt(2) |
+% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
-% Frei-Chen Edge Detector is a set of 9 unique convolution kernels that
-% are specially weighted.
%
-% Type 0: | -1, 0, 1 |
-% | -sqrt(2), 0, sqrt(2) |
-% | -1, 0, 1 |
+% Frei-Chen Pre-weighted kernels...
%
-% This is basically the unnormalized discrete kernel that can be used
-% instead ot a Sobel kernel.
+%       Type 0: default un-normalized version shown above.
%
-% The next 9 kernel types are specially pre-weighted. They should not
-% be normalized. After applying each to the original image, the results
-% is then added together. The square root of the resulting image is
-% the cosine of the edge, and the direction of the feature detection.
+% Type 1: Orthogonal Kernel (same as type 11 below)
+% | 1, 0, -1 |
+% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
+% | 1, 0, -1 |
%
-% Type 1: | 1, sqrt(2), 1 |
-% | 0, 0, 0 | / 2*sqrt(2)
-% | -1, -sqrt(2), -1 |
+% Type 2: Diagonal form of Kernel...
+% | 1, sqrt(2), 0 |
+% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
+% | 0, -sqrt(2) -1 |
%
-% Type 2: | 1, 0, 1 |
-% | sqrt(2), 0, sqrt(2) | / 2*sqrt(2)
-% | 1, 0, 1 |
+%       However this kernel is also at the heart of the FreiChen Edge Detection
+%       Process which uses a set of 9 specially weighted kernels. These 9
+%       kernels are not to be normalized, but directly applied to the image. The
+%       results are then added together, to produce the intensity of an edge in
+%       a specific direction. The square root of the pixel value can then be
+%       taken as the cosine of the edge, and with at least 2 such runs at 90
+%       degrees from each other, both the direction and the strength of the
+%       edge can be determined.
%
-% Type 3: | 0, -1, sqrt(2) |
-% | 1, 0, -1 | / 2*sqrt(2)
-% | -sqrt(2), 1, 0 |
+% Type 10: All 9 of the following pre-weighted kernels...
%
-% Type 4: | sqrt(2), -1, 0 |
-% | -1, 0, 1 | / 2*sqrt(2)
-% | 0, 1, -sqrt(2) |
+% Type 11: | 1, 0, -1 |
+% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
+% | 1, 0, -1 |
%
-% Type 5: | 0, 1, 0 |
-% | -1, 0, -1 | / 2
-% | 0, 1, 0 |
+% Type 12: | 1, sqrt(2), 1 |
+% | 0, 0, 0 | / 2*sqrt(2)
+% | 1, sqrt(2), 1 |
%
-% Type 6: | -1, 0, 1 |
-% | 0, 0, 0 | / 2
-% | 1, 0, -1 |
+% Type 13: | sqrt(2), -1, 0 |
+% | -1, 0, 1 | / 2*sqrt(2)
+% | 0, 1, -sqrt(2) |
%
-% Type 7: | 1, -2, 1 |
-% | -2, 4, -2 | / 6
-% | 1, -2, 1 |
+% Type 14: | 0, 1, -sqrt(2) |
+% | -1, 0, 1 | / 2*sqrt(2)
+% | sqrt(2), -1, 0 |
%
-% Type 8: | -2, 1, -2 |
-% | 1, 4, 1 | / 6
-% | -2, 1, -2 |
+% Type 15: | 0, -1, 0 |
+% | 1, 0, 1 | / 2
+% | 0, -1, 0 |
%
-% Type 9: | 1, 1, 1 |
-% | 1, 1, 1 | / 3
-% | 1, 1, 1 |
+% Type 16: | 1, 0, -1 |
+% | 0, 0, 0 | / 2
+% | -1, 0, 1 |
+%
+% Type 17: | 1, -2, 1 |
+% | -2, 4, -2 | / 6
+% | -1, -2, 1 |
+%
+% Type 18: | -2, 1, -2 |
+% | 1, 4, 1 | / 6
+% | -2, 1, -2 |
+%
+% Type 19: | 1, 1, 1 |
+% | 1, 1, 1 | / 3
+% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
+%       WARNING: The above was laid out as per
+% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
+% But rotated 90 degrees so direction is from left rather than the top.
+% I have yet to find any secondary confirmation of the above. The only
+% other source found was actual source code at
+% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
+%       Neither paper defines the kernels in a way that looks logical or
+% correct when taken as a whole.
%
% Boolean Kernels
%
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
-% Note that using a larger radius for the "Square" or the "Diamond" is
-% also equivelent to iterating the basic morphological method that many
-% times. However iterating with the smaller radius is actually faster
-% than using a larger kernel radius.
+% Octagon:[{radius}[,{scale}]]
+% Generate octagonal shaped kernel of given radius and constant scale.
+% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
+%       in a "Diamond" kernel.
+%
+% Disk:[{radius}[,{scale}]]
+% Generate a binary disk, thresholded at the radius given, the radius
+%       may be a floating-point value. Final Kernel size is floor(radius)*2+1
+% square. A radius of 5.3 is the default.
+%
+%       NOTE: Low radii Disk kernels produce the same results as
+% many of the previously defined kernels, but differ greatly at larger
+% radii. Here is a table of equivalences...
+% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
+% "Disk:1.5" => "Square"
+% "Disk:2" => "Diamond:2"
+% "Disk:2.5" => "Octagon"
+% "Disk:2.9" => "Square:2"
+% "Disk:3.5" => "Octagon:3"
+% "Disk:4.5" => "Octagon:4"
+% "Disk:5.4" => "Octagon:5"
+% "Disk:6.4" => "Octagon:6"
+% All other Disk shapes are unique to this kernel, but because a "Disk"
+% is more circular when using a larger radius, using a larger radius is
+% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
%
% Properly centered and odd sized rectangles work the best.
%
-% Disk:[{radius}[,{scale}]]
-% Generate a binary disk of the radius given, radius may be a float.
-% Kernel size will be ceil(radius)*2+1 square.
-% NOTE: Here are some disk shapes of specific interest
-% "Disk:1" => "diamond" or "cross:1"
-% "Disk:1.5" => "square"
-% "Disk:2" => "diamond:2"
-% "Disk:2.5" => a general disk shape of radius 2
-% "Disk:2.9" => "square:2"
-% "Disk:3.5" => default - octagonal/disk shape of radius 3
-% "Disk:4.2" => roughly octagonal shape of radius 4
-% "Disk:4.3" => a general disk shape of radius 4
-% After this all the kernel shape becomes more and more circular.
-%
-% Because a "disk" is more circular when using a larger radius, using a
-% larger radius is preferred over iterating the morphological operation.
-%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
-% NOTE: "plus:1" is equivelent to a "Diamond" kernel.
+% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
-% Find edges of a binary shape
+% Find flat orthogonal edges of a binary shape
% Corners
-% Find corners of a binary shape
-% Ridges
-% Find single pixel ridges or thin lines
-% Ridges2
-% Find 2 pixel thick ridges or lines
-% Ridges3
-% Find 2 pixel thick diagonal ridges (experimental)
-% LineEnds
+% Find 90 degree corners of a binary shape
+% Diagonals:type
+% A special kernel to thin the 'outside' of diagonals
+% LineEnds:type
%       Find end points of lines (for pruning a skeleton)
+% Two types of lines ends (default to both) can be searched for
+% Type 0: All line ends
+%       Type 1: single kernel for 4-connected line ends
+% Type 2: single kernel for simple line ends
% LineJunctions
%       Find three line junctions (within a skeleton)
+% Type 0: all line junctions
+% Type 1: Y Junction kernel
+% Type 2: Diagonal T Junction kernel
+% Type 3: Orthogonal T Junction kernel
+% Type 4: Diagonal X Junction kernel
+% Type 5: Orthogonal + Junction kernel
+% Ridges:type
+% Find single pixel ridges or thin lines
+%       Type 1: Find single pixel thick lines and ridges
+% Type 2: Find two pixel thick lines and ridges
% ConvexHull
-% Octagonal thicken kernel, to generate convex hulls of 45 degrees
-% Skeleton
-% Thinning kernel, which leaves behind a skeletion of a shape
+% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
+% Skeleton:type
+% Traditional skeleton generating kernels.
+%       Type 1: Traditional Skeleton kernel (4 connected skeleton)
+% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
+%       Type 3: Thinning skeleton based on a research paper by
+% Dan S. Bloomberg (Default Type)
+% ThinSE:type
+%       A huge variety of Thinning Kernels designed to preserve connectivity.
+%       Many other kernel sets use these kernels as source definitions.
+% Type numbers are 41-49, 81-89, 481, and 482 which are based on
+% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
-% Chebyshev Distance (also known as Tchebychev Distance) is a value of
-% one to any neighbour, orthogonal or diagonal. One why of thinking of
-% it is the number of squares a 'King' or 'Queen' in chess needs to
-% traverse reach any other position on a chess board. It results in a
-% 'square' like distance function, but one where diagonals are closer
-% than expected.
-%
-% Manhatten:[{radius}][x{scale}[%!]]
-% Manhatten Distance (also known as Rectilinear Distance, or the Taxi
-% Cab metric), is the distance needed when you can only travel in
-% orthogonal (horizontal or vertical) only. It is the distance a 'Rook'
-% in chess would travel. It results in a diamond like distances, where
-% diagonals are further than expected.
+% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
+%       is a value of one to any neighbour, orthogonal or diagonal. One way
+% of thinking of it is the number of squares a 'King' or 'Queen' in
+% chess needs to traverse reach any other position on a chess board.
+% It results in a 'square' like distance function, but one where
+% diagonals are given a value that is closer than expected.
+%
+% Manhattan:[{radius}][x{scale}[%!]]
+% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
+%       Cab distance metric), it is the distance needed when you can only
+%       travel in horizontal or vertical directions. It is the
+% distance a 'Rook' in chess would have to travel, and results in a
+% diamond like distances, where diagonals are further than expected.
+%
+% Octagonal:[{radius}][x{scale}[%!]]
+%       An interleaving of Manhattan and Chebyshev metrics producing an
+% increasing octagonally shaped distance. Distances matches those of
+% the "Octagon" shaped kernel of the same radius. The minimum radius
+% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
-% Euclidean Distance is the 'direct' or 'as the crow flys distance.
+% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
-% you will get octagonal like distance function, which is reasonally
-% accurate.
-%
-% However if you use a larger radius such as "Euclidean:4" you will
-% get a much smoother distance gradient from the edge of the shape.
-% Of course a larger kernel is slower to use, and generally not needed.
-%
-% To allow the use of fractional distances that you get with diagonals
-% the actual distance is scaled by a fixed value which the user can
-% provide. This is not actually nessary for either ""Chebyshev" or
-% "Manhatten" distance kernels, but is done for all three distance
-% kernels. If no scale is provided it is set to a value of 100,
-% allowing for a maximum distance measurement of 655 pixels using a Q16
-% version of IM, from any edge. However for small images this can
-% result in quite a dark gradient.
+% you will get octagonal like distance function.
+%
+% However using a larger radius such as "Euclidean:4" you will get a
+% much smoother distance gradient from the edge of the shape. Especially
+% if the image is pre-processed to include any anti-aliasing pixels.
+% Of course a larger kernel is slower to use, and not always needed.
+%
+% The first three Distance Measuring Kernels will only generate distances
+% of exact multiples of {scale} in binary images. As such you can use a
+%    scale of 1 without losing any information. However you also need some
+% scaling when handling non-binary anti-aliased shapes.
+%
+% The "Euclidean" Distance Kernel however does generate a non-integer
+% fractional results, and as such scaling is vital even for binary shapes.
%
*/
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
- case TestKernel:
+ assert("Should not call this function" != (char *)NULL);
break;
- case UnityKernel: /* Named Descrete Convolution Kernels */
- case LaplacianKernel:
- case SobelKernel:
+ case LaplacianKernel: /* Named Descrete Convolution Kernels */
+ case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
- case CornersKernel: /* Hit and Miss kernels */
+ case EdgesKernel: /* Hit and Miss kernels */
+ case CornersKernel:
+ case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
- case EdgesKernel:
case RidgesKernel:
- case Ridges2Kernel:
case ConvexHullKernel:
case SkeletonKernel:
- case MatKernel:
- /* A pre-generated kernel is not needed */
- break;
-#if 0 /* set to 1 to do a compile-time check that we haven't missed anything */
+ case ThinSEKernel:
+ break; /* A pre-generated kernel is not needed */
+#if 0
+ /* set to 1 to do a compile-time check that we haven't missed anything */
+ case UnityKernel:
case GaussianKernel:
- case DOGKernel:
- case LOGKernel:
+ case DoGKernel:
+ case LoGKernel:
case BlurKernel:
- case DOBKernel:
case CometKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
+ case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
- case ManhattenKernel:
+ case ManhattanKernel:
+ case OctangonalKernel:
case EuclideanKernel:
#else
default:
}
switch(type) {
- /* Convolution Kernels */
+ /*
+ Convolution Kernels
+ */
+ case UnityKernel:
+ {
+ kernel->height = kernel->width = (size_t) 1;
+ kernel->x = kernel->y = (ssize_t) 0;
+ kernel->values=(double *) AcquireQuantumMemory(1,sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+ kernel->maximum = kernel->values[0] = args->rho;
+ break;
+ }
+ break;
case GaussianKernel:
- case DOGKernel:
- case LOGKernel:
+ case DoGKernel:
+ case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
- else if ( (type != DOGKernel) || (sigma >= sigma2) )
+ else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
- * How to do this is currently not known, but appears to be
- * basied on the Error Function 'erf()' (intergral of a gaussian)
+       * How to do this I don't know, but it appears to be based on the
+       * Error Function 'erf()' (integral of a gaussian)
*/
- if ( type == GaussianKernel || type == DOGKernel )
- { /* Calculate a Gaussian, OR positive half of a DOG */
+ if ( type == GaussianKernel || type == DoGKernel )
+ { /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
- B = 1.0/(Magick2PI*sigma*sigma);
+ B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
}
- if ( type == DOGKernel )
+ if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
- B = 1.0/(Magick2PI*sigma*sigma);
+ B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
- if ( type == LOGKernel )
+ if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
- B = 1.0/(MagickPI*sigma*sigma*sigma*sigma);
+ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
break;
}
case BlurKernel:
- case DOBKernel:
{ double
sigma = fabs(args->sigma),
- sigma2 = fabs(args->xi),
- A, B;
+ alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
- else if ( (type == BlurKernel) || (sigma >= sigma2) )
- kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
else
- kernel->width = GetOptimalKernelWidth1D(args->rho,sigma2);
+ kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
- A = 1.0/(2.0*sigma*sigma);
- B = 1.0/(MagickSQ2PI*sigma );
+ alpha = 1.0/(2.0*sigma*sigma);
+ beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
- kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*A)*B;
+ kernel->values[(u+v)/KernelRank] +=
+ exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
-
- /* Subtract a Second 1D Gaussian for "Difference of Blur" */
- if ( type == DOBKernel )
- {
- if ( sigma2 > MagickEpsilon )
- { sigma = sigma2*KernelRank; /* simplify loop expressions */
- A = 1.0/(2.0*sigma*sigma);
- B = 1.0/(MagickSQ2PI*sigma);
- for ( u=-v; u <= v; u++)
- kernel->values[(u+v)/KernelRank] -= exp(-((double)(u*u))*A)*B;
- }
- else /* limiting case - a unity (normalized Dirac) kernel */
- kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
- }
#else
/* Direct calculation without curve averaging */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
- { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
- B = 1.0/(MagickSQ2PI*sigma);
+ { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
+ beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] = exp(-((double)(u*u))*A)*B;
+ kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
-
- /* Subtract a Second 1D Gaussian for "Difference of Blur" */
- if ( type == DOBKernel )
- {
- if ( sigma2 > MagickEpsilon )
- { sigma = sigma2; /* simplify loop expressions */
- A = 1.0/(2.0*sigma*sigma);
- B = 1.0/(MagickSQ2PI*sigma);
- for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] -= exp(-((double)(u*u))*A)*B;
- }
- else /* limiting case - a unity (normalized Dirac) kernel */
- kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
- }
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
- RotateKernelInfo(kernel, (type == BlurKernel) ? args->xi : args->psi );
+ RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
break;
}
- /* Convolution Kernels - Well Known Constants */
+ /*
+ Convolution Kernels - Well Known Named Constant Kernels
+ */
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
- case 15: /* a 5x5 LOG (sigma approx 1.4) */
+ case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
- case 19: /* a 9x9 LOG (sigma approx 1.4) */
+ case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
- "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,@12,@24,@12,-3,-5,-2 -2,-5,-0,@24,@40,@24,-0,-5,-2 -2,-5,-3,@12,@24,@12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
+ "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
break;
}
case SobelKernel:
- {
- kernel=ParseKernelArray("3: -1,0,1 -2,0,2 -1,0,1");
+ { /* Simple Sobel Kernel */
+ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
- RotateKernelInfo(kernel, args->rho); /* Rotate by angle */
+ RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
- kernel=ParseKernelArray("3: 0,0,0 -1,1,0 0,0,0");
+ kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
}
case PrewittKernel:
{
- kernel=ParseKernelArray("3: -1,1,1 0,0,0 -1,1,1");
+ kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
}
case CompassKernel:
{
- kernel=ParseKernelArray("3: -1,1,1 -1,-2,1 -1,1,1");
+ kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
}
case KirschKernel:
{
- kernel=ParseKernelArray("3: -3,-3,5 -3,0,5 -3,-3,5");
+ kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case FreiChenKernel:
- /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf */
- /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf */
+ /* Direction is set to be left to right positive */
+ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
+ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
- kernel=ParseKernelArray("3: -1,0,1 -2,0,2 -1,0,1");
+ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
- kernel->values[3] = -MagickSQ2;
- kernel->values[5] = +MagickSQ2;
+ kernel->type = type;
+ kernel->values[3] = +MagickSQ2;
+ kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
- case 1:
- kernel=ParseKernelArray("3: 1,2,1 0,0,0 -1,2,-1");
+ case 2:
+ kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
- kernel->values[1] = +MagickSQ2;
- kernel->values[7] = -MagickSQ2;
+ kernel->values[1] = kernel->values[3] = +MagickSQ2;
+ kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
- case 2:
- kernel=ParseKernelArray("3: 1,0,1 2,0,2 1,0,1");
+ case 10:
+ kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ break;
+ case 1:
+ case 11:
+ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
- kernel->values[5] = +MagickSQ2;
- CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ kernel->values[5] = -MagickSQ2;
+ CalcKernelMetaData(kernel); /* recalculate meta-data */
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
- case 3:
- kernel=ParseKernelArray("3: 0,-1,2 1,0,-1 -2,1,0");
+ case 12:
+ kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
- kernel->values[2] = +MagickSQ2;
- kernel->values[6] = -MagickSQ2;
+ kernel->values[1] = +MagickSQ2;
+ kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
- case 4:
+ case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
+ break;
+ case 14:
+ kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ kernel->values[2] = -MagickSQ2;
+ kernel->values[6] = +MagickSQ2;
+ CalcKernelMetaData(kernel);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
- case 5:
- kernel=ParseKernelArray("3: 0,1,0 -1,0,-1 0,1,0");
+ case 15:
+ kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
- case 6:
- kernel=ParseKernelArray("3: -1,0,1 0,0,0 1,0,-1");
+ case 16:
+ kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
- case 7:
- kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
+ case 17:
+ kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
- case 8:
+ case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
- case 9:
+ case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
- case -1:
- kernel=AcquireKernelInfo("FreiChen:1;FreiChen:2;FreiChen:3;FreiChen:4;FreiChen:5;FreiChen:6;FreiChen:7;FreiChen:8;FreiChen:9");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- break;
}
if ( fabs(args->sigma) > MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
break;
}
- /* Boolean Kernels */
+ /*
+ Boolean or Shaped Kernels
+ */
case DiamondKernel:
{
if (args->rho < 1.0)
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- if ((labs(u)+labs(v)) <= (ssize_t)kernel->x)
+ if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->positive_range = scale*u;
break;
}
- case DiskKernel:
- {
- ssize_t
- limit = (ssize_t)(args->rho*args->rho);
-
- if (args->rho < 0.4) /* default radius approx 3.5 */
- kernel->width = kernel->height = 7L, limit = 10L;
- else
- kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+ case OctagonKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius = 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ if ( (labs((long) u)+labs((long) v)) <=
+ ((long)kernel->x + (long)(kernel->x/2)) )
+ kernel->positive_range += kernel->values[i] = args->sigma;
+ else
+ kernel->values[i] = nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ break;
+ }
+ case DiskKernel:
+ {
+ ssize_t
+ limit = (ssize_t)(args->rho*args->rho);
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
+ if (args->rho < 0.4) /* default radius approx 4.3 */
+ kernel->width = kernel->height = 9L, limit = 18L;
+ else
+ kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ if ((u*u+v*v) <= limit)
+ kernel->positive_range += kernel->values[i] = args->sigma;
+ else
+ kernel->values[i] = nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ break;
+ }
+ case PlusKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+      /* set all kernel values along axes to given scale */
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ break;
+ }
+ case CrossKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+      /* set all kernel values along axes to given scale */
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ break;
+ }
+ /*
+ HitAndMiss Kernels
+ */
+ case RingKernel:
+ case PeaksKernel:
+ {
+ ssize_t
+ limit1,
+ limit2,
+ scale;
- /* set all kernel values within disk area to scale given */
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- if ((u*u+v*v) <= limit)
- kernel->positive_range += kernel->values[i] = args->sigma;
- else
- kernel->values[i] = nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- break;
- }
- case PlusKernel:
+ if (args->rho < args->sigma)
+ {
+ kernel->width = ((size_t)args->sigma)*2+1;
+ limit1 = (ssize_t)(args->rho*args->rho);
+ limit2 = (ssize_t)(args->sigma*args->sigma);
+ }
+ else
+ {
+ kernel->width = ((size_t)args->rho)*2+1;
+ limit1 = (ssize_t)(args->sigma*args->sigma);
+ limit2 = (ssize_t)(args->rho*args->rho);
+ }
+ if ( limit2 <= 0 )
+ kernel->width = 7L, limit1 = 7L, limit2 = 11L;
+
+ kernel->height = kernel->width;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
+ scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
+ for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ { ssize_t radius=u*u+v*v;
+ if (limit1 < radius && radius <= limit2)
+ kernel->positive_range += kernel->values[i] = (double) scale;
+ else
+ kernel->values[i] = nan;
+ }
+ kernel->minimum = kernel->maximum = (double) scale;
+ if ( type == PeaksKernel ) {
+ /* set the central point in the middle */
+ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
+ kernel->positive_range = 1.0;
+ kernel->maximum = 1.0;
+ }
+ break;
+ }
+ case EdgesKernel:
+ {
+ kernel=AcquireKernelInfo("ThinSE:482");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
+ break;
+ }
+ case CornersKernel:
+ {
+ kernel=AcquireKernelInfo("ThinSE:87");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
+ break;
+ }
+ case DiagonalsKernel:
+ {
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ { KernelInfo
+ *new_kernel;
+ kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ ExpandMirrorKernelInfo(kernel);
+ return(kernel);
+ }
+ case 1:
+ kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
+ break;
+ case 2:
+ kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case LineEndsKernel:
+ { /* Kernels for finding the end of thin lines */
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ /* set of kernels to find all end of lines */
+ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
+ case 1:
+ /* kernel for 4-connected line ends - no rotation */
+ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
+ break;
+ case 2:
+ /* kernel to add for 8-connected lines - no rotation */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
+ break;
+ case 3:
+ /* kernel to add for orthogonal line ends - does not find corners */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
+ break;
+ case 4:
+ /* traditional line end - fails on last T end */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case LineJunctionsKernel:
+ { /* kernels for finding the junctions of multiple lines */
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ /* set of kernels to find all line junctions */
+ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
+ case 1:
+ /* Y Junction */
+ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
+ break;
+ case 2:
+ /* Diagonal T Junctions */
+ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
+ break;
+ case 3:
+ /* Orthogonal T Junctions */
+ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
+ break;
+ case 4:
+ /* Diagonal X Junctions */
+ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
+ break;
+ case 5:
+ /* Orthogonal X Junctions - minimal diamond kernel */
+ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case RidgesKernel:
+ { /* Ridges - Ridge finding kernels */
+ KernelInfo
+ *new_kernel;
+ switch ( (int) args->rho ) {
+ case 1:
+ default:
+ kernel=ParseKernelArray("3x1:0,1,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
+ break;
+ case 2:
+ kernel=ParseKernelArray("4x1:0,1,1,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
+
+ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
+          /* Unfortunately we can not yet rotate a non-square kernel */
+          /* But then we can't flip a non-symmetrical kernel either */
+ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ break;
+ }
+ break;
+ }
+ case ConvexHullKernel:
+ {
+ KernelInfo
+ *new_kernel;
+ /* first set of 8 kernels */
+ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0);
+ /* append the mirror versions too - no flip function yet */
+ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ ExpandRotateKernelInfo(new_kernel, 90.0);
+ LastKernelInfo(kernel)->next = new_kernel;
+ break;
+ }
+ case SkeletonKernel:
+ {
+ switch ( (int) args->rho ) {
+ case 1:
+ default:
+ /* Traditional Skeleton...
+ ** A cyclically rotated single kernel
+ */
+ kernel=AcquireKernelInfo("ThinSE:482");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
+ break;
+ case 2:
+ /* HIPR Variation of the cyclic skeleton
+ ** Corners of the traditional method made more forgiving,
+          ** but they retain the same cyclic order.
+ */
+ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ if (kernel->next == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ kernel->type = type;
+ kernel->next->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
+ break;
+ case 3:
+ /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
+          ** "Connectivity-Preserving Morphological Image Transformations"
+ ** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
+ ** http://www.leptonica.com/papers/conn.pdf
+ */
+ kernel=AcquireKernelInfo(
+ "ThinSE:41; ThinSE:42; ThinSE:43");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ kernel->next->type = type;
+ kernel->next->next->type = type;
+ ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
+ break;
+ }
+ break;
+ }
+ case ThinSEKernel:
+ { /* Special kernels for general thinning, while preserving connections
+      ** "Connectivity-Preserving Morphological Image Transformations"
+ ** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
+ ** http://www.leptonica.com/papers/conn.pdf
+ ** And
+ ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
+ **
+ ** Note kernels do not specify the origin pixel, allowing them
+ ** to be used for both thickening and thinning operations.
+ */
+ switch ( (int) args->rho ) {
+ /* SE for 4-connected thinning */
+ case 41: /* SE_4_1 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
+ break;
+ case 42: /* SE_4_2 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
+ break;
+ case 43: /* SE_4_3 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
+ break;
+ case 44: /* SE_4_4 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
+ break;
+ case 45: /* SE_4_5 */
+ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
+ break;
+ case 46: /* SE_4_6 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
+ break;
+ case 47: /* SE_4_7 */
+ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
+ break;
+ case 48: /* SE_4_8 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
+ break;
+ case 49: /* SE_4_9 */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
+ break;
+ /* SE for 8-connected thinning - negatives of the above */
+ case 81: /* SE_8_0 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
+ break;
+ case 82: /* SE_8_2 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
+ break;
+ case 83: /* SE_8_3 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
+ break;
+ case 84: /* SE_8_4 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
+ break;
+ case 85: /* SE_8_5 */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
+ break;
+ case 86: /* SE_8_6 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
+ break;
+ case 87: /* SE_8_7 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
+ break;
+ case 88: /* SE_8_8 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
+ break;
+ case 89: /* SE_8_9 */
+ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
+ break;
+ /* Special combined SE kernels */
+ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
+ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
+ break;
+ case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
+ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
+ break;
+ case 481: /* SE_48_1 - General Connected Corner Kernel */
+ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
+ break;
+ default:
+ case 482: /* SE_48_2 - General Edge Kernel */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ /*
+ Distance Measuring Kernels
+ */
+ case ChebyshevKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 3; /* default radius = 1 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->positive_range += ( kernel->values[i] =
+ args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
+ kernel->maximum = kernel->values[0];
+ break;
+ }
+ case ManhattanKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 3; /* default radius = 1 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->positive_range += ( kernel->values[i] =
+ args->sigma*(labs((long) u)+labs((long) v)) );
+ kernel->maximum = kernel->values[0];
+ break;
+ }
+ case OctagonalKernel:
{
- if (args->rho < 1.0)
- kernel->width = kernel->height = 5; /* default radius 2 */
+ if (args->rho < 2.0)
+ kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
- /* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ {
+ double
+ r1 = MagickMax(fabs((double)u),fabs((double)v)),
+ r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
+ kernel->positive_range += kernel->values[i] =
+ args->sigma*MagickMax(r1,r2);
+ }
+ kernel->maximum = kernel->values[0];
break;
}
- case CrossKernel:
+ case EuclideanKernel:
{
if (args->rho < 1.0)
- kernel->width = kernel->height = 5; /* default radius 2 */
+ kernel->width = kernel->height = 3; /* default radius = 1 */
else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
- /* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ kernel->positive_range += ( kernel->values[i] =
+ args->sigma*sqrt((double)(u*u+v*v)) );
+ kernel->maximum = kernel->values[0];
break;
}
- /* HitAndMiss Kernels */
- case RingKernel:
- case PeaksKernel:
+ default:
{
- ssize_t
- limit1,
- limit2,
- scale;
-
- if (args->rho < args->sigma)
- {
- kernel->width = ((size_t)args->sigma)*2+1;
- limit1 = (ssize_t)(args->rho*args->rho);
- limit2 = (ssize_t)(args->sigma*args->sigma);
- }
- else
- {
- kernel->width = ((size_t)args->rho)*2+1;
- limit1 = (ssize_t)(args->sigma*args->sigma);
- limit2 = (ssize_t)(args->rho*args->rho);
- }
- if ( limit2 <= 0 )
- kernel->width = 7L, limit1 = 7L, limit2 = 11L;
-
- kernel->height = kernel->width;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
- scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
- for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- { ssize_t radius=u*u+v*v;
- if (limit1 < radius && radius <= limit2)
- kernel->positive_range += kernel->values[i] = (double) scale;
- else
- kernel->values[i] = nan;
- }
- kernel->minimum = kernel->minimum = (double) scale;
- if ( type == PeaksKernel ) {
- /* set the central point in the middle */
- kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
- kernel->positive_range = 1.0;
- kernel->maximum = 1.0;
- }
- break;
- }
- case EdgesKernel:
- {
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0); /* Create a list of 4 rotated kernels */
- break;
- }
- case CornersKernel:
- {
- kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0); /* Create a list of 4 rotated kernels */
- break;
- }
- case RidgesKernel:
- {
- kernel=ParseKernelArray("3x1:0,1,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
- break;
- }
- case Ridges2Kernel:
- {
- KernelInfo
- *new_kernel;
- kernel=ParseKernelArray("4x1:0,1,1,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0); /* 4 rotated kernels */
-#if 0
- /* 2 pixel diagonaly thick - 4 rotates - not needed? */
- new_kernel=ParseKernelArray("4x4^:0,-,-,- -,1,-,- -,-,1,- -,-,-,0'");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandKernelInfo(new_kernel, 90.0); /* 4 rotated kernels */
- LastKernelInfo(kernel)->next = new_kernel;
-#endif
- /* kernels to find a stepped 'thick' line - 4 rotates * mirror */
- /* Unfortunatally we can not yet rotate a non-square kernel */
- /* But then we can't flip a non-symetrical kernel either */
- new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- case LineEndsKernel:
- {
- KernelInfo
- *new_kernel;
- kernel=ParseKernelArray("3: 0,0,0 0,1,0 -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0);
- /* append second set of 4 kernels */
- new_kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandKernelInfo(new_kernel, 90.0);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- case LineJunctionsKernel:
- {
- KernelInfo
- *new_kernel;
- /* first set of 4 kernels */
- kernel=ParseKernelArray("3: -,1,- -,1,- 1,-,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 45.0);
- /* append second set of 4 kernels */
- new_kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandKernelInfo(new_kernel, 90.0);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- case ConvexHullKernel:
- {
- KernelInfo
- *new_kernel;
- /* first set of 8 kernels */
- kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 45.0);
- /* append the mirror versions too */
- new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandKernelInfo(new_kernel, 45.0);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- case SkeletonKernel:
- { /* what is the best form for skeletonization by thinning? */
-#if 1
- /* Full Corner rotated to form edges too */
- kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 45);
-#endif
-#if 0
- /* As last but thin the edges before looking for corners */
- KernelInfo
- *new_kernel;
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandKernelInfo(kernel, 90.0);
- new_kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,1");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandKernelInfo(new_kernel, 90.0);
- LastKernelInfo(kernel)->next = new_kernel;
-#endif
-#if 0
- kernel=AcquireKernelInfo("Edges;Corners");
-#endif
-#if 0
- kernel=AcquireKernelInfo("Corners;Edges");
-#endif
- break;
- }
- case MatKernel: /* experimental - MAT from a Distance Gradient */
- {
- KernelInfo
- *new_kernel;
- /* Ridge Kernel but without the diagonal */
- kernel=ParseKernelArray("3x1: 0,1,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = RidgesKernel;
- ExpandKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
- /* Plus the 2 pixel ridges kernel - no diagonal */
- new_kernel=AcquireKernelBuiltIn(Ridges2Kernel,args);
- if (new_kernel == (KernelInfo *) NULL)
- return(kernel);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- /* Distance Measuring Kernels */
- case ChebyshevKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 3; /* default radius = 1 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->positive_range += ( kernel->values[i] =
- args->sigma*((labs(u)>labs(v)) ? labs(u) : labs(v)) );
- kernel->maximum = kernel->values[0];
- break;
- }
- case ManhattenKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 3; /* default radius = 1 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->positive_range += ( kernel->values[i] =
- args->sigma*(labs(u)+labs(v)) );
- kernel->maximum = kernel->values[0];
- break;
- }
- case EuclideanKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 3; /* default radius = 1 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->positive_range += ( kernel->values[i] =
- args->sigma*sqrt((double)(u*u+v*v)) );
- kernel->maximum = kernel->values[0];
- break;
- }
- case UnityKernel:
- default:
- {
- /* Unity or No-Op Kernel - 3x3 with 1 in center */
- kernel=ParseKernelArray("3:0,0,0,0,1,0,0,0,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = ( type == UnityKernel ) ? UnityKernel : UndefinedKernel;
+ /* No-Op Kernel - Basically just a single pixel on its own */
+ kernel=ParseKernelArray("1:1");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = UndefinedKernel;
break;
}
break;
}
-
return(kernel);
}
\f
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that its
% can be modified without effecting the original. The cloned kernel should
-% be destroyed using DestoryKernelInfo() when no ssize_ter needed.
+% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
-
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
% %
% %
% %
-% E x p a n d K e r n e l I n f o %
++ E x p a n d M i r r o r K e r n e l I n f o %
+% %
+% %
+% %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
+% sequence of 90-degree rotated kernels, providing a reflected 180-degree
+% rotation before the -/+ 90-degree rotations.
+%
+% This special rotation order produces a better, more symmetrical thinning of
+% objects.
+%
+% The format of the ExpandMirrorKernelInfo method is:
+%
+% void ExpandMirrorKernelInfo(KernelInfo *kernel)
+%
+% A description of each parameter follows:
+%
+% o kernel: the Morphology/Convolution kernel
+%
+% This function is only internal to this module, as it is not finalized,
+% especially with regard to non-orthogonal angles, and rotation of larger
+% 2D kernels.
+*/
+
+#if 0
+static void FlopKernelInfo(KernelInfo *kernel)
+ { /* Do a Flop by reversing each row. */
+ size_t
+ y;
+ register ssize_t
+ x,r;
+ register double
+ *k,t;
+
+ for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
+ for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
+ t=k[x], k[x]=k[r], k[r]=t;
+
+ kernel->x = kernel->width - kernel->x - 1;
+ angle = fmod(angle+180.0, 360.0);
+ }
+#endif
+
+static void ExpandMirrorKernelInfo(KernelInfo *kernel)
+{
+ KernelInfo
+ *clone,
+ *last;
+
+ last = kernel;
+
+ clone = CloneKernelInfo(last);
+ RotateKernelInfo(clone, 180); /* flip */
+ LastKernelInfo(last)->next = clone;
+ last = clone;
+
+ clone = CloneKernelInfo(last);
+ RotateKernelInfo(clone, 90); /* transpose */
+ LastKernelInfo(last)->next = clone;
+ last = clone;
+
+ clone = CloneKernelInfo(last);
+ RotateKernelInfo(clone, 180); /* flop */
+ LastKernelInfo(last)->next = clone;
+
+ return;
+}
+\f
+/*
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% %
+% %
+% %
++ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
-% ExpandKernelInfo() takes a single kernel, and expands it into a list
-% of kernels each incrementally rotated the angle given.
+% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
+% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only works for 3x3 kernels.
% While 90 degree roatations only works for linear and square kernels
%
-% The format of the RotateKernelInfo method is:
+% The format of the ExpandRotateKernelInfo method is:
%
-% void ExpandKernelInfo(KernelInfo *kernel, double angle)
+% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
- /* Test for Nan equivelence */
+ /* Test for Nan equivalence */
if ( IsNan(kernel1->values[i]) && !IsNan(kernel2->values[i]) )
return MagickFalse;
if ( IsNan(kernel2->values[i]) && !IsNan(kernel1->values[i]) )
return MagickFalse;
- /* Test actual values are equivelent */
+ /* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) > MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
-static void ExpandKernelInfo(KernelInfo *kernel, const double angle)
+static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
KernelInfo
*clone,
RotateKernelInfo(clone, angle);
if ( SameKernelInfo(kernel, clone) == MagickTrue )
break;
- last->next = clone;
+ LastKernelInfo(last)->next = clone;
last = clone;
}
- clone = DestroyKernelInfo(clone); /* This was the same as the first - junk */
+ clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
return;
}
\f
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
-% It is basically equivelent to as MorphologyImageChannel() (see below) but
-% without user controls, that that function extracts and applies to kernels
-% and morphology methods.
+% It is basically equivalent to MorphologyImageChannel() (see below) but
+% without any user controls. This allows internal programs to use this
+% function, to actually perform a specific task without possible interference
+% by any API user supplied settings.
+%
+% It is MorphologyImageChannel() task to extract any such user controls, and
+% pass them to this function for processing.
%
% More specifically kernels are not normalized/scaled/blended by the
-% 'convolve:scale' Image Artifact (-set setting), and the convolve bias
-% (-bias setting or image->bias) is passed directly to this function,
-% and not extracted from an image.
+% 'convolve:scale' Image Artifact (setting), nor is the convolve bias
+% (-bias setting or image->bias) looked at, but must be supplied from the
+% function arguments.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
-% const ssize_t iterations,const KernelInfo *kernel,
-% const CompositeMethod compose, const double bias,
-% ExceptionInfo *exception)
+% const ChannelType channel, const ssize_t iterations,
+% const KernelInfo *kernel, const CompositeMethod compose,
+% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
-% o image: the image.
+% o image: the source image
%
% o method: the morphology method to be applied.
%
+% o channel: the channels to which the operations are applied
+% The channel 'sync' flag determines if 'alpha weighting' is
+% applied for convolution style operations.
+%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
-% Warning: kernel may be normalized for the Convolve method.
%
% o compose: How to handle or merge multi-kernel results.
-% If 'Undefined' use default of the Morphology method.
-% If 'No' force image to be re-iterated by each kernel.
-% Otherwise merge the results using the mathematical compose
-% method given.
+% If 'UndefinedCompositeOp' use default for the Morphology method.
+% If 'NoCompositeOp' force image to be re-iterated by each kernel.
+% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
%
*/
-
/* Apply a Morphology Primative to an image using the given kernel.
-** Two pre-created images must be provided, no image is created.
-** Returning the number of pixels that changed.
+** Two pre-created images must be provided, and no image is created.
+** It returns the number of pixels that changed between the images
+** for result convergence determination.
*/
-static size_t MorphologyPrimitive(const Image *image, Image
- *result_image, const MorphologyMethod method, const ChannelType channel,
+static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
+ const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
*q_view;
ssize_t
- y, offx, offy,
+ y, offx, offy;
+
+ size_t
+ virt_width,
changed;
MagickBooleanType
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
+ virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
- case DistanceMorphology:
+ /*case DistanceMorphology:*/
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
- /* kernel is user as is, without reflection */
+ /* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
+
+ if ( method == ConvolveMorphology && kernel->width == 1 )
+ { /* Special handling (for speed) of vertical (blur) kernels.
+ ** This performs its handling in columns rather than in rows.
+ ** This is only done for convolve as it is the only method that
+ ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
+ **
+ ** Timing tests (on single CPU laptop)
+      **    Using a vertical 1-d Blur with normal row-by-row (below)
+ ** time convert logo: -morphology Convolve Blur:0x10+90 null:
+ ** 0.807u
+ ** Using this column method
+ ** time convert logo: -morphology Convolve Blur:0x10+90 null:
+ ** 0.620u
+ **
+ ** Anthony Thyssen, 14 June 2010
+ */
+ register ssize_t
+ x;
+
#if defined(MAGICKCORE_OPENMP_SUPPORT)
- #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
+#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
- for (y=0; y < (ssize_t) image->rows; y++)
- {
- MagickBooleanType
- sync;
+ for (x=0; x < (ssize_t) image->columns; x++)
+ {
+ register const PixelPacket
+ *restrict p;
- register const PixelPacket
- *restrict p;
+ register const IndexPacket
+ *restrict p_indexes;
- register const IndexPacket
- *restrict p_indexes;
+ register PixelPacket
+ *restrict q;
- register PixelPacket
- *restrict q;
+ register IndexPacket
+ *restrict q_indexes;
- register IndexPacket
- *restrict q_indexes;
+ register ssize_t
+ y;
- register ssize_t
- x;
+ ssize_t
+ r;
- size_t
- r;
+ if (status == MagickFalse)
+ continue;
+ p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
+ image->rows+kernel->height-1, exception);
+ q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ {
+ status=MagickFalse;
+ continue;
+ }
+ p_indexes=GetCacheViewVirtualIndexQueue(p_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
- if (status == MagickFalse)
- continue;
- p=GetCacheViewVirtualPixels(p_view, -offx, y-offy,
- image->columns+kernel->width, kernel->height, exception);
- q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = offy;
+
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register ssize_t
+ v;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+
+ MagickPixelPacket
+ result;
+
+ /* Copy input image to the output image for unused channels
+ * This removes need for 'cloning' a new image every iteration
+ */
+ *q = p[r];
+ if (image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+y,GetIndexPixelComponent(
+ p_indexes+r));
+
+ /* Set the bias of the weighted average output */
+ result.red =
+ result.green =
+ result.blue =
+ result.opacity =
+ result.index = bias;
+
+
+ /* Weighted Average of pixels using reflected kernel
+ **
+            ** NOTE for correct working of this operation for asymmetrical
+            ** kernels, the kernel needs to be applied in its reflected form.
+            ** That is, its values need to be reversed.
+ */
+ k = &kernel->values[ kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ if ( ((channel & SyncChannels) == 0 ) ||
+ (image->matte == MagickFalse) )
+ { /* No 'Sync' involved.
+ ** Convolution is simple greyscale channel operation
+ */
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ if ( IsNan(*k) ) continue;
+ result.red += (*k)*GetRedPixelComponent(k_pixels);
+ result.green += (*k)*GetGreenPixelComponent(k_pixels);
+ result.blue += (*k)*GetBluePixelComponent(k_pixels);
+ result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
+ if ( image->colorspace == CMYKColorspace)
+ result.index += (*k)*(*k_indexes);
+ k--;
+ k_pixels++;
+ k_indexes++;
+ }
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ }
+ else
+ { /* Channel 'Sync' Flag, and Alpha Channel enabled.
+ ** Weight the color channels with Alpha Channel so that
+ ** transparent pixels are not part of the results.
+ */
+ MagickRealType
+ alpha, /* alpha weighting of colors : kernel*alpha */
+ gamma; /* divisor, sum of color weighting values */
+
+ gamma=0.0;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ if ( IsNan(*k) ) continue;
+ alpha=(*k)*(QuantumScale*(QuantumRange-GetOpacityPixelComponent(k_pixels)));
+ gamma += alpha;
+ result.red += alpha*GetRedPixelComponent(k_pixels);
+ result.green += alpha*GetGreenPixelComponent(k_pixels);
+ result.blue += alpha*GetBluePixelComponent(k_pixels);
+ result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
+ if ( image->colorspace == CMYKColorspace)
+ result.index += alpha*(*k_indexes);
+ k--;
+ k_pixels++;
+ k_indexes++;
+ }
+ /* Sync'ed channels, all channels are modified */
+ gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
+ SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
+ SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
+ SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if (image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(gamma*
+ result.index));
+ }
+
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q))
+ || ( p[r].green != GetGreenPixelComponent(q))
+ || ( p[r].blue != GetBluePixelComponent(q))
+ || ( p[r].opacity != GetOpacityPixelComponent(q))
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+ p++;
+ q++;
+ } /* y */
+ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ {
+ MagickBooleanType
+ proceed;
+
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp critical (MagickCore_MorphologyImage)
+#endif
+ proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
+ if (proceed == MagickFalse)
+ status=MagickFalse;
+ }
+ } /* x */
+ result_image->type=image->type;
+ q_view=DestroyCacheView(q_view);
+ p_view=DestroyCacheView(p_view);
+ return(status ? (ssize_t) changed : 0);
+ }
+
+ /*
+ ** Normal handling of horizontal or rectangular kernels (row by row)
+ */
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
+#endif
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ size_t
+ r;
+
+ if (status == MagickFalse)
+ continue;
+ p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
+ kernel->height, exception);
+ q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
- r = (image->columns+kernel->width)*offy+offx; /* constant */
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
min,
max;
- /* Copy input to ouput image for unused channels
+ /* Copy input image to the output image for unused channels
* This removes need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
- q_indexes[x] = p_indexes[r];
+ SetIndexPixelComponent(q_indexes+x,GetIndexPixelComponent(p_indexes+r));
/* Defaults */
min.red =
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
- result.index = (MagickRealType) p_indexes[r];
+ result.index = (MagickRealType) GetIndexPixelComponent(p_indexes+r);
switch (method) {
case ConvolveMorphology:
- /* Set the user defined bias of the weighted average output */
+ /* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
- if (((channel & SyncChannels) != 0 ) &&
- (image->matte == MagickTrue))
- { /* Channel has a 'Sync' Flag, and Alpha Channel enabled.
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ if ( ((channel & SyncChannels) == 0 ) ||
+ (image->matte == MagickFalse) )
+ { /* No 'Sync' involved.
+ ** Convolution is simple greyscale channel operation
+ */
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ result.red += (*k)*k_pixels[u].red;
+ result.green += (*k)*k_pixels[u].green;
+ result.blue += (*k)*k_pixels[u].blue;
+ result.opacity += (*k)*k_pixels[u].opacity;
+ if ( image->colorspace == CMYKColorspace)
+ result.index += (*k)*GetIndexPixelComponent(k_indexes+u);
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(
+ result.index));
+ }
+ else
+ { /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
- alpha, /* color channel weighting : kernel*alpha */
- gamma; /* divisor, sum of weighting values */
+ alpha, /* alpha weighting of colors : kernel*alpha */
+ gamma; /* divisor, sum of color weighting values */
gamma=0.0;
- k = &kernel->values[ kernel->width*kernel->height-1 ];
- k_pixels = p;
- k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
- result.opacity += (*k)*(QuantumRange-k_pixels[u].opacity);
+ result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
- result.index += alpha*k_indexes[u];
+ result.index+=alpha*GetIndexPixelComponent(k_indexes+u);
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
- result.red *= gamma;
- result.green *= gamma;
- result.blue *= gamma;
- result.opacity *= gamma;
- result.index *= gamma;
- }
- else
- {
- /* No 'Sync' flag, or no Alpha involved.
- ** Convolution is simple individual channel weigthed sum.
- */
- k = &kernel->values[ kernel->width*kernel->height-1 ];
- k_pixels = p;
- k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
- for (u=0; u < (ssize_t) kernel->width; u++, k--) {
- if ( IsNan(*k) ) continue;
- result.red += (*k)*k_pixels[u].red;
- result.green += (*k)*k_pixels[u].green;
- result.blue += (*k)*k_pixels[u].blue;
- result.opacity += (*k)*(QuantumRange-k_pixels[u].opacity);
- if ( image->colorspace == CMYKColorspace)
- result.index += (*k)*k_indexes[u];
- }
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
- }
+ SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
+ SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
+ SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if (image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(gamma*
+ result.index));
}
break;
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(min.index, (double) k_indexes[u]);
+ Minimize(min.index,(double) GetIndexPixelComponent(
+ k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
break;
-
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Maximize(max.index, (double) k_indexes[u]);
+ Maximize(max.index, (double) GetIndexPixelComponent(
+ k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
break;
** neighbourhoods, 0.0 for background, and 1.0 for foreground
** with either Nan or 0.5 values for don't care.
**
- ** Note that this can produce negative results, though really
- ** only a positive match has any real value.
+ ** Note that this will never produce a meaningless negative
+ ** result. Such results can cause Thinning/Thicken to not work
+ ** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(min.index, (double) k_indexes[u]);
+ Minimize(min.index,(double) GetIndexPixelComponent(
+ k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Maximize(max.index, (double) k_indexes[u]);
+ Maximize(max.index, (double) GetIndexPixelComponent(
+ k_indexes+u));
}
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
- /* Pattern Match only if min fg larger than min bg pixels */
+ /* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
- ** take into account the moderating effect of teh alpha channel
+ ** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
result.red = 1.0;
}
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+
+ case DilateIntensityMorphology:
+ /* Select Pixel with Maximum Intensity within kernel neighbourhood
+ **
+ ** WARNING: the intensity test fails for CMYK and does not
+ ** take into account the moderating effect of the alpha channel
+ ** on the intensity (yet).
+ **
+      ** NOTE for correct working of this operation for asymmetrical
+      ** kernels, the kernel needs to be applied in its reflected form.
+      ** That is, its values need to be reversed.
+ */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
+ if ( result.red == 0.0 ||
+ PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
+ /* copy the whole pixel - no channel selection */
+ *q = k_pixels[u];
+ if ( result.red > 0.0 ) changed++;
+ result.red = 1.0;
+ }
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+#if 0
+ This code has been obsoleted by the MorphologyPrimitiveDirect() function.
+ However it is still (almost) correct coding for Grayscale Morphology.
+ That is...
+
+ GrayErode is equivalent but with kernel values subtracted from pixels
+ without the kernel rotation
+ GreyDilate is equivalent but using Maximum() instead of Minimum()
+    using kernel rotation
+
+ case DistanceMorphology:
+ /* Add kernel Value and select the minimum value found.
+ ** The result is a iterative distance from edge of image shape.
+ **
+      ** All Distance Kernels are symmetrical, but that may not always
+ ** be the case. For example how about a distance from left edges?
+      ** To work correctly with asymmetrical kernels the reflected kernel
+ ** needs to be applied.
+ */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index,(*k)+GetIndexPixelComponent(
+ k_indexes+u));
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+#endif
+ case UndefinedMorphology:
+ default:
+ break; /* Do nothing */
+ }
+ /* Final mathematics of results (combine with original image?)
+ **
+ ** NOTE: Difference Morphology operators Edge* and *Hat could also
+ ** be done here but works better with iteration as a image difference
+ ** in the controlling function (below). Thicken and Thinning however
+ ** should be done here so they can be iterated correctly.
+ */
+ switch ( method ) {
+ case HitAndMissMorphology:
+ case ErodeMorphology:
+ result = min; /* minimum of neighbourhood */
+ break;
+ case DilateMorphology:
+ result = max; /* maximum of neighbourhood */
+ break;
+ case ThinningMorphology:
+ /* subtract pattern match from original */
+ result.red -= min.red;
+ result.green -= min.green;
+ result.blue -= min.blue;
+ result.opacity -= min.opacity;
+ result.index -= min.index;
+ break;
+ case ThickenMorphology:
+ /* Add the pattern matchs to the original */
+ result.red += min.red;
+ result.green += min.green;
+ result.blue += min.blue;
+ result.opacity += min.opacity;
+ result.index += min.index;
+ break;
+ default:
+ /* result directly calculated or assigned */
+ break;
+ }
+ /* Assign the resulting pixel values - Clamping Result */
+ switch ( method ) {
+ case UndefinedMorphology:
+ case ConvolveMorphology:
+ case DilateIntensityMorphology:
+ case ErodeIntensityMorphology:
+ break; /* full pixel was directly assigned - not a channel method */
+ default:
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ break;
+ }
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+ p++;
+ q++;
+ } /* x */
+ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ {
+ MagickBooleanType
+ proceed;
+
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp critical (MagickCore_MorphologyImage)
+#endif
+ proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
+ if (proceed == MagickFalse)
+ status=MagickFalse;
+ }
+ } /* y */
+ q_view=DestroyCacheView(q_view);
+ p_view=DestroyCacheView(p_view);
+ return(status ? (ssize_t)changed : -1);
+}
+
+/* This is almost identical to the MorphologyPrimative() function above,
+** but will apply the primitive directly to the image in two passes.
+**
+** That is after each row is 'Sync'ed' into the image, the next row will
+** make use of those values as part of the calculation of the next row.
+** It then repeats, but going in the opposite (bottom-up) direction.
+**
+** Because of this 'iterative' handling this function can not make use
+** of multi-threaded, parallel processing.
+*/
+static ssize_t MorphologyPrimitiveDirect(Image *image,
+ const MorphologyMethod method, const ChannelType channel,
+ const KernelInfo *kernel,ExceptionInfo *exception)
+{
+ CacheView
+ *auth_view,
+ *virt_view;
+
+ MagickBooleanType
+ status;
+
+ MagickOffsetType
+ progress;
+
+ ssize_t
+ y, offx, offy;
+
+ size_t
+ virt_width,
+ changed;
+
+ status=MagickTrue;
+ changed=0;
+ progress=0;
+
+ assert(image != (Image *) NULL);
+ assert(image->signature == MagickSignature);
+ assert(kernel != (KernelInfo *) NULL);
+ assert(kernel->signature == MagickSignature);
+ assert(exception != (ExceptionInfo *) NULL);
+ assert(exception->signature == MagickSignature);
+
+ /* Some methods (including convolve) need to use a reflected kernel.
+ * Adjust 'origin' offsets to loop through the kernel as a reflection.
+ */
+ offx = kernel->x;
+ offy = kernel->y;
+ switch(method) {
+ case DistanceMorphology:
+ case VoronoiMorphology:
+ /* kernel needs to used with reflection about origin */
+ offx = (ssize_t) kernel->width-offx-1;
+ offy = (ssize_t) kernel->height-offy-1;
+ break;
+#if 0
+ case ?????Morphology:
+ /* kernel is used as is, without reflection */
+ break;
+#endif
+ default:
+ assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
+ break;
+ }
+
+ /* DO NOT THREAD THIS CODE! */
+ /* two views into same image (virtual, and actual) */
+ virt_view=AcquireCacheView(image);
+ auth_view=AcquireCacheView(image);
+ virt_width=image->columns+kernel->width-1;
+
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ ssize_t
+ r;
+
+ /* NOTE read virtual pixels, and authentic pixels, from the same image!
+ ** we read using virtual to get virtual pixel handling, but write back
+ ** into the same image.
+ **
+ ** Only top half of kernel is processed as we do a single pass downward
+ ** through the image iterating the distance function as we go.
+ */
+ if (status == MagickFalse)
+ break;
+ p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
+ exception);
+ q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
+ exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ status=MagickFalse;
+ if (status == MagickFalse)
+ break;
+ p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = (ssize_t) virt_width*offy + offx;
+
+ for (x=0; x < (ssize_t) image->columns; x++)
+ {
+ ssize_t
+ v;
+
+ register ssize_t
+ u;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+
+ MagickPixelPacket
+ result;
+
+ /* Starting Defaults */
+ GetMagickPixelPacket(image,&result);
+ SetMagickPixelPacket(image,q,q_indexes,&result);
+ if ( method != VoronoiMorphology )
+ result.opacity = QuantumRange - result.opacity;
+
+ switch ( method ) {
+ case DistanceMorphology:
+ /* Add kernel Value and select the minimum value found. */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v <= (ssize_t) offy; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=0; u < (ssize_t) offx; u++, k--) {
+ if ( x+u-offx < 0 ) continue; /* off the edge! */
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
+ }
break;
-
- case DilateIntensityMorphology:
- /* Select Pixel with Maximum Intensity within kernel neighbourhood
- **
- ** WARNING: the intensity test fails for CMYK and does not
- ** take into account the moderating effect of the alpha channel
- ** on the intensity (yet).
+ case VoronoiMorphology:
+ /* Apply Distance to 'Matte' channel, copying the closest color.
**
- ** NOTE for correct working of this operation for asymetrical
- ** kernels, the kernel needs to be applied in its reflected form.
- ** That is its values needs to be reversed.
+ ** This is experimental, and really the 'alpha' component should
+ ** be a completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
- if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
- if ( result.red == 0.0 ||
- PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
- /* copy the whole pixel - no channel selection */
- *q = k_pixels[u];
- if ( result.red > 0.0 ) changed++;
- result.red = 1.0;
- }
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=0; u < (ssize_t) offx; u++, k--) {
+ if ( x+u-offx < 0 ) continue; /* off the edge! */
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
+ }
break;
+ default:
+ /* result directly calculated or assigned */
+ break;
+ }
+ /* Assign the resulting pixel values - Clamping Result */
+ switch ( method ) {
+ case VoronoiMorphology:
+ SetPixelPacket(image,&result,q,q_indexes);
+ break;
+ default:
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ break;
+ }
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+
+ p++; /* increment pixel buffers */
+ q++;
+ } /* x */
+
+ if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
+ == MagickFalse )
+ status=MagickFalse;
+
+ } /* y */
+
+ /* Do the reversed pass through the image */
+ for (y=(ssize_t)image->rows-1; y >= 0; y--)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ ssize_t
+ r;
+
+ if (status == MagickFalse)
+ break;
+ /* NOTE read virtual pixels, and authentic pixels, from the same image!
+ ** we read using virtual to get virtual pixel handling, but write back
+ ** into the same image.
+ **
+ ** Only the bottom half of the kernel will be processed as we move
+ ** up the image.
+ */
+ p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
+ exception);
+ q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
+ exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ status=MagickFalse;
+ if (status == MagickFalse)
+ break;
+ p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
+
+ /* adjust positions to end of row */
+ p += image->columns-1;
+ q += image->columns-1;
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = offx;
+ for (x=(ssize_t)image->columns-1; x >= 0; x--)
+ {
+ ssize_t
+ v;
+
+ register ssize_t
+ u;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+
+ MagickPixelPacket
+ result;
+ /* Default - previously modified pixel */
+ GetMagickPixelPacket(image,&result);
+ SetMagickPixelPacket(image,q,q_indexes,&result);
+ if ( method != VoronoiMorphology )
+ result.opacity = QuantumRange - result.opacity;
+
+ switch ( method ) {
case DistanceMorphology:
- /* Add kernel Value and select the minimum value found.
- ** The result is a iterative distance from edge of image shape.
- **
- ** All Distance Kernels are symetrical, but that may not always
- ** be the case. For example how about a distance from left edges?
- ** To work correctly with asymetrical kernels the reflected kernel
- ** needs to be applied.
- **
- ** Actually this is really a GreyErode with a negative kernel!
- **
- */
- k = &kernel->values[ kernel->width*kernel->height-1 ];
+ /* Add kernel Value and select the minimum value found. */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(result.index, (*k)+k_indexes[u]);
+ Minimize(result.index,(*k)+GetIndexPixelComponent(k_indexes+u));
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
+ if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
+ }
+ break;
+ case VoronoiMorphology:
+ /* Apply Distance to 'Matte' channel, copying the closest color.
+ **
+ ** This is experimental, and really the 'alpha' component should
+ ** be a completely separate 'masking' channel.
+ */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=offy; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
+ if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
+ }
break;
-
- case UndefinedMorphology:
- default:
- break; /* Do nothing */
- }
- /* Final mathematics of results (combine with original image?)
- **
- ** NOTE: Difference Morphology operators Edge* and *Hat could also
- ** be done here but works better with iteration as a image difference
- ** in the controling function (below). Thicken and Thinning however
- ** should be done here so thay can be iterated correctly.
- */
- switch ( method ) {
- case HitAndMissMorphology:
- case ErodeMorphology:
- result = min; /* minimum of neighbourhood */
- break;
- case DilateMorphology:
- result = max; /* maximum of neighbourhood */
- break;
- case ThinningMorphology:
- /* subtract pattern match from original */
- result.red -= min.red;
- result.green -= min.green;
- result.blue -= min.blue;
- result.opacity -= min.opacity;
- result.index -= min.index;
- break;
- case ThickenMorphology:
- /* Union with original image (maximize) - or should this be + */
- Maximize( result.red, min.red );
- Maximize( result.green, min.green );
- Maximize( result.blue, min.blue );
- Maximize( result.opacity, min.opacity );
- Maximize( result.index, min.index );
- break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
- case UndefinedMorphology:
- case DilateIntensityMorphology:
- case ErodeIntensityMorphology:
- break; /* full pixel was directly assigned - not a channel method */
+ case VoronoiMorphology:
+ SetPixelPacket(image,&result,q,q_indexes);
+ break;
default:
if ((channel & RedChannel) != 0)
- q->red = ClampToQuantum(result.red);
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
- q->green = ClampToQuantum(result.green);
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
- q->blue = ClampToQuantum(result.blue);
- if ((channel & OpacityChannel) != 0
- && image->matte == MagickTrue )
- q->opacity = ClampToQuantum(QuantumRange-result.opacity);
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
- q_indexes[x] = ClampToQuantum(result.index);
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
- if ( ( p[r].red != q->red )
- || ( p[r].green != q->green )
- || ( p[r].blue != q->blue )
- || ( p[r].opacity != q->opacity )
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
|| ( image->colorspace == CMYKColorspace &&
- p_indexes[r] != q_indexes[x] ) )
- changed++; /* The pixel had some value changed! */
- p++;
- q++;
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+
+ p--; /* go backward through pixel buffers */
+ q--;
} /* x */
- sync=SyncCacheViewAuthenticPixels(q_view,exception);
- if (sync == MagickFalse)
+ if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
- {
- MagickBooleanType
- proceed;
+ if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
+ == MagickFalse )
+ status=MagickFalse;
-#if defined(MAGICKCORE_OPENMP_SUPPORT)
- #pragma omp critical (MagickCore_MorphologyImage)
-#endif
- proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
- if (proceed == MagickFalse)
- status=MagickFalse;
- }
} /* y */
- result_image->type=image->type;
- q_view=DestroyCacheView(q_view);
- p_view=DestroyCacheView(p_view);
- return(status ? (size_t) changed : 0);
-}
+ auth_view=DestroyCacheView(auth_view);
+ virt_view=DestroyCacheView(virt_view);
+ return(status ? (ssize_t) changed : -1);
+}
+/* Apply a Morphology by calling the above low level primitive application
+** functions. This function handles any iteration loops, composition or
+** re-iteration of results, and compound morphology methods that is based
+** on multiple low-level (staged) morphology methods.
+**
+** Basically this provides the complex glue between the requested morphology
+** method and raw low-level implementation (above).
+*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
+ CompositeOperator
+ curr_compose;
+
Image
*curr_image, /* Image we are working with or iterating */
- *work_image, /* secondary image for primative iteration */
+ *work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
*this_kernel; /* the kernel being applied */
MorphologyMethod
- primative; /* the current morphology primative being applied */
+ primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
+ special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
- method_loop, /* Loop 1: number of compound method iterations */
+ method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
- stage_loop, /* Loop 3: primative loop for compound morphology */
- stage_limit, /* how many primatives in this compound */
- kernel_loop, /* Loop 4: iterate the kernel (basic morphology) */
+ stage_loop, /* Loop 3: primitive loop for compound morphology */
+ stage_limit, /* how many primitives are in this compound */
+ kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
- count, /* total count of primative steps applied */
- changed, /* number pixels changed by last primative operation */
+ count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
+ ssize_t
+ changed; /* number pixels changed by last primitive operation */
+
char
v_info[80];
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
- count = 0; /* number of low-level morphology primatives performed */
+ count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
- kernel_limit = image->columns > image->rows ? image->columns : image->rows;
+ kernel_limit = image->columns>image->rows ? image->columns : image->rows;
- verbose = ( GetImageArtifact(image,"verbose") != (const char *) NULL ) ?
- MagickTrue : MagickFalse;
+ verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
+ curr_compose = image->compose;
+ (void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
- * + how many primatives make up the compound morphology
+ * + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
- stage_limit = 1; /* assume method is not a compount */
+ stage_limit = 1; /* assume method is not a compound */
+ special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
- case SmoothMorphology: /* 4 primative compound morphology */
+ case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
- case OpenMorphology: /* 2 primative compound morphology */
+ case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
- kernel_limit = 1; /* no method or kernel iteration */
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
- break;
+ /* FALL THUR */
case ThinningMorphology:
case ThickenMorphology:
- method_limit = kernel_limit; /* iterate method with each kernel */
+ method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
+ break;
case DistanceMorphology:
- rslt_compose = NoCompositeOp; /* Re-iterate with multiple kernels */
+ case VoronoiMorphology:
+ special = MagickTrue;
break;
default:
break;
}
+ /* Apply special methods with special requirements
+ ** For example, single run only, or post-processing requirements
+ */
+ if ( special == MagickTrue )
+ {
+ rslt_image=CloneImage(image,0,0,MagickTrue,exception);
+ if (rslt_image == (Image *) NULL)
+ goto error_cleanup;
+ if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
+ {
+ InheritException(exception,&rslt_image->exception);
+ goto error_cleanup;
+ }
+
+ changed = MorphologyPrimitiveDirect(rslt_image, method,
+ channel, kernel, exception);
+
+ if ( verbose == MagickTrue )
+ (void) fprintf(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
+ CommandOptionToMnemonic(MagickMorphologyOptions, method),
+ 1.0,0.0,1.0, (double) changed);
+
+ if ( changed < 0 )
+ goto error_cleanup;
+
+ if ( method == VoronoiMorphology ) {
+ /* Preserve the alpha channel of input image - but turned off */
+ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
+ (void) CompositeImageChannel(rslt_image, DefaultChannels,
+ CopyOpacityCompositeOp, image, 0, 0);
+ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
+ }
+ goto exit_cleanup;
+ }
+
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
- /* Some methods require a reflected kernel to use with primatives.
+ /* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
break;
}
+ /* Loops around more primitive morphology methods
+ ** erode, dilate, open, close, smooth, edge, etc...
+ */
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
- /* Select primative morphology for this stage of compound method */
+ /* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
- primative = method; /* Assume method is a primative */
+ primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
if ( stage_loop == 2 )
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
- primative = ErodeIntensityMorphology;
+ primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
- primative = DilateIntensityMorphology;
+ primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
if ( stage_loop == 2 )
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateIntensityMorphology;
+ primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
- primative = ErodeIntensityMorphology;
+ primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = ConvolveMorphology;
+ primitive = ConvolveMorphology;
break;
default:
break;
/* Extra information for debugging compound operations */
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
- (void) FormatMagickString(v_info, MaxTextExtent, "%s:%lu.%lu -> ",
- MagickOptionToMnemonic(MagickMorphologyOptions, method),
- (unsigned long) method_loop,(unsigned long) stage_loop);
- else if ( primative != method )
- (void) FormatMagickString(v_info, MaxTextExtent, "%s:%lu -> ",
- MagickOptionToMnemonic(MagickMorphologyOptions, method),
- (unsigned long) method_loop);
+ (void) FormatMagickString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
+ CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
+ method_loop,(double) stage_loop);
+ else if ( primitive != method )
+ (void) FormatMagickString(v_info, MaxTextExtent, "%s:%.20g -> ",
+ CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
+ method_loop);
else
v_info[0] = '\0';
}
- /* Loop 4: Iterate the kernel with primative */
+ /* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
- /* Create a destination image, if not yet defined */
+ /* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
+ /* work_image->type=image->type; ??? */
}
- /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
+ /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
- changed = MorphologyPrimitive(curr_image, work_image, primative,
- channel, this_kernel, bias, exception);
- kernel_changed += changed;
- method_changed += changed;
+ changed = MorphologyPrimitive(curr_image, work_image, primitive,
+ channel, this_kernel, bias, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
fprintf(stderr, "\n"); /* add end-of-line from previous */
- fprintf(stderr, "%s%s%s:%lu.%lu #%lu => Changed %lu", v_info,
- MagickOptionToMnemonic(MagickMorphologyOptions, primative),
- ( this_kernel == rflt_kernel ) ? "*" : "",
- (unsigned long) method_loop+kernel_loop-1,(unsigned long)
- kernel_number,(unsigned long) count,(unsigned long) changed);
+ (void) fprintf(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
+ v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
+ primitive),(this_kernel == rflt_kernel ) ? "*" : "",
+ (double) (method_loop+kernel_loop-1),(double) kernel_number,
+ (double) count,(double) changed);
}
+ if ( changed < 0 )
+ goto error_cleanup;
+ kernel_changed += changed;
+ method_changed += changed;
+
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
- } /* End Loop 4: Iterate the kernel with primative */
+ } /* End Loop 4: Iterate the kernel with primitive */
- if ( verbose == MagickTrue && kernel_changed != changed )
- fprintf(stderr, " Total %lu",(unsigned long) kernel_changed);
+ if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
+ fprintf(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
fprintf(stderr, "\n"); /* add end-of-line before looping */
case BottomHatMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference with original image",
- MagickOptionToMnemonic(MagickMorphologyOptions, method) );
+ CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
case EdgeMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference of Dilate and Erode",
- MagickOptionToMnemonic(MagickMorphologyOptions, method) );
+ CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
curr_image = (Image *) image; /* continue with original image */
}
else
- { /* add the new 'current' result to the composition
+ { /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
- ** Turn off SVG composition 'alpha blending'.
+ ** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
fprintf(stderr, " (compose \"%s\")",
- MagickOptionToMnemonic(MagickComposeOptions, rslt_compose) );
+ CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
+ curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
- if ( curr_image != (Image *) NULL &&
- curr_image != rslt_image &&
- curr_image != image )
- curr_image = DestroyImage(curr_image);
+ if ( curr_image == rslt_image )
+ curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
- if ( curr_image != (Image *) NULL &&
- curr_image != rslt_image &&
- curr_image != image )
+ if ( curr_image == rslt_image || curr_image == image )
+ curr_image = (Image *) NULL;
+ if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
+
\f
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
- const char
- *artifact;
-
KernelInfo
*curr_kernel;
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
+ const char
+ *artifact;
artifact = GetImageArtifact(image,"convolve:scale");
- if ( artifact != (char *)NULL ) {
+ if ( artifact != (const char *)NULL ) {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL) {
}
/* display the (normalized) kernel via stderr */
- artifact = GetImageArtifact(image,"showkernel");
- if ( artifact == (const char *) NULL)
- artifact = GetImageArtifact(image,"convolve:showkernel");
- if ( artifact == (const char *) NULL)
- artifact = GetImageArtifact(image,"morphology:showkernel");
- if ( artifact != (const char *) NULL)
+ if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
+ || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
+ || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
- /* override the default handling of multi-kernel morphology results
- * if 'Undefined' use the default method
- * if 'None' (default for 'Convolve') re-iterate previous result
- * otherwise merge resulting images using compose method given
+ /* Override the default handling of multi-kernel morphology results
+ * If 'Undefined' use the default method
+ * If 'None' (default for 'Convolve') re-iterate previous result
+ * Otherwise merge resulting images using compose method given.
+ * Default for 'HitAndMiss' is 'Lighten'.
*/
- compose = UndefinedCompositeOp; /* use default for method */
- artifact = GetImageArtifact(image,"morphology:compose");
- if ( artifact != (const char *) NULL)
- compose = (CompositeOperator) ParseMagickOption(
+ { const char
+ *artifact;
+ artifact = GetImageArtifact(image,"morphology:compose");
+ compose = UndefinedCompositeOp; /* use default for method */
+ if ( artifact != (const char *) NULL)
+ compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
-
+ }
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
- case DOGKernel:
+ case DoGKernel:
+ case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
- case ManhattenKernel:
+ case ManhattanKernel:
case EuclideanKernel:
return;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
- kernel->x = (size_t) x+1;
- kernel->y = (size_t) y+1;
+ kernel->x = (ssize_t) x+1;
+ kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
- { /* Do a transpose of the image, which results in a 90
- ** degree rotation of a 1 dimentional kernel
+    { /* Do a transpose of a 1 dimensional kernel,
+    ** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
- kernel->x = (size_t) ( -y +(ssize_t) kernel->width-1)/2;
- kernel->y = (size_t) ( +x +(ssize_t) kernel->height-1)/2;
+ kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
+ kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
* performed here, posibily with a linear kernel restriction.
*/
-#if 0
- { /* Do a Flop by reversing each row.
- */
- size_t
- y;
- register ssize_t
- x,r;
- register double
- *k,t;
-
- for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
- for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
- t=k[x], k[x]=k[r], k[r]=t;
-
- kernel->x = kernel->width - kernel->x - 1;
- angle = fmod(angle+180.0, 360.0);
- }
-#endif
return;
}
\f
% is then passed to UnityAddKernelInfo() to add a scled unity kernel
% into the scaled/normalized kernel.
%
-% The format of the ScaleKernelInfo method is:
+% The format of the ScaleGeometryKernelInfo method is:
%
-% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
-% const MagickStatusType normalize_flags )
+% void ScaleGeometryKernelInfo(KernelInfo *kernel,
+% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
-% values seperatally to those of the negative values, so the kernel will be
+% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
if ( kernel->next != (KernelInfo *) NULL )
fprintf(stderr, " #%lu", (unsigned long) c );
fprintf(stderr, " \"%s",
- MagickOptionToMnemonic(MagickKernelOptions, k->type) );
+ CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) > MagickEpsilon )
fprintf(stderr, "@%lg", k->angle);
fprintf(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
-% The resulting effect is to either convert a 'zero-summing' edge detection
-% kernel (such as a "Laplacian", "DOG" or a "LOG") into a 'sharpening'
-% kernel.
-%
-% Alternativally by using a purely positive kernel, and using a negative
-% post-normalizing scaling factor, you can convert a 'blurring' kernel (such
-% as a "Gaussian") into a 'unsharp' kernel.
+% The resulting effect is to convert the defined kernels into blended
+% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAdditionKernelInfo method is:
%