% January 2010 %
% %
% %
-% Copyright 1999-2010 ImageMagick Studio LLC, a non-profit organization %
+% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
+#include "magick/utility.h"
\f
/*
return(kernel);
}
-
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
-% center as origin, this is no ssize_ter the case, and any rectangular kernel
+% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
-% operators A list is defined as a semi-colon seperated list kernels.
+% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
- /* clear flags - for Expanding kernal lists thorugh rotations */
+ /* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification */
kernel->values[i] = nan; /* do not include this value in kernel */
}
else {
- kernel->values[i] = StringToDouble(token);
+ kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
static KernelInfo *ParseKernelName(const char *kernel_string)
{
- KernelInfo
- *kernel;
-
char
token[MaxTextExtent];
- ssize_t
- type;
-
const char
*p,
*end;
+ GeometryInfo
+ args;
+
+ KernelInfo
+ *kernel;
+
MagickStatusType
flags;
- GeometryInfo
- args;
+ ssize_t
+ type;
/* Parse special 'named' kernel */
GetMagickToken(kernel_string,&p,token);
- type=ParseMagickOption(MagickKernelOptions,MagickFalse,token);
+ type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *)NULL); /* not a valid named kernel */
/* special handling of missing values in input string */
switch( type ) {
- case RectangleKernel:
- if ( (flags & WidthValue) == 0 ) /* if no width then */
- args.rho = args.sigma; /* then width = height */
- if ( args.rho < 1.0 ) /* if width too small */
- args.rho = 3; /* then width = 3 */
- if ( args.sigma < 1.0 ) /* if height too small */
- args.sigma = args.rho; /* then height = width */
- if ( (flags & XValue) == 0 ) /* center offset if not defined */
- args.xi = (double)(((ssize_t)args.rho-1)/2);
- if ( (flags & YValue) == 0 )
- args.psi = (double)(((ssize_t)args.sigma-1)/2);
+ /* Shape Kernel Defaults */
+ case UnityKernel:
+ if ( (flags & WidthValue) == 0 )
+ args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
+ case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
- /* If no scale given (a 0 scale is valid! - set it to 1.0 */
if ( (flags & HeightValue) == 0 )
- args.sigma = 1.0;
+ args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
- args.xi = 1.0;
+ args.xi = 1.0; /* Default scale = 1.0, zero is valid */
+ break;
+ case RectangleKernel: /* Rectangle - set size defaults */
+ if ( (flags & WidthValue) == 0 ) /* if no width then */
+ args.rho = args.sigma; /* then width = height */
+ if ( args.rho < 1.0 ) /* if width too small */
+ args.rho = 3; /* then width = 3 */
+ if ( args.sigma < 1.0 ) /* if height too small */
+ args.sigma = args.rho; /* then height = width */
+ if ( (flags & XValue) == 0 ) /* center offset if not defined */
+ args.xi = (double)(((ssize_t)args.rho-1)/2);
+ if ( (flags & YValue) == 0 )
+ args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
+ /* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
+ case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
+ if ( kernel == (KernelInfo *) NULL )
+ return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
- /* ignore extra or multiple ';' kernel seperators */
+ /* ignore extra or multiple ';' kernel separators */
if ( *token != ';' ) {
/* tokens starting with alpha is a Named kernel */
% Convolution Kernels
%
% Unity
-% the No-Op kernel, also requivelent to Gaussian of sigma zero.
-% Basically a 3x3 kernel of a 1 surrounded by zeros.
+% A No-Op or Scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimentional gaussian kernel, as used by -gaussian.
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
-% each other, is equivelent to a far larger "Gaussian" kernel with the
+% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% | -2, 0,-2 |
% | -1, 0, 1 |
%
-% Sobel:{type},{angle}
-% Type 0: default un-nomalized version shown above.
-%
-% Type 1: As default but pre-normalized
-% | 1, 0, -1 |
-% | 2, 0, -2 | / 4
-% | 1, 0, -1 |
-%
-% Type 2: Diagonal version with same normalization as 1
-% | 1, 0, -1 |
-% | 2, 0, -2 | / 4
-% | 1, 0, -1 |
-%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
-% Note that using a larger radius for the "Square" or the "Diamond" is
-% also equivelent to iterating the basic morphological method that many
-% times. However iterating with the smaller radius is actually faster
-% than using a larger kernel radius.
+% Octagon:[{radius}[,{scale}]]
+% Generate an octagonal shaped kernel of given radius and constant scale.
+% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
+% in a "Diamond" kernel.
+%
+% Disk:[{radius}[,{scale}]]
+% Generate a binary disk, thresholded at the radius given, the radius
+% may be a floating-point value. Final Kernel size is floor(radius)*2+1
+% square. A radius of 5.3 is the default.
+%
+% NOTE: Disk kernels with low radii produce the same results as
+% many of the previously defined kernels, but differ greatly at larger
+% radii. Here is a table of equivalences...
+% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
+% "Disk:1.5" => "Square"
+% "Disk:2" => "Diamond:2"
+% "Disk:2.5" => "Octagon"
+% "Disk:2.9" => "Square:2"
+% "Disk:3.5" => "Octagon:3"
+% "Disk:4.5" => "Octagon:4"
+% "Disk:5.4" => "Octagon:5"
+% "Disk:6.4" => "Octagon:6"
+% All other Disk shapes are unique to this kernel, but because a "Disk"
+% is more circular when using a larger radius, using a larger radius is
+% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
%
% Properly centered and odd sized rectangles work the best.
%
-% Disk:[{radius}[,{scale}]]
-% Generate a binary disk of the radius given, radius may be a float.
-% Kernel size will be ceil(radius)*2+1 square.
-% NOTE: Here are some disk shapes of specific interest
-% "Disk:1" => "diamond" or "cross:1"
-% "Disk:1.5" => "square"
-% "Disk:2" => "diamond:2"
-% "Disk:2.5" => a general disk shape of radius 2
-% "Disk:2.9" => "square:2"
-% "Disk:3.5" => default - octagonal/disk shape of radius 3
-% "Disk:4.2" => roughly octagonal shape of radius 4
-% "Disk:4.3" => a general disk shape of radius 4
-% After this all the kernel shape becomes more and more circular.
-%
-% Because a "disk" is more circular when using a larger radius, using a
-% larger radius is preferred over iterating the morphological operation.
-%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
-% NOTE: "plus:1" is equivelent to a "Diamond" kernel.
+% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
+% Diagonals:type
+% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
-% Octagonal thicken kernel, to generate convex hulls of 45 degrees
+% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
-% Type 3: Experimental Variation to try to present left-right symmetry
-% Type 4: Experimental Variation to preserve left-right symmetry
+% Type 3: Thinning skeleton based on a research paper by
+% Dan S. Bloomberg (Default Type)
+% ThinSE:type
+% A huge variety of Thinning Kernels designed to preserve connectivity.
+% Many other kernel sets use these kernels as source definitions.
+% Type numbers are 41-49, 81-89, 481, and 482 which are based on
+% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
-% Chebyshev Distance (also known as Tchebychev Distance) is a value of
-% one to any neighbour, orthogonal or diagonal. One why of thinking of
-% it is the number of squares a 'King' or 'Queen' in chess needs to
-% traverse reach any other position on a chess board. It results in a
-% 'square' like distance function, but one where diagonals are closer
-% than expected.
+% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
+% is a value of one to any neighbour, orthogonal or diagonal. One way
+% of thinking of it is the number of squares a 'King' or 'Queen' in
+% chess needs to traverse to reach any other position on a chess board.
+% It results in a 'square' like distance function, but one where
+% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
-% Manhattan Distance (also known as Rectilinear Distance, or the Taxi
-% Cab metric), is the distance needed when you can only travel in
-% orthogonal (horizontal or vertical) only. It is the distance a 'Rook'
-% in chess would travel. It results in a diamond like distances, where
-% diagonals are further than expected.
+% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
+% Cab distance metric), it is the distance needed when you can only
+% travel in horizontal or vertical directions. It is the
+% distance a 'Rook' in chess would have to travel, and results in a
+% diamond like distances, where diagonals are further than expected.
+%
+% Octagonal:[{radius}][x{scale}[%!]]
+% An interleaving of Manhattan and Chebyshev metrics producing an
+% increasing octagonally shaped distance. Distances match those of
+% the "Octagon" shaped kernel of the same radius. The minimum radius
+% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
-% Euclidean Distance is the 'direct' or 'as the crow flys distance.
+% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
-% you will get octagonal like distance function, which is reasonally
-% accurate.
-%
-% However if you use a larger radius such as "Euclidean:4" you will
-% get a much smoother distance gradient from the edge of the shape.
-% Of course a larger kernel is slower to use, and generally not needed.
-%
-% To allow the use of fractional distances that you get with diagonals
-% the actual distance is scaled by a fixed value which the user can
-% provide. This is not actually nessary for either ""Chebyshev" or
-% "Manhattan" distance kernels, but is done for all three distance
-% kernels. If no scale is provided it is set to a value of 100,
-% allowing for a maximum distance measurement of 655 pixels using a Q16
-% version of IM, from any edge. However for small images this can
-% result in quite a dark gradient.
+% you will get octagonal like distance function.
+%
+% However using a larger radius such as "Euclidean:4" you will get a
+% much smoother distance gradient from the edge of the shape. Especially
+% if the image is pre-processed to include any anti-aliasing pixels.
+% Of course a larger kernel is slower to use, and not always needed.
+%
+% The first three Distance Measuring Kernels will only generate distances
+% of exact multiples of {scale} in binary images. As such you can use a
+% scale of 1 without losing any information. However you also need some
+% scaling when handling non-binary anti-aliased shapes.
+%
+% The "Euclidean" Distance Kernel however does generate non-integer
+% fractional results, and as such scaling is vital even for binary shapes.
%
*/
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
+ assert("Should not call this function" != (char *)NULL);
break;
- case UnityKernel: /* Named Descrete Convolution Kernels */
- case LaplacianKernel:
- case SobelKernel:
+ case LaplacianKernel: /* Named Discrete Convolution Kernels */
+ case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
- case ThinDiagonalsKernel:
+ case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
+ case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
+ case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
+ case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
+ case OctangonalKernel:
case EuclideanKernel:
#else
default:
}
switch(type) {
- /* Convolution Kernels */
+ /*
+ Convolution Kernels
+ */
+ case UnityKernel:
+ {
+ kernel->height = kernel->width = (size_t) 1;
+ kernel->x = kernel->y = (ssize_t) 0;
+ kernel->values=(double *) AcquireQuantumMemory(1,sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+ kernel->maximum = kernel->values[0] = args->rho;
+ break;
+ }
+ break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
- * How to do this is currently not known, but appears to be
- * basied on the Error Function 'erf()' (intergral of a gaussian)
+ * How to do this is currently not known, but appears to be based on the
+ * Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
- B = 1.0/(Magick2PI*sigma*sigma);
+ B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
- B = 1.0/(Magick2PI*sigma*sigma);
+ B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
- B = 1.0/(MagickPI*sigma*sigma*sigma*sigma);
+ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
- beta= 1.0/(MagickSQ2PI*sigma );
+ beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
break;
}
- /* Convolution Kernels - Well Known Constants */
+ /*
+ Convolution Kernels - Well Known Named Constant Kernels
+ */
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
break;
}
case SobelKernel:
-#if 0
- { /* Sobel with optional 'sub-types' */
- switch ( (int) args->rho ) {
- default:
- case 0:
- kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- break;
- case 1:
- kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ScaleKernelInfo(kernel, 0.25, NoValue);
- break;
- case 2:
- kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ScaleKernelInfo(kernel, 0.25, NoValue);
- break;
- }
- if ( fabs(args->sigma) > MagickEpsilon )
- /* Rotate by correctly supplied 'angle' */
- RotateKernelInfo(kernel, args->sigma);
- else if ( args->rho > 30.0 || args->rho < -30.0 )
- /* Rotate by out of bounds 'type' */
- RotateKernelInfo(kernel, args->rho);
- break;
- }
-#else
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
RotateKernelInfo(kernel, args->rho);
break;
}
-#endif
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
kernel->values[1] = kernel->values[3] = +MagickSQ2;
kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
- ScaleKernelInfo(kernel, 1.0/2.0*MagickSQ2, NoValue);
+ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
break;
}
- /* Boolean Kernels */
+ /*
+ Boolean or Shaped Kernels
+ */
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->positive_range = scale*u;
break;
}
- case DiskKernel:
- {
- ssize_t
- limit = (ssize_t)(args->rho*args->rho);
-
- if (args->rho < 0.4) /* default radius approx 3.5 */
- kernel->width = kernel->height = 7L, limit = 10L;
- else
- kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- /* set all kernel values within disk area to scale given */
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- if ((u*u+v*v) <= limit)
- kernel->positive_range += kernel->values[i] = args->sigma;
- else
- kernel->values[i] = nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- break;
- }
- case PlusKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 5; /* default radius 2 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- /* set all kernel values along axises to given scale */
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
- break;
- }
- case CrossKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 5; /* default radius 2 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- /* set all kernel values along axises to given scale */
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
- kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
- kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
- break;
- }
- /* HitAndMiss Kernels */
- case RingKernel:
- case PeaksKernel:
- {
- ssize_t
- limit1,
- limit2,
- scale;
-
- if (args->rho < args->sigma)
- {
- kernel->width = ((size_t)args->sigma)*2+1;
- limit1 = (ssize_t)(args->rho*args->rho);
- limit2 = (ssize_t)(args->sigma*args->sigma);
- }
- else
- {
- kernel->width = ((size_t)args->rho)*2+1;
- limit1 = (ssize_t)(args->sigma*args->sigma);
- limit2 = (ssize_t)(args->rho*args->rho);
- }
- if ( limit2 <= 0 )
- kernel->width = 7L, limit1 = 7L, limit2 = 11L;
-
- kernel->height = kernel->width;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
+ case OctagonKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius = 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ if ( (labs((long) u)+labs((long) v)) <=
+ ((long)kernel->x + (long)(kernel->x/2)) )
+ kernel->positive_range += kernel->values[i] = args->sigma;
+ else
+ kernel->values[i] = nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ break;
+ }
+ case DiskKernel:
+ {
+ ssize_t
+ limit = (ssize_t)(args->rho*args->rho);
- /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
- scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
- for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- { ssize_t radius=u*u+v*v;
- if (limit1 < radius && radius <= limit2)
- kernel->positive_range += kernel->values[i] = (double) scale;
+ if (args->rho < 0.4) /* default radius approx 4.3 */
+ kernel->width = kernel->height = 9L, limit = 18L;
+ else
+ kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ if ((u*u+v*v) <= limit)
+ kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ break;
+ }
+ case PlusKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ /* set all kernel values along axises to given scale */
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ break;
+ }
+ case CrossKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 5; /* default radius 2 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ /* set all kernel values along axises to given scale */
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
+ kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
+ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
+ break;
+ }
+ /*
+ HitAndMiss Kernels
+ */
+ case RingKernel:
+ case PeaksKernel:
+ {
+ ssize_t
+ limit1,
+ limit2,
+ scale;
+
+ if (args->rho < args->sigma)
+ {
+ kernel->width = ((size_t)args->sigma)*2+1;
+ limit1 = (ssize_t)(args->rho*args->rho);
+ limit2 = (ssize_t)(args->sigma*args->sigma);
}
- kernel->minimum = kernel->minimum = (double) scale;
- if ( type == PeaksKernel ) {
- /* set the central point in the middle */
- kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
- kernel->positive_range = 1.0;
- kernel->maximum = 1.0;
+ else
+ {
+ kernel->width = ((size_t)args->rho)*2+1;
+ limit1 = (ssize_t)(args->sigma*args->sigma);
+ limit2 = (ssize_t)(args->rho*args->rho);
+ }
+ if ( limit2 <= 0 )
+ kernel->width = 7L, limit1 = 7L, limit2 = 11L;
+
+ kernel->height = kernel->width;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
+ scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
+ for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ { ssize_t radius=u*u+v*v;
+ if (limit1 < radius && radius <= limit2)
+ kernel->positive_range += kernel->values[i] = (double) scale;
+ else
+ kernel->values[i] = nan;
+ }
+ kernel->minimum = kernel->maximum = (double) scale;
+ if ( type == PeaksKernel ) {
+ /* set the central point in the middle */
+ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
+ kernel->positive_range = 1.0;
+ kernel->maximum = 1.0;
+ }
+ break;
}
- break;
- }
- case EdgesKernel:
- {
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandMirrorKernelInfo(kernel); /* mirror expansion of other kernels */
- break;
- }
- case CornersKernel:
- {
- kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
- break;
- }
- case ThinDiagonalsKernel:
- {
- switch ( (int) args->rho ) {
- case 0:
- default:
- { KernelInfo
- *new_kernel;
- kernel=ParseKernelArray("3: 0,0,0 0,1,1 1,1,-");
+ case EdgesKernel:
+ {
+ kernel=AcquireKernelInfo("ThinSE:482");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
+ break;
+ }
+ case CornersKernel:
+ {
+ kernel=AcquireKernelInfo("ThinSE:87");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
+ break;
+ }
+ case DiagonalsKernel:
+ {
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ { KernelInfo
+ *new_kernel;
+ kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ ExpandMirrorKernelInfo(kernel);
+ return(kernel);
+ }
+ case 1:
+ kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
+ break;
+ case 2:
+ kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case LineEndsKernel:
+ { /* Kernels for finding the end of thin lines */
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ /* set of kernels to find all end of lines */
+ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
+ case 1:
+ /* kernel for 4-connected line ends - no rotation */
+ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
+ break;
+ case 2:
+ /* kernel to add for 8-connected lines - no rotation */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
+ break;
+ case 3:
+ /* kernel to add for orthogonal line ends - does not find corners */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
+ break;
+ case 4:
+ /* traditional line end - fails on last T end */
+ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case LineJunctionsKernel:
+ { /* kernels for finding the junctions of multiple lines */
+ switch ( (int) args->rho ) {
+ case 0:
+ default:
+ /* set of kernels to find all line junctions */
+ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
+ case 1:
+ /* Y Junction */
+ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
+ break;
+ case 2:
+ /* Diagonal T Junctions */
+ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
+ break;
+ case 3:
+ /* Orthogonal T Junctions */
+ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
+ break;
+ case 4:
+ /* Diagonal X Junctions */
+ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
+ break;
+ case 5:
+ /* Orthogonal X Junctions - minimal diamond kernel */
+ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
+ }
+ case RidgesKernel:
+ { /* Ridges - Ridge finding kernels */
+ KernelInfo
+ *new_kernel;
+ switch ( (int) args->rho ) {
+ case 1:
+ default:
+ kernel=ParseKernelArray("3x1:0,1,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
+ break;
+ case 2:
+ kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
- new_kernel=ParseKernelArray("3: 0,0,1 0,1,1 0,1,-");
+ ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
+
+ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
+          /* Unfortunately we can not yet rotate a non-square kernel */
+          /* But then we can't flip a non-symmetrical kernel either */
+ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
- ExpandMirrorKernelInfo(kernel);
- break;
- }
- case 1:
- kernel=ParseKernelArray("3: 0,0,0 0,1,1 1,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 2:
- kernel=ParseKernelArray("3: 0,0,1 0,1,1 0,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- }
- break;
- }
- case LineEndsKernel:
- { /* Kernels for finding the end of thin lines */
- switch ( (int) args->rho ) {
- case 0:
- default:
- /* set of kernels to find all end of lines */
- kernel=AcquireKernelInfo("LineEnds:1>;LineEnds:2>");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- break;
- case 1:
- /* kernel for 4-connected line ends - no rotation */
- kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 2:
- /* kernel to add for 8-connected lines - no rotation */
- kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 3:
- /* kernel to add for orthogonal line ends - does not find corners */
- kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 4:
- /* traditional line end - fails on last T end */
- kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
+ new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ LastKernelInfo(kernel)->next = new_kernel;
+ break;
+ }
+ break;
}
- break;
- }
- case LineJunctionsKernel:
- { /* kernels for finding the junctions of multiple lines */
- switch ( (int) args->rho ) {
- case 0:
- default:
- /* set of kernels to find all line junctions */
- kernel=AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- break;
- case 1:
- /* Y Junction */
- kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 2:
- /* Diagonal T Junctions */
- kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 3:
- /* Orthogonal T Junctions */
- kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 4:
- /* Diagonal X Junctions */
- kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
- case 5:
- /* Orthogonal X Junctions - minimal diamond kernel */
- kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- RotateKernelInfo(kernel, args->sigma);
- break;
+ case ConvexHullKernel:
+ {
+ KernelInfo
+ *new_kernel;
+ /* first set of 8 kernels */
+ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0);
+ /* append the mirror versions too - no flip function yet */
+ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
+ if (new_kernel == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ new_kernel->type = type;
+ ExpandRotateKernelInfo(new_kernel, 90.0);
+ LastKernelInfo(kernel)->next = new_kernel;
+ break;
}
- break;
- }
- case RidgesKernel:
- { /* Ridges - Ridge finding kernels */
- KernelInfo
- *new_kernel;
- switch ( (int) args->rho ) {
- case 1:
- default:
- kernel=ParseKernelArray("3x1:0,1,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
- break;
- case 2:
- kernel=ParseKernelArray("4x1:0,1,1,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
-
- /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
- /* Unfortunatally we can not yet rotate a non-square kernel */
- /* But then we can't flip a non-symetrical kernel either */
- new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- break;
+ case SkeletonKernel:
+ {
+ switch ( (int) args->rho ) {
+ case 1:
+ default:
+ /* Traditional Skeleton...
+ ** A cyclically rotated single kernel
+ */
+ kernel=AcquireKernelInfo("ThinSE:482");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
+ break;
+ case 2:
+ /* HIPR Variation of the cyclic skeleton
+ ** Corners of the traditional method made more forgiving,
+              ** but they retain the same cyclic order.
+ */
+ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ if (kernel->next == (KernelInfo *) NULL)
+ return(DestroyKernelInfo(kernel));
+ kernel->type = type;
+ kernel->next->type = type;
+ ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
+ break;
+ case 3:
+ /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
+              ** "Connectivity-Preserving Morphological Image Transformations"
+ ** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
+ ** http://www.leptonica.com/papers/conn.pdf
+ */
+ kernel=AcquireKernelInfo(
+ "ThinSE:41; ThinSE:42; ThinSE:43");
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ kernel->next->type = type;
+ kernel->next->next->type = type;
+ ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
+ break;
+ }
+ break;
}
- break;
- }
- case ConvexHullKernel:
- {
- KernelInfo
- *new_kernel;
- /* first set of 8 kernels */
- kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandRotateKernelInfo(kernel, 90.0);
- /* append the mirror versions too - no flip function yet */
- new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandRotateKernelInfo(new_kernel, 90.0);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- }
- case SkeletonKernel:
- {
- KernelInfo
- *new_kernel;
- switch ( (int) args->rho ) {
- case 1:
- default:
- /* Traditional Skeleton...
- ** A cyclically rotated single kernel
- */
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
- break;
- case 2:
- /* HIPR Variation of the cyclic skeleton
- ** Corners of the traditional method made more forgiving,
- ** but the retain the same cyclic order.
- */
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- new_kernel=ParseKernelArray("3: -,0,0 1,1,0 -,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
- break;
- case 3:
- /* Jittered Skeleton: do top, then bottom, then each sides */
- /* Do top edge */
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- new_kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3: -,0,0 1,1,0 -,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- /* Do Bottom edge */
- new_kernel=ParseKernelArray("3: 1,1,1 -,1,- 0,0,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3: -,1,- 1,1,0 -,0,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3: -,1,- 0,1,1 0,0,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- /* Last the two sides */
- new_kernel=ParseKernelArray("3: 0,-,1 0,1,1 0,-,1");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3: 1,-,0 1,1,0 1,-,0");
- if (new_kernel == (KernelInfo *) NULL)
- return(new_kernel);
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- break;
- case 4:
- /* Just a simple 'Edge' kernel, but with a extra two kernels
- ** to finish off diagonal lines, top then bottom then sides.
- ** Works well for test case but fails for general case.
- */
- kernel=ParseKernelArray("3: 0,0,0 -,1,- 1,1,1");
- if (kernel == (KernelInfo *) NULL)
- return(kernel);
- kernel->type = type;
- new_kernel=ParseKernelArray("3: 0,0,0 0,1,1 1,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- new_kernel=ParseKernelArray("3: 0,0,0 1,1,0 -,1,1");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- LastKernelInfo(kernel)->next = new_kernel;
- ExpandMirrorKernelInfo(kernel);
- /* Append a set of corner kernels */
- new_kernel=ParseKernelArray("3: 0,0,- 0,1,1 -,1,-");
- if (new_kernel == (KernelInfo *) NULL)
- return(DestroyKernelInfo(kernel));
- new_kernel->type = type;
- ExpandRotateKernelInfo(new_kernel, 90.0);
- LastKernelInfo(kernel)->next = new_kernel;
- break;
+ case ThinSEKernel:
+ { /* Special kernels for general thinning, while preserving connections
+      ** "Connectivity-Preserving Morphological Image Transformations"
+ ** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
+ ** http://www.leptonica.com/papers/conn.pdf
+ ** And
+ ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
+ **
+ ** Note kernels do not specify the origin pixel, allowing them
+ ** to be used for both thickening and thinning operations.
+ */
+ switch ( (int) args->rho ) {
+ /* SE for 4-connected thinning */
+ case 41: /* SE_4_1 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
+ break;
+ case 42: /* SE_4_2 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
+ break;
+ case 43: /* SE_4_3 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
+ break;
+ case 44: /* SE_4_4 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
+ break;
+ case 45: /* SE_4_5 */
+ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
+ break;
+ case 46: /* SE_4_6 */
+ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
+ break;
+ case 47: /* SE_4_7 */
+ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
+ break;
+ case 48: /* SE_4_8 */
+ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
+ break;
+ case 49: /* SE_4_9 */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
+ break;
+ /* SE for 8-connected thinning - negatives of the above */
+ case 81: /* SE_8_0 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
+ break;
+ case 82: /* SE_8_2 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
+ break;
+ case 83: /* SE_8_3 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
+ break;
+ case 84: /* SE_8_4 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
+ break;
+ case 85: /* SE_8_5 */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
+ break;
+ case 86: /* SE_8_6 */
+ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
+ break;
+ case 87: /* SE_8_7 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
+ break;
+ case 88: /* SE_8_8 */
+ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
+ break;
+ case 89: /* SE_8_9 */
+ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
+ break;
+ /* Special combined SE kernels */
+ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
+ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
+ break;
+ case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
+ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
+ break;
+ case 481: /* SE_48_1 - General Connected Corner Kernel */
+ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
+ break;
+ default:
+ case 482: /* SE_48_2 - General Edge Kernel */
+ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
+ break;
+ }
+ if (kernel == (KernelInfo *) NULL)
+ return(kernel);
+ kernel->type = type;
+ RotateKernelInfo(kernel, args->sigma);
+ break;
}
- break;
- }
- /* Distance Measuring Kernels */
- case ChebyshevKernel:
+ /*
+ Distance Measuring Kernels
+ */
+ case ChebyshevKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 3; /* default radius = 1 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->positive_range += ( kernel->values[i] =
+ args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
+ kernel->maximum = kernel->values[0];
+ break;
+ }
+ case ManhattanKernel:
+ {
+ if (args->rho < 1.0)
+ kernel->width = kernel->height = 3; /* default radius = 1 */
+ else
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
+
+ kernel->values=(double *) AcquireQuantumMemory(kernel->width,
+ kernel->height*sizeof(double));
+ if (kernel->values == (double *) NULL)
+ return(DestroyKernelInfo(kernel));
+
+ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
+ for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
+ kernel->positive_range += ( kernel->values[i] =
+ args->sigma*(labs((long) u)+labs((long) v)) );
+ kernel->maximum = kernel->values[0];
+ break;
+ }
+ case OctagonalKernel:
{
- if (args->rho < 1.0)
- kernel->width = kernel->height = 3; /* default radius = 1 */
+ if (args->rho < 2.0)
+ kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->positive_range += ( kernel->values[i] =
- args->sigma*((labs((long) u)>labs((long) v)) ? labs((long) u) : labs((long) v)) );
- kernel->maximum = kernel->values[0];
- break;
- }
- case ManhattanKernel:
- {
- if (args->rho < 1.0)
- kernel->width = kernel->height = 3; /* default radius = 1 */
- else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
- kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
-
- kernel->values=(double *) AcquireQuantumMemory(kernel->width,
- kernel->height*sizeof(double));
- if (kernel->values == (double *) NULL)
- return(DestroyKernelInfo(kernel));
-
- for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
- for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
- kernel->positive_range += ( kernel->values[i] =
- args->sigma*(labs((long) u)+labs((long) v)) );
+ {
+ double
+ r1 = MagickMax(fabs((double)u),fabs((double)v)),
+ r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
+ kernel->positive_range += kernel->values[i] =
+ args->sigma*MagickMax(r1,r2);
+ }
kernel->maximum = kernel->values[0];
break;
}
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
- kernel->width = kernel->height = ((size_t)args->rho)*2+1;
+ kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->maximum = kernel->values[0];
break;
}
- case UnityKernel:
default:
{
- /* Unity or No-Op Kernel - Basically just a single pixel on its own */
+ /* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
- kernel->type = ( type == UnityKernel ) ? UnityKernel : UndefinedKernel;
+ kernel->type = UndefinedKernel;
break;
}
break;
}
-
return(kernel);
}
\f
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that its
% can be modified without effecting the original. The cloned kernel should
-% be destroyed using DestoryKernelInfo() when no ssize_ter needed.
+% be destroyed using DestoryKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% %
% %
% %
-% E x p a n d M i r r o r K e r n e l I n f o %
++ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
% %
% %
% %
-% E x p a n d R o t a t e K e r n e l I n f o %
++ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
-% incrementally by the angle given, until the first kernel repeats.
+% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only works for 3x3 kernels.
% While 90 degree roatations only works for linear and square kernels
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
- /* Test for Nan equivelence */
+ /* Test for Nan equivalence */
if ( IsNan(kernel1->values[i]) && !IsNan(kernel2->values[i]) )
return MagickFalse;
if ( IsNan(kernel2->values[i]) && !IsNan(kernel1->values[i]) )
return MagickFalse;
- /* Test actual values are equivelent */
+ /* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) > MagickEpsilon )
return MagickFalse;
}
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
-% It is basically equivelent to as MorphologyImageChannel() (see below) but
+% It is basically equivalent to as MorphologyImageChannel() (see below) but
% without any user controls. This allows internel programs to use this
% function, to actually perform a specific task without posible interference
% by any API user supplied settings.
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
-% const ssize_t iterations,const KernelInfo *kernel,
-% const CompositeMethod compose, const double bias,
-% ExceptionInfo *exception)
+% const ChannelType channel, const ssize_t iterations,
+% const KernelInfo *kernel, const CompositeMethod compose,
+% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
-% o image: the image.
+% o image: the source image
%
% o method: the morphology method to be applied.
%
+% o channel: the channels to which the operations are applied
+% The channel 'sync' flag determines if 'alpha weighting' is
+% applied for convolution style operations.
+%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
-% Warning: kernel may be normalized for the Convolve method.
%
% o compose: How to handle or merge multi-kernel results.
-% If 'Undefined' use default of the Morphology method.
-% If 'No' force image to be re-iterated by each kernel.
-% Otherwise merge the results using the mathematical compose
-% method given.
+% If 'UndefinedCompositeOp' use default for the Morphology method.
+% If 'NoCompositeOp' force image to be re-iterated by each kernel.
+% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
%
*/
-
/* Apply a Morphology Primative to an image using the given kernel.
-** Two pre-created images must be provided, no image is created.
-** Returning the number of pixels that changed.
+** Two pre-created images must be provided, and no image is created.
+** It returns the number of pixels that changed between the images
+** for result convergence determination.
*/
-static size_t MorphologyPrimitive(const Image *image, Image
- *result_image, const MorphologyMethod method, const ChannelType channel,
+static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
+ const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
*q_view;
ssize_t
- y, offx, offy,
+ y, offx, offy;
+
+ size_t
+ virt_width,
changed;
MagickBooleanType
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
+ virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
- case DistanceMorphology:
+ /*case DistanceMorphology:*/
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
}
+
+ if ( method == ConvolveMorphology && kernel->width == 1 )
+ { /* Special handling (for speed) of vertical (blur) kernels.
+ ** This performs its handling in columns rather than in rows.
+ ** This is only done for convolve as it is the only method that
+ ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
+ **
+ ** Timing tests (on single CPU laptop)
+      ** Using a vertical 1-d Blur with normal row-by-row (below)
+ ** time convert logo: -morphology Convolve Blur:0x10+90 null:
+ ** 0.807u
+ ** Using this column method
+ ** time convert logo: -morphology Convolve Blur:0x10+90 null:
+ ** 0.620u
+ **
+ ** Anthony Thyssen, 14 June 2010
+ */
+ register ssize_t
+ x;
+
#if defined(MAGICKCORE_OPENMP_SUPPORT)
- #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
+#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
- for (y=0; y < (ssize_t) image->rows; y++)
- {
- MagickBooleanType
- sync;
-
- register const PixelPacket
- *restrict p;
+ for (x=0; x < (ssize_t) image->columns; x++)
+ {
+ register const PixelPacket
+ *restrict p;
- register const IndexPacket
- *restrict p_indexes;
+ register const IndexPacket
+ *restrict p_indexes;
- register PixelPacket
- *restrict q;
+ register PixelPacket
+ *restrict q;
- register IndexPacket
- *restrict q_indexes;
+ register IndexPacket
+ *restrict q_indexes;
- register ssize_t
- x;
+ register ssize_t
+ y;
- size_t
- r;
+ ssize_t
+ r;
- if (status == MagickFalse)
- continue;
- p=GetCacheViewVirtualPixels(p_view, -offx, y-offy,
- image->columns+kernel->width, kernel->height, exception);
- q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
- exception);
- if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
- {
- status=MagickFalse;
+ if (status == MagickFalse)
+ continue;
+ p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
+ image->rows+kernel->height-1, exception);
+ q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ {
+ status=MagickFalse;
+ continue;
+ }
+ p_indexes=GetCacheViewVirtualIndexQueue(p_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = offy;
+
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register ssize_t
+ v;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+
+ MagickPixelPacket
+ result;
+
+ /* Copy input image to the output image for unused channels
+ * This removes need for 'cloning' a new image every iteration
+ */
+ *q = p[r];
+ if (image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+y,GetIndexPixelComponent(
+ p_indexes+r));
+
+ /* Set the bias of the weighted average output */
+ result.red =
+ result.green =
+ result.blue =
+ result.opacity =
+ result.index = bias;
+
+
+ /* Weighted Average of pixels using reflected kernel
+ **
+        ** NOTE for correct working of this operation for asymmetrical
+        ** kernels, the kernel needs to be applied in its reflected form.
+        ** That is, its values need to be reversed.
+ */
+ k = &kernel->values[ kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ if ( ((channel & SyncChannels) == 0 ) ||
+ (image->matte == MagickFalse) )
+ { /* No 'Sync' involved.
+ ** Convolution is simple greyscale channel operation
+ */
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ if ( IsNan(*k) ) continue;
+ result.red += (*k)*GetRedPixelComponent(k_pixels);
+ result.green += (*k)*GetGreenPixelComponent(k_pixels);
+ result.blue += (*k)*GetBluePixelComponent(k_pixels);
+ result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
+ if ( image->colorspace == CMYKColorspace)
+ result.index += (*k)*(*k_indexes);
+ k--;
+ k_pixels++;
+ k_indexes++;
+ }
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ }
+ else
+ { /* Channel 'Sync' Flag, and Alpha Channel enabled.
+ ** Weight the color channels with Alpha Channel so that
+ ** transparent pixels are not part of the results.
+ */
+ MagickRealType
+ alpha, /* alpha weighting of colors : kernel*alpha */
+ gamma; /* divisor, sum of color weighting values */
+
+ gamma=0.0;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ if ( IsNan(*k) ) continue;
+ alpha=(*k)*(QuantumScale*(QuantumRange-GetOpacityPixelComponent(k_pixels)));
+ gamma += alpha;
+ result.red += alpha*GetRedPixelComponent(k_pixels);
+ result.green += alpha*GetGreenPixelComponent(k_pixels);
+ result.blue += alpha*GetBluePixelComponent(k_pixels);
+ result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
+ if ( image->colorspace == CMYKColorspace)
+ result.index += alpha*(*k_indexes);
+ k--;
+ k_pixels++;
+ k_indexes++;
+ }
+ /* Sync'ed channels, all channels are modified */
+ gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
+ SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
+ SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
+ SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if (image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(gamma*
+ result.index));
+ }
+
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q))
+ || ( p[r].green != GetGreenPixelComponent(q))
+ || ( p[r].blue != GetBluePixelComponent(q))
+ || ( p[r].opacity != GetOpacityPixelComponent(q))
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+ p++;
+ q++;
+ } /* y */
+ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ {
+ MagickBooleanType
+ proceed;
+
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp critical (MagickCore_MorphologyImage)
+#endif
+ proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
+ if (proceed == MagickFalse)
+ status=MagickFalse;
+ }
+ } /* x */
+ result_image->type=image->type;
+ q_view=DestroyCacheView(q_view);
+ p_view=DestroyCacheView(p_view);
+ return(status ? (ssize_t) changed : 0);
+ }
+
+ /*
+ ** Normal handling of horizontal or rectangular kernels (row by row)
+ */
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
+#endif
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ size_t
+ r;
+
+ if (status == MagickFalse)
+ continue;
+ p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
+ kernel->height, exception);
+ q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
+ exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ {
+ status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
- r = (image->columns+kernel->width)*offy+offx; /* constant */
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
- q_indexes[x] = p_indexes[r];
+ SetIndexPixelComponent(q_indexes+x,GetIndexPixelComponent(p_indexes+r));
/* Defaults */
min.red =
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
- result.index = (MagickRealType) p_indexes[r];
+ result.index = (MagickRealType) GetIndexPixelComponent(p_indexes+r);
switch (method) {
case ConvolveMorphology:
- /* Set the user defined bias of the weighted average output */
+ /* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
- if ( ((channel & SyncChannels) != 0 ) &&
- (image->matte == MagickTrue) )
- { /* Channel has a 'Sync' Flag, and Alpha Channel enabled.
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ if ( ((channel & SyncChannels) == 0 ) ||
+ (image->matte == MagickFalse) )
+ { /* No 'Sync' involved.
+ ** Convolution is simple greyscale channel operation
+ */
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ result.red += (*k)*k_pixels[u].red;
+ result.green += (*k)*k_pixels[u].green;
+ result.blue += (*k)*k_pixels[u].blue;
+ result.opacity += (*k)*k_pixels[u].opacity;
+ if ( image->colorspace == CMYKColorspace)
+ result.index += (*k)*GetIndexPixelComponent(k_indexes+u);
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(
+ result.index));
+ }
+ else
+ { /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
- k = &kernel->values[ kernel->width*kernel->height-1 ];
- k_pixels = p;
- k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
- result.index += alpha*k_indexes[u];
+ result.index+=alpha*GetIndexPixelComponent(k_indexes+u);
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
- q->red = ClampToQuantum(gamma*result.red);
- q->green = ClampToQuantum(gamma*result.green);
- q->blue = ClampToQuantum(gamma*result.blue);
- q->opacity = ClampToQuantum(result.opacity);
+ SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
+ SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
+ SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
+ SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
- q_indexes[x] = ClampToQuantum(gamma*result.index);
- }
- else
- { /* No 'Sync' involved.
- ** Convolution is simple greyscale channel operation
- */
- k = &kernel->values[ kernel->width*kernel->height-1 ];
- k_pixels = p;
- k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
- for (u=0; u < (ssize_t) kernel->width; u++, k--) {
- if ( IsNan(*k) ) continue;
- result.red += (*k)*k_pixels[u].red;
- result.green += (*k)*k_pixels[u].green;
- result.blue += (*k)*k_pixels[u].blue;
- result.opacity += (*k)*k_pixels[u].opacity;
- if ( image->colorspace == CMYKColorspace)
- result.index += (*k)*k_indexes[u];
- }
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
- }
- if ((channel & RedChannel) != 0)
- q->red = ClampToQuantum(result.red);
- if ((channel & GreenChannel) != 0)
- q->green = ClampToQuantum(result.green);
- if ((channel & BlueChannel) != 0)
- q->blue = ClampToQuantum(result.blue);
- if ((channel & OpacityChannel) != 0
- && image->matte == MagickTrue )
- q->opacity = ClampToQuantum(result.opacity);
- if ((channel & IndexChannel) != 0
- && image->colorspace == CMYKColorspace)
- q_indexes[x] = ClampToQuantum(result.index);
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(gamma*
+ result.index));
}
break;
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(min.index, (double) k_indexes[u]);
+ Minimize(min.index,(double) GetIndexPixelComponent(
+ k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
break;
-
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Maximize(max.index, (double) k_indexes[u]);
+ Maximize(max.index, (double) GetIndexPixelComponent(
+ k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
break;
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(min.index, (double) k_indexes[u]);
+ Minimize(min.index,(double) GetIndexPixelComponent(
+ k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Maximize(max.index, (double) k_indexes[u]);
+ Maximize(max.index, (double) GetIndexPixelComponent(
+ k_indexes+u));
}
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
- for (u=0; u < (ssize_t) kernel->width; u++, k++) {
- if ( IsNan(*k) || (*k) < 0.5 ) continue;
- if ( result.red == 0.0 ||
- PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
- /* copy the whole pixel - no channel selection */
- *q = k_pixels[u];
- if ( result.red > 0.0 ) changed++;
- result.red = 1.0;
- }
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k++) {
+ if ( IsNan(*k) || (*k) < 0.5 ) continue;
+ if ( result.red == 0.0 ||
+ PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
+ /* copy the whole pixel - no channel selection */
+ *q = k_pixels[u];
+ if ( result.red > 0.0 ) changed++;
+ result.red = 1.0;
+ }
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+
+ case DilateIntensityMorphology:
+ /* Select Pixel with Maximum Intensity within kernel neighbourhood
+ **
+ ** WARNING: the intensity test fails for CMYK and does not
+ ** take into account the moderating effect of the alpha channel
+ ** on the intensity (yet).
+ **
+          ** NOTE for correct working of this operation for asymmetrical
+          ** kernels, the kernel needs to be applied in its reflected form.
+          ** That is, its values need to be reversed.
+ */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
+ if ( result.red == 0.0 ||
+ PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
+ /* copy the whole pixel - no channel selection */
+ *q = k_pixels[u];
+ if ( result.red > 0.0 ) changed++;
+ result.red = 1.0;
+ }
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+#if 0
+ This code has been obsoleted by the MorphologyPrimitiveDirect() function.
+ However it is still (almost) correct coding for Grayscale Morphology.
+ That is...
+
+ GrayErode is equivalent but with kernel values subtracted from pixels
+ without the kernel rotation
+ GreyDilate is equivalent but using Maximum() instead of Minimum()
+         using kernel rotation
+
+ case DistanceMorphology:
+ /* Add kernel Value and select the minimum value found.
+      ** The result is an iterative distance from edge of image shape.
+      **
+      ** All Distance Kernels are symmetrical, but that may not always
+      ** be the case. For example how about a distance from left edges?
+      ** To work correctly with asymmetrical kernels the reflected kernel
+ ** needs to be applied.
+ */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index,(*k)+GetIndexPixelComponent(
+ k_indexes+u));
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ break;
+#endif
+ case UndefinedMorphology:
+ default:
+ break; /* Do nothing */
+ }
+ /* Final mathematics of results (combine with original image?)
+ **
+ ** NOTE: Difference Morphology operators Edge* and *Hat could also
+ ** be done here but works better with iteration as a image difference
+      ** in the controlling function (below). Thicken and Thinning however
+      ** should be done here so they can be iterated correctly.
+ */
+ switch ( method ) {
+ case HitAndMissMorphology:
+ case ErodeMorphology:
+ result = min; /* minimum of neighbourhood */
+ break;
+ case DilateMorphology:
+ result = max; /* maximum of neighbourhood */
+ break;
+ case ThinningMorphology:
+ /* subtract pattern match from original */
+ result.red -= min.red;
+ result.green -= min.green;
+ result.blue -= min.blue;
+ result.opacity -= min.opacity;
+ result.index -= min.index;
+ break;
+ case ThickenMorphology:
+          /* Add the pattern matches to the original */
+ result.red += min.red;
+ result.green += min.green;
+ result.blue += min.blue;
+ result.opacity += min.opacity;
+ result.index += min.index;
+ break;
+ default:
+ /* result directly calculated or assigned */
+ break;
+ }
+ /* Assign the resulting pixel values - Clamping Result */
+ switch ( method ) {
+ case UndefinedMorphology:
+ case ConvolveMorphology:
+ case DilateIntensityMorphology:
+ case ErodeIntensityMorphology:
+ break; /* full pixel was directly assigned - not a channel method */
+ default:
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0
+ && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ break;
+ }
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+ p++;
+ q++;
+ } /* x */
+ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ {
+ MagickBooleanType
+ proceed;
+
+#if defined(MAGICKCORE_OPENMP_SUPPORT)
+ #pragma omp critical (MagickCore_MorphologyImage)
+#endif
+ proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
+ if (proceed == MagickFalse)
+ status=MagickFalse;
+ }
+ } /* y */
+ q_view=DestroyCacheView(q_view);
+ p_view=DestroyCacheView(p_view);
+ return(status ? (ssize_t)changed : -1);
+}
+
+/* This is almost identical to the MorphologyPrimitive() function above,
+** but will apply the primitive directly to the image in two passes.
+**
+** That is after each row is 'Sync'ed' into the image, the next row will
+** make use of those values as part of the calculation of the next row.
+** It then repeats, but going in the opposite (bottom-up) direction.
+**
+** Because of this 'iterative' handling this function cannot make use
+** of multi-threaded, parallel processing.
+*/
+static ssize_t MorphologyPrimitiveDirect(Image *image,
+ const MorphologyMethod method, const ChannelType channel,
+ const KernelInfo *kernel,ExceptionInfo *exception)
+{
+ CacheView
+ *auth_view,
+ *virt_view;
+
+ MagickBooleanType
+ status;
+
+ MagickOffsetType
+ progress;
+
+ ssize_t
+ y, offx, offy;
+
+ size_t
+ virt_width,
+ changed;
+
+ status=MagickTrue;
+ changed=0;
+ progress=0;
+
+ assert(image != (Image *) NULL);
+ assert(image->signature == MagickSignature);
+ assert(kernel != (KernelInfo *) NULL);
+ assert(kernel->signature == MagickSignature);
+ assert(exception != (ExceptionInfo *) NULL);
+ assert(exception->signature == MagickSignature);
+
+ /* Some methods (including convolve) needs use a reflected kernel.
+ * Adjust 'origin' offsets to loop though kernel as a reflection.
+ */
+ offx = kernel->x;
+ offy = kernel->y;
+ switch(method) {
+ case DistanceMorphology:
+ case VoronoiMorphology:
+ /* kernel needs to used with reflection about origin */
+ offx = (ssize_t) kernel->width-offx-1;
+ offy = (ssize_t) kernel->height-offy-1;
+ break;
+#if 0
+ case ?????Morphology:
+ /* kernel is used as is, without reflection */
+ break;
+#endif
+ default:
+ assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
+ break;
+ }
+
+ /* DO NOT THREAD THIS CODE! */
+ /* two views into same image (virtual, and actual) */
+ virt_view=AcquireCacheView(image);
+ auth_view=AcquireCacheView(image);
+ virt_width=image->columns+kernel->width-1;
+
+ for (y=0; y < (ssize_t) image->rows; y++)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ ssize_t
+ r;
+
+ /* NOTE read virtual pixels, and authentic pixels, from the same image!
+ ** we read using virtual to get virtual pixel handling, but write back
+ ** into the same image.
+ **
+ ** Only top half of kernel is processed as we do a single pass downward
+ ** through the image iterating the distance function as we go.
+ */
+ if (status == MagickFalse)
+ break;
+ p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
+ exception);
+ q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
+ exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ status=MagickFalse;
+ if (status == MagickFalse)
+ break;
+ p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = (ssize_t) virt_width*offy + offx;
+
+ for (x=0; x < (ssize_t) image->columns; x++)
+ {
+ ssize_t
+ v;
+
+ register ssize_t
+ u;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+
+ MagickPixelPacket
+ result;
+
+ /* Starting Defaults */
+ GetMagickPixelPacket(image,&result);
+ SetMagickPixelPacket(image,q,q_indexes,&result);
+ if ( method != VoronoiMorphology )
+ result.opacity = QuantumRange - result.opacity;
+
+ switch ( method ) {
+ case DistanceMorphology:
+ /* Add kernel Value and select the minimum value found. */
+ k = &kernel->values[ kernel->width*kernel->height-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=0; v <= (ssize_t) offy; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=0; u < (ssize_t) offx; u++, k--) {
+ if ( x+u-offx < 0 ) continue; /* off the edge! */
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
+ }
break;
-
- case DilateIntensityMorphology:
- /* Select Pixel with Maximum Intensity within kernel neighbourhood
- **
- ** WARNING: the intensity test fails for CMYK and does not
- ** take into account the moderating effect of the alpha channel
- ** on the intensity (yet).
+ case VoronoiMorphology:
+        /* Apply Distance to 'Matte' channel, copying the closest color.
**
- ** NOTE for correct working of this operation for asymetrical
- ** kernels, the kernel needs to be applied in its reflected form.
- ** That is its values needs to be reversed.
+        ** This is experimental, and really the 'alpha' component should
+        ** be a completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
- if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
- if ( result.red == 0.0 ||
- PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
- /* copy the whole pixel - no channel selection */
- *q = k_pixels[u];
- if ( result.red > 0.0 ) changed++;
- result.red = 1.0;
- }
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=0; u < (ssize_t) offx; u++, k--) {
+ if ( x+u-offx < 0 ) continue; /* off the edge! */
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
+ }
break;
+ default:
+ /* result directly calculated or assigned */
+ break;
+ }
+ /* Assign the resulting pixel values - Clamping Result */
+ switch ( method ) {
+ case VoronoiMorphology:
+ SetPixelPacket(image,&result,q,q_indexes);
+ break;
+ default:
+ if ((channel & RedChannel) != 0)
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
+ if ((channel & GreenChannel) != 0)
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
+ if ((channel & BlueChannel) != 0)
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
+ if ((channel & IndexChannel) != 0
+ && image->colorspace == CMYKColorspace)
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
+ break;
+ }
+ /* Count up changed pixels */
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
+ || ( image->colorspace == CMYKColorspace &&
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
+ changed++; /* The pixel was changed in some way! */
+
+ p++; /* increment pixel buffers */
+ q++;
+ } /* x */
+
+ if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
+ status=MagickFalse;
+ if (image->progress_monitor != (MagickProgressMonitor) NULL)
+ if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
+ == MagickFalse )
+ status=MagickFalse;
+
+ } /* y */
+
+ /* Do the reversed pass through the image */
+ for (y=(ssize_t)image->rows-1; y >= 0; y--)
+ {
+ register const PixelPacket
+ *restrict p;
+
+ register const IndexPacket
+ *restrict p_indexes;
+
+ register PixelPacket
+ *restrict q;
+
+ register IndexPacket
+ *restrict q_indexes;
+
+ register ssize_t
+ x;
+
+ ssize_t
+ r;
+
+ if (status == MagickFalse)
+ break;
+ /* NOTE read virtual pixels, and authentic pixels, from the same image!
+ ** we read using virtual to get virtual pixel handling, but write back
+ ** into the same image.
+ **
+    ** Only the bottom half of the kernel will be processed as we move
+    ** up the image.
+ */
+ p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
+ exception);
+ q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
+ exception);
+ if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
+ status=MagickFalse;
+ if (status == MagickFalse)
+ break;
+ p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
+ q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
+
+ /* adjust positions to end of row */
+ p += image->columns-1;
+ q += image->columns-1;
+
+ /* offset to origin in 'p'. while 'q' points to it directly */
+ r = offx;
+
+ for (x=(ssize_t)image->columns-1; x >= 0; x--)
+ {
+ ssize_t
+ v;
+
+ register ssize_t
+ u;
+
+ register const double
+ *restrict k;
+
+ register const PixelPacket
+ *restrict k_pixels;
+
+ register const IndexPacket
+ *restrict k_indexes;
+ MagickPixelPacket
+ result;
+
+ /* Default - previously modified pixel */
+ GetMagickPixelPacket(image,&result);
+ SetMagickPixelPacket(image,q,q_indexes,&result);
+ if ( method != VoronoiMorphology )
+ result.opacity = QuantumRange - result.opacity;
+ switch ( method ) {
case DistanceMorphology:
- /* Add kernel Value and select the minimum value found.
- ** The result is a iterative distance from edge of image shape.
- **
- ** All Distance Kernels are symetrical, but that may not always
- ** be the case. For example how about a distance from left edges?
- ** To work correctly with asymetrical kernels the reflected kernel
- ** needs to be applied.
- **
- ** Actually this is really a GreyErode with a negative kernel!
- **
- */
- k = &kernel->values[ kernel->width*kernel->height-1 ];
+ /* Add kernel Value and select the minimum value found. */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
- for (v=0; v < (ssize_t) kernel->height; v++) {
+ for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
- Minimize(result.index, (*k)+k_indexes[u]);
+ Minimize(result.index,(*k)+GetIndexPixelComponent(k_indexes+u));
}
- k_pixels += image->columns+kernel->width;
- k_indexes += image->columns+kernel->width;
+ k_pixels += virt_width;
+ k_indexes += virt_width;
}
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
+ if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
+ if ( IsNan(*k) ) continue;
+ Minimize(result.red, (*k)+k_pixels[u].red);
+ Minimize(result.green, (*k)+k_pixels[u].green);
+ Minimize(result.blue, (*k)+k_pixels[u].blue);
+ Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
+ if ( image->colorspace == CMYKColorspace)
+ Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
+ }
+ break;
+ case VoronoiMorphology:
+ /* Apply Distance to 'Matte' channel, copying the closest color.
+ **
+ ** This is experimental, and really the 'alpha' component should
+ ** be a completely separate 'masking' channel.
+ */
+ k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
+ k_pixels = p;
+ k_indexes = p_indexes;
+ for (v=offy; v < (ssize_t) kernel->height; v++) {
+ for (u=0; u < (ssize_t) kernel->width; u++, k--) {
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
+ }
+ k_pixels += virt_width;
+ k_indexes += virt_width;
+ }
+ /* repeat with the just processed pixels of this row */
+ k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
+ k_pixels = q-offx;
+ k_indexes = q_indexes-offx;
+ for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
+ if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
+ if ( IsNan(*k) ) continue;
+ if( result.opacity > (*k)+k_pixels[u].opacity )
+ {
+ SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
+ &result);
+ result.opacity += *k;
+ }
+ }
break;
-
- case UndefinedMorphology:
- default:
- break; /* Do nothing */
- }
- /* Final mathematics of results (combine with original image?)
- **
- ** NOTE: Difference Morphology operators Edge* and *Hat could also
- ** be done here but works better with iteration as a image difference
- ** in the controling function (below). Thicken and Thinning however
- ** should be done here so thay can be iterated correctly.
- */
- switch ( method ) {
- case HitAndMissMorphology:
- case ErodeMorphology:
- result = min; /* minimum of neighbourhood */
- break;
- case DilateMorphology:
- result = max; /* maximum of neighbourhood */
- break;
- case ThinningMorphology:
- /* subtract pattern match from original */
- result.red -= min.red;
- result.green -= min.green;
- result.blue -= min.blue;
- result.opacity -= min.opacity;
- result.index -= min.index;
- break;
- case ThickenMorphology:
- /* Add the pattern matchs to the original */
- result.red += min.red;
- result.green += min.green;
- result.blue += min.blue;
- result.opacity += min.opacity;
- result.index += min.index;
- break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
- case UndefinedMorphology:
- case ConvolveMorphology:
- case DilateIntensityMorphology:
- case ErodeIntensityMorphology:
- break; /* full pixel was directly assigned - not a channel method */
+ case VoronoiMorphology:
+ SetPixelPacket(image,&result,q,q_indexes);
+ break;
default:
if ((channel & RedChannel) != 0)
- q->red = ClampToQuantum(result.red);
+ SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
- q->green = ClampToQuantum(result.green);
+ SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
- q->blue = ClampToQuantum(result.blue);
- if ((channel & OpacityChannel) != 0
- && image->matte == MagickTrue )
- q->opacity = ClampToQuantum(QuantumRange-result.opacity);
+ SetBluePixelComponent(q,ClampToQuantum(result.blue));
+ if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
+ SetOpacityPixelComponent(q,ClampToQuantum(QuantumRange-result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
- q_indexes[x] = ClampToQuantum(result.index);
+ SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
- if ( ( p[r].red != q->red )
- || ( p[r].green != q->green )
- || ( p[r].blue != q->blue )
- || ( p[r].opacity != q->opacity )
+ if ( ( p[r].red != GetRedPixelComponent(q) )
+ || ( p[r].green != GetGreenPixelComponent(q) )
+ || ( p[r].blue != GetBluePixelComponent(q) )
+ || ( p[r].opacity != GetOpacityPixelComponent(q) )
|| ( image->colorspace == CMYKColorspace &&
- p_indexes[r] != q_indexes[x] ) )
+ GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
- p++;
- q++;
+
+ p--; /* go backward through pixel buffers */
+ q--;
} /* x */
- sync=SyncCacheViewAuthenticPixels(q_view,exception);
- if (sync == MagickFalse)
+ if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
- {
- MagickBooleanType
- proceed;
+ if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
+ == MagickFalse )
+ status=MagickFalse;
-#if defined(MAGICKCORE_OPENMP_SUPPORT)
- #pragma omp critical (MagickCore_MorphologyImage)
-#endif
- proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
- if (proceed == MagickFalse)
- status=MagickFalse;
- }
} /* y */
- result_image->type=image->type;
- q_view=DestroyCacheView(q_view);
- p_view=DestroyCacheView(p_view);
- return(status ? (size_t) changed : 0);
-}
+ auth_view=DestroyCacheView(auth_view);
+ virt_view=DestroyCacheView(virt_view);
+ return(status ? (ssize_t) changed : -1);
+}
+/* Apply a Morphology by calling the above low-level primitive application
+** functions. This function handles any iteration loops, composition or
+** re-iteration of results, and compound morphology methods that are based
+** on multiple low-level (staged) morphology methods.
+**
+** Basically this provides the complex glue between the requested morphology
+** method and raw low-level implementation (above).
+*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
+ CompositeOperator
+ curr_compose;
+
Image
*curr_image, /* Image we are working with or iterating */
- *work_image, /* secondary image for primative iteration */
+ *work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
*this_kernel; /* the kernel being applied */
MorphologyMethod
- primative; /* the current morphology primative being applied */
+ primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
+ special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
- method_loop, /* Loop 1: number of compound method iterations */
+ method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
- stage_loop, /* Loop 3: primative loop for compound morphology */
- stage_limit, /* how many primatives in this compound */
- kernel_loop, /* Loop 4: iterate the kernel (basic morphology) */
+ stage_loop, /* Loop 3: primitive loop for compound morphology */
+ stage_limit, /* how many primitives are in this compound */
+ kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
- count, /* total count of primative steps applied */
- changed, /* number pixels changed by last primative operation */
+ count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
+ ssize_t
+ changed; /* number pixels changed by last primitive operation */
+
char
v_info[80];
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
- count = 0; /* number of low-level morphology primatives performed */
+ count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative interations = infinite (well alomst) */
- kernel_limit = image->columns > image->rows ? image->columns : image->rows;
+ kernel_limit = image->columns>image->rows ? image->columns : image->rows;
- verbose = ( GetImageArtifact(image,"verbose") != (const char *) NULL ) ?
- MagickTrue : MagickFalse;
+ verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
+ curr_compose = image->compose;
+ (void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iteratations
- * + how many primatives make up the compound morphology
+ * + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
- stage_limit = 1; /* assume method is not a compount */
+ stage_limit = 1; /* assume method is not a compound */
+ special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
- case SmoothMorphology: /* 4 primative compound morphology */
+ case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
- case OpenMorphology: /* 2 primative compound morphology */
+ case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
+ case DistanceMorphology:
+ case VoronoiMorphology:
+ special = MagickTrue;
+ break;
default:
break;
}
+ /* Apply special methods with special requirements
+ ** For example, single run only, or post-processing requirements
+ */
+ if ( special == MagickTrue )
+ {
+ rslt_image=CloneImage(image,0,0,MagickTrue,exception);
+ if (rslt_image == (Image *) NULL)
+ goto error_cleanup;
+ if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
+ {
+ InheritException(exception,&rslt_image->exception);
+ goto error_cleanup;
+ }
+
+ changed = MorphologyPrimitiveDirect(rslt_image, method,
+ channel, kernel, exception);
+
+ if ( verbose == MagickTrue )
+ (void) fprintf(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
+ CommandOptionToMnemonic(MagickMorphologyOptions, method),
+ 1.0,0.0,1.0, (double) changed);
+
+ if ( changed < 0 )
+ goto error_cleanup;
+
+ if ( method == VoronoiMorphology ) {
+ /* Preserve the alpha channel of input image - but turned off */
+ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
+ (void) CompositeImageChannel(rslt_image, DefaultChannels,
+ CopyOpacityCompositeOp, image, 0, 0);
+ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
+ }
+ goto exit_cleanup;
+ }
+
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
- /* Some methods require a reflected kernel to use with primatives.
+ /* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
break;
}
+ /* Loops around more primitive morphology methods
+ ** erode, dilate, open, close, smooth, edge, etc...
+ */
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
- /* Select primative morphology for this stage of compound method */
+ /* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
- primative = method; /* Assume method is a primative */
+ primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dialate */
case TopHatMorphology: /* open and image difference */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
if ( stage_loop == 2 )
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
- primative = ErodeIntensityMorphology;
+ primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
- primative = DilateIntensityMorphology;
+ primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
if ( stage_loop == 2 )
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateIntensityMorphology;
+ primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
- primative = ErodeIntensityMorphology;
+ primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
- primative = DilateMorphology;
+ primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
- primative = ErodeMorphology;
+ primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
- primative = ConvolveMorphology;
+ primitive = ConvolveMorphology;
break;
default:
break;
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
(void) FormatMagickString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
- MagickOptionToMnemonic(MagickMorphologyOptions,method),(double)
+ CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
- else if ( primative != method )
+ else if ( primitive != method )
(void) FormatMagickString(v_info, MaxTextExtent, "%s:%.20g -> ",
- MagickOptionToMnemonic(MagickMorphologyOptions, method),(double)
+ CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
- /* Loop 4: Iterate the kernel with primative */
+ /* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
- /* Create a destination image, if not yet defined */
+ /* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
+ /* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
- changed = MorphologyPrimitive(curr_image, work_image, primative,
- channel, this_kernel, bias, exception);
- kernel_changed += changed;
- method_changed += changed;
+ changed = MorphologyPrimitive(curr_image, work_image, primitive,
+ channel, this_kernel, bias, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
fprintf(stderr, "\n"); /* add end-of-line from previous */
(void) fprintf(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
- v_info,MagickOptionToMnemonic(MagickMorphologyOptions,
- primative),(this_kernel == rflt_kernel ) ? "*" : "",
+ v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
+ primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
+ if ( changed < 0 )
+ goto error_cleanup;
+ kernel_changed += changed;
+ method_changed += changed;
+
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
- } /* End Loop 4: Iterate the kernel with primative */
+ } /* End Loop 4: Iterate the kernel with primitive */
- if ( verbose == MagickTrue && kernel_changed != changed )
+ if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
fprintf(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
fprintf(stderr, "\n"); /* add end-of-line before looping */
case BottomHatMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference with original image",
- MagickOptionToMnemonic(MagickMorphologyOptions, method) );
+ CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
case EdgeMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference of Dilate and Erode",
- MagickOptionToMnemonic(MagickMorphologyOptions, method) );
+ CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
curr_image = (Image *) image; /* continue with original image */
}
else
- { /* add the new 'current' result to the composition
+ { /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Compositon
** below ensures the methematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
- ** Turn off SVG composition 'alpha blending'.
- **
- ** The compose image order is specifically so that the new image can
- ** be subtarcted 'Minus' from the collected result, to allow you to
- ** convert a HitAndMiss methd into a Thinning method.
+ ** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
fprintf(stderr, " (compose \"%s\")",
- MagickOptionToMnemonic(MagickComposeOptions, rslt_compose) );
- (void) CompositeImageChannel(curr_image,
+ CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
+ (void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
- rslt_image, 0, 0);
- rslt_image = DestroyImage(rslt_image);
- rslt_image = curr_image;
+ curr_image, 0, 0);
+ curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
- if ( curr_image != (Image *) NULL &&
- curr_image != rslt_image &&
- curr_image != image )
- curr_image = DestroyImage(curr_image);
+ if ( curr_image == rslt_image )
+ curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
- if ( curr_image != (Image *) NULL &&
- curr_image != rslt_image &&
- curr_image != image )
+ if ( curr_image == rslt_image || curr_image == image )
+ curr_image = (Image *) NULL;
+ if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
+
\f
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
- const char
- *artifact;
-
KernelInfo
*curr_kernel;
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
+ const char
+ *artifact;
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *)NULL ) {
if ( curr_kernel == kernel )
}
/* display the (normalized) kernel via stderr */
- artifact = GetImageArtifact(image,"showkernel");
- if ( artifact == (const char *) NULL)
- artifact = GetImageArtifact(image,"convolve:showkernel");
- if ( artifact == (const char *) NULL)
- artifact = GetImageArtifact(image,"morphology:showkernel");
- if ( artifact != (const char *) NULL)
+ if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
+ || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
+ || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
- compose = UndefinedCompositeOp; /* use default for method */
- artifact = GetImageArtifact(image,"morphology:compose");
- if ( artifact != (const char *) NULL)
- compose = (CompositeOperator) ParseMagickOption(
+ { const char
+ *artifact;
+ artifact = GetImageArtifact(image,"morphology:compose");
+ compose = UndefinedCompositeOp; /* use default for method */
+ if ( artifact != (const char *) NULL)
+ compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
-
+ }
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
% is then passed to UnityAddKernelInfo() to add a scled unity kernel
% into the scaled/normalized kernel.
%
-% The format of the ScaleKernelInfo method is:
+% The format of the ScaleGeometryKernelInfo method is:
%
-% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
-% const MagickStatusType normalize_flags )
+% void ScaleGeometryKernelInfo(KernelInfo *kernel,
+% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
-% values seperatally to those of the negative values, so the kernel will be
+% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
if ( kernel->next != (KernelInfo *) NULL )
fprintf(stderr, " #%lu", (unsigned long) c );
fprintf(stderr, " \"%s",
- MagickOptionToMnemonic(MagickKernelOptions, k->type) );
+ CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) > MagickEpsilon )
fprintf(stderr, "@%lg", k->angle);
fprintf(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,