vigra/stdconvolution.hxx
00001 /************************************************************************/ 00002 /* */ 00003 /* Copyright 1998-2002 by Ullrich Koethe */ 00004 /* Cognitive Systems Group, University of Hamburg, Germany */ 00005 /* */ 00006 /* This file is part of the VIGRA computer vision library. */ 00007 /* ( Version 1.5.0, Dec 07 2006 ) */ 00008 /* The VIGRA Website is */ 00009 /* http://kogs-www.informatik.uni-hamburg.de/~koethe/vigra/ */ 00010 /* Please direct questions, bug reports, and contributions to */ 00011 /* koethe@informatik.uni-hamburg.de or */ 00012 /* vigra@kogs1.informatik.uni-hamburg.de */ 00013 /* */ 00014 /* Permission is hereby granted, free of charge, to any person */ 00015 /* obtaining a copy of this software and associated documentation */ 00016 /* files (the "Software"), to deal in the Software without */ 00017 /* restriction, including without limitation the rights to use, */ 00018 /* copy, modify, merge, publish, distribute, sublicense, and/or */ 00019 /* sell copies of the Software, and to permit persons to whom the */ 00020 /* Software is furnished to do so, subject to the following */ 00021 /* conditions: */ 00022 /* */ 00023 /* The above copyright notice and this permission notice shall be */ 00024 /* included in all copies or substantial portions of the */ 00025 /* Software. */ 00026 /* */ 00027 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */ 00028 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */ 00029 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */ 00030 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */ 00031 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */ 00032 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */ 00033 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */ 00034 /* OTHER DEALINGS IN THE SOFTWARE. */ 00035 /* */ 00036 /************************************************************************/ 00037 00038 00039 #ifndef VIGRA_STDCONVOLUTION_HXX 00040 #define VIGRA_STDCONVOLUTION_HXX 00041 00042 #include <cmath> 00043 #include "stdimage.hxx" 00044 #include "bordertreatment.hxx" 00045 #include "separableconvolution.hxx" 00046 #include "utilities.hxx" 00047 #include "sized_int.hxx" 00048 00049 namespace vigra { 00050 00051 template <class SrcIterator, class SrcAccessor, 00052 class DestIterator, class DestAccessor, 00053 class KernelIterator, class KernelAccessor, 00054 class KSumType> 00055 void internalPixelEvaluationByClip(int x, int y, int w, int h, SrcIterator xs, 00056 SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc, 00057 KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak, 00058 KSumType norm) 00059 { 00060 typedef typename 00061 NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType; 00062 typedef 00063 NumericTraits<typename DestAccessor::value_type> DestTraits; 00064 00065 // calculate width and height of the kernel 00066 int kernel_width = klr.x - kul.x + 1; 00067 int kernel_height = klr.y - kul.y + 1; 00068 00069 SumType sum = NumericTraits<SumType>::zero(); 00070 int xx, yy; 00071 int x0, y0, x1, y1; 00072 00073 y0 = (y<klr.y) ? -y : -klr.y; 00074 y1 = (h-y-1<-kul.y) ? h-y-1 : -kul.y; 00075 00076 x0 = (x<klr.x) ? -x : -klr.x; 00077 x1 = (w-x-1<-kul.x) ? 
w-x-1 : -kul.x;
00078 
00079     SrcIterator yys = xs + Diff2D(x0, y0);
00080     KernelIterator yk = ki - Diff2D(x0, y0);
00081 
00082     KSumType ksum = NumericTraits<KSumType>::zero();
00083     kernel_width = x1 - x0 + 1;
00084     kernel_height = y1 - y0 + 1;
00085 
00086     // clipping is done first, then mirroring!
00087 
00088     for(yy=0; yy<kernel_height; ++yy, ++yys.y, --yk.y)
00089     {
00090         SrcIterator xxs = yys;
00091         KernelIterator xk = yk;
00092 
00093         for(xx=0; xx<kernel_width; ++xx, ++xxs.x, --xk.x)
00094         {
00095             sum += ak(xk) * src_acc(xxs);
00096             ksum += ak(xk);
00097         }
00098     }
00099 
00100     // store average in destination pixel
00101     dest_acc.set(DestTraits::fromRealPromote((norm / ksum) * sum), xd);
00102 
00103 }
00104 
00105 
00106 #if 0
00107 
00108 template <class SrcIterator, class SrcAccessor,
00109           class DestIterator, class DestAccessor,
00110           class KernelIterator, class KernelAccessor>
00111 void internalPixelEvaluationByWrapReflectRepeat(int x, int y, int src_width, int src_height, SrcIterator xs,
00112                                                 SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc,
00113                                                 KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak,
00114                                                 BorderTreatmentMode border)
00115 {
00116 
00117     typedef typename
00118         NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
00119     typedef
00120         NumericTraits<typename DestAccessor::value_type> DestTraits;
00121 
00122     SumType sum = NumericTraits<SumType>::zero();
00123 
00124     SrcIterator src_ul = xs - Diff2D(x, y);
00125     SrcIterator src_lr = src_ul + Diff2D(src_width, src_height);
00126 
00127     SrcIterator yys = xs;
00128     KernelIterator yk = ki;
00129 
00130     // calculate width and height of the kernel
00131     int kernel_width = klr.x - kul.x + 1;
00132     int kernel_height = klr.y - kul.y + 1;
00133 
00134     // where the kernel is beyond the borders:
00135     bool top_to_much = (y<klr.y) ? true : false;
00136     bool down_to_much = (src_height-y-1<-kul.y)? true : false;
00137     bool left_to_much = (x<klr.x)? true : false;
00138     bool right_to_much = (src_width-x-1<-kul.x)? true : false;
00139 
00140     // direction of iteration,
00141     // e.g. (-1, +1) for ll->ur or (-1, -1) for lr->ul
00142     Diff2D way_increment;
00143 
00144     /* Iteration is always done from valid to invalid range.
00145        The following tuple is composed as such:
00146        - If an invalid range is reached while iterating in X,
00147          a jump of border_increment.first is performed and
00148          border_increment.third is used for further iterating.
00149        - If an invalid range is reached while iterating in Y,
00150          a jump of border_increment.second is performed and
00151          border_increment.fourth is used for further iterating.
00152 */ 00153 tuple4<int, int, int, int> border_increment; 00154 if (border == BORDER_TREATMENT_REPEAT){ 00155 border_increment = tuple4<int, int, int, int>(1, 1, 0, 0); 00156 }else if (border == BORDER_TREATMENT_REFLECT){ 00157 border_increment = tuple4<int, int, int, int>(2, 2, -1, -1); 00158 }else{ // BORDER_TREATMENT_WRAP 00159 border_increment = tuple4<int, int, int, int>(src_width, src_height, 1, 1); 00160 } 00161 00162 pair<int, int> valid_step_count; 00163 00164 if(left_to_much && !top_to_much && !down_to_much) 00165 { 00166 yys += klr; 00167 yk += kul; 00168 way_increment = Diff2D(-1, -1); 00169 border_increment.third = -border_increment.third; 00170 border_increment.fourth = -border_increment.fourth; 00171 valid_step_count = std::make_pair((yys - src_ul).x + 1, kernel_height); 00172 } 00173 else if(top_to_much && !left_to_much && !right_to_much) 00174 { 00175 yys += klr; 00176 yk += kul; 00177 way_increment = Diff2D(-1, -1); 00178 border_increment.third = -border_increment.third; 00179 border_increment.fourth = -border_increment.fourth; 00180 valid_step_count = std::make_pair(kernel_width, (yys - src_ul).y + 1); 00181 } 00182 else if(right_to_much && !top_to_much && !down_to_much) 00183 { 00184 yys += kul; 00185 yk += klr; 00186 way_increment = Diff2D(1, 1); 00187 border_increment.first = -border_increment.first; 00188 border_increment.second = -border_increment.second; 00189 valid_step_count = std::make_pair((src_lr - yys).x, kernel_height); 00190 } 00191 else if(down_to_much && !left_to_much && !right_to_much) 00192 { 00193 yys += kul; 00194 yk += klr; 00195 way_increment = Diff2D(1, 1); 00196 border_increment.first = -border_increment.first; 00197 border_increment.second = -border_increment.second; 00198 valid_step_count = std::make_pair(kernel_width, (src_lr - yys).y); 00199 } 00200 else if(down_to_much && left_to_much) 00201 { 00202 yys += kul + Diff2D(kernel_width - 1, 0); 00203 yk += kul + Diff2D(0, kernel_height - 1); 00204 way_increment = Diff2D(-1, 1); 00205 border_increment.second = -border_increment.second; 00206 border_increment.third = -border_increment.third; 00207 valid_step_count = std::make_pair((yys - src_ul).x + 1, (src_lr - yys).y); 00208 } 00209 else if(down_to_much && right_to_much) 00210 { 00211 yys += kul; 00212 yk += klr; 00213 way_increment = Diff2D(1, 1); 00214 border_increment.first = -border_increment.first; 00215 border_increment.second = -border_increment.second; 00216 valid_step_count = std::make_pair((src_lr - yys).x, (src_lr - yys).y); 00217 } 00218 else if(top_to_much && left_to_much) 00219 { 00220 yys += klr; 00221 yk += kul; 00222 way_increment = Diff2D(-1, -1); 00223 border_increment.third = -border_increment.third; 00224 border_increment.fourth = -border_increment.fourth; 00225 valid_step_count = std::make_pair((yys - src_ul).x + 1, (yys - src_ul).y + 1); 00226 } 00227 else 00228 { //top_to_much && right_to_much 00229 yys += kul + Diff2D(0, kernel_height - 1); 00230 yk += kul + Diff2D(kernel_width - 1, 0); 00231 way_increment = Diff2D(1, -1); 00232 border_increment.first = -border_increment.first; 00233 border_increment.fourth = -border_increment.fourth; 00234 valid_step_count = std::make_pair((src_lr - yys).x, (yys - src_ul).y + 1); 00235 } 00236 00237 int yy = 0, xx; 00238 00239 //laeuft den zulässigen Bereich in y-Richtung durch 00240 for(; yy < valid_step_count.second; ++yy, yys.y += way_increment.y, yk.y -= way_increment.y ) 00241 { 00242 SrcIterator xxs = yys; 00243 KernelIterator xk = yk; 00244 00245 //laeuft den zulässigen Bereich 
in x-Richtung durch 00246 for(xx = 0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x) 00247 { 00248 sum += ak(xk) * src_acc(xxs); 00249 } 00250 00251 //Nächstes ++xxs.x wuerde in unzulässigen Bereich 00252 //bringen => Sprung in zulaessigen Bereich 00253 xxs.x += border_increment.first; 00254 00255 for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x ) 00256 { 00257 sum += ak(xk) * src_acc(xxs); 00258 } 00259 } 00260 00261 //Nächstes ++yys.y wuerde in unzulässigen Bereich 00262 //bringen => Sprung in zulaessigen Bereich 00263 yys.y += border_increment.second; 00264 00265 for( ; yy < kernel_height; ++yy, yys.y += border_increment.third, yk.y -= way_increment.y) 00266 { 00267 SrcIterator xxs = yys; 00268 KernelIterator xk = yk; 00269 00270 for(xx=0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x) 00271 { 00272 sum += ak(xk) * src_acc(xxs); 00273 } 00274 00275 //Sprung in den zulaessigen Bereich 00276 xxs.x += border_increment.first; 00277 00278 for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x ) 00279 { 00280 sum += ak(xk) * src_acc(xxs); 00281 } 00282 } 00283 00284 // store average in destination pixel 00285 dest_acc.set(DestTraits::fromRealPromote(sum), xd); 00286 00287 }// end of internalPixelEvaluationByWrapReflectRepeat 00288 #endif /* #if 0 */ 00289 00290 00291 template <class SrcIterator, class SrcAccessor, 00292 class KernelIterator, class KernelAccessor, 00293 class SumType> 00294 void 00295 internalPixelEvaluationByWrapReflectRepeat(SrcIterator xs, SrcAccessor src_acc, 00296 KernelIterator xk, KernelAccessor ak, 00297 int left, int right, int kleft, int kright, 00298 int borderskipx, int borderinc, SumType & sum) 00299 { 00300 SrcIterator xxs = xs + left; 00301 KernelIterator xxk = xk - left; 00302 00303 for(int xx = left; xx <= right; ++xx, ++xxs, --xxk) 00304 { 00305 sum += ak(xxk) * src_acc(xxs); 00306 } 00307 00308 xxs = xs + left - borderskipx; 00309 xxk = xk - left + 1; 00310 for(int xx = left - 1; xx >= -kright; --xx, xxs -= borderinc, ++xxk) 00311 { 00312 sum += ak(xxk) * src_acc(xxs); 00313 } 00314 00315 xxs = xs + right + borderskipx; 00316 xxk = xk - right - 1; 00317 for(int xx = right + 1; xx <= -kleft; ++xx, xxs += borderinc, --xxk) 00318 { 00319 sum += ak(xxk) * src_acc(xxs); 00320 } 00321 } 00322 00323 00324 /** \addtogroup StandardConvolution Two-dimensional convolution functions 00325 00326 Perform 2D non-separable convolution, with and without ROI mask. 00327 00328 These generic convolution functions implement 00329 the standard 2D convolution operation for images that fit 00330 into the required interface. Arbitrary ROI's are supported 00331 by the mask version of the algorithm. 00332 The functions need a suitable 2D kernel to operate. 00333 */ 00334 //@{ 00335 00336 /** \brief Performs a 2 dimensional convolution of the source image using the given 00337 kernel. 00338 00339 The KernelIterator must point to the center of the kernel, and 00340 the kernel's size is given by its upper left (x and y of distance <= 0) and 00341 lower right (distance >= 0) corners. The image must always be larger than the 00342 kernel. At those positions where the kernel does not completely fit 00343 into the image, the specified \ref BorderTreatmentMode is 00344 applied. 
You can choose between the following BorderTreatmentModes:
00345     <ul>
00346     <li>BORDER_TREATMENT_CLIP</li>
00347     <li>BORDER_TREATMENT_AVOID</li>
00348     <li>BORDER_TREATMENT_WRAP</li>
00349     <li>BORDER_TREATMENT_REFLECT</li>
00350     <li>BORDER_TREATMENT_REPEAT</li>
00351     </ul><br>
00352     The image's pixel type (SrcAccessor::value_type) must be a
00353     linear space over the kernel's value_type (KernelAccessor::value_type),
00354     i.e. addition of source values, multiplication with kernel values,
00355     and NumericTraits must be defined.
00356     The kernel's value_type must be an algebraic field,
00357     i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
00358     be defined.
00359 
00360     <b> Declarations:</b>
00361 
00362     pass arguments explicitly:
00363     \code
00364     namespace vigra {
00365         template <class SrcIterator, class SrcAccessor,
00366                   class DestIterator, class DestAccessor,
00367                   class KernelIterator, class KernelAccessor>
00368         void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
00369                            DestIterator dest_ul, DestAccessor dest_acc,
00370                            KernelIterator ki, KernelAccessor ak,
00371                            Diff2D kul, Diff2D klr, BorderTreatmentMode border);
00372     }
00373     \endcode
00374 
00375 
00376     use argument objects in conjunction with \ref ArgumentObjectFactories:
00377     \code
00378     namespace vigra {
00379         template <class SrcIterator, class SrcAccessor,
00380                   class DestIterator, class DestAccessor,
00381                   class KernelIterator, class KernelAccessor>
00382         void convolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
00383                            pair<DestIterator, DestAccessor> dest,
00384                            tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
00385                            BorderTreatmentMode> kernel);
00386     }
00387     \endcode
00388 
00389     <b> Usage:</b>
00390 
00391     <b>\#include</b> "<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>"<br>
00392     Namespace: vigra
00393 
00394 
00395     \code
00396     vigra::FImage src(w,h), dest(w,h);
00397     ...
00398 
00399     // define horizontal Sobel filter
00400     vigra::Kernel2D<float> sobel;
00401 
00402     sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
00403                          0.125, 0.0, -0.125,
00404                          0.25,  0.0, -0.25,
00405                          0.125, 0.0, -0.125;
00406 
00407     vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel));
00408     \endcode
00409 
00410     <b> Required Interface:</b>
00411 
00412     \code
00413     ImageIterator src_ul, src_lr;
00414     ImageIterator dest_ul;
00415     ImageIterator ik;
00416 
00417     SrcAccessor src_accessor;
00418     DestAccessor dest_accessor;
00419     KernelAccessor kernel_accessor;
00420 
00421     NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul);
00422 
00423     s = s + s;
00424     s = kernel_accessor(ik) * s;
00425     s -= s;
00426 
00427     dest_accessor.set(
00428         NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul);
00429 
00430     NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik);
00431 
00432     k += k;
00433     k -= k;
00434     k = k / k;
00435 
00436     \endcode
00437 
00438     <b> Preconditions:</b>
00439 
00440     \code
00441     kul.x <= 0
00442     kul.y <= 0
00443     klr.x >= 0
00444     klr.y >= 0
00445     src_lr.x - src_ul.x >= klr.x - kul.x + 1
00446     src_lr.y - src_ul.y >= klr.y - kul.y + 1
00447     \endcode
00448 
00449     If border == BORDER_TREATMENT_CLIP: Sum of kernel elements must be
00450     != 0.
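
    A border treatment other than the kernel's default can also be requested explicitly
    through the kernel2d() factory (see kernel2d(Kernel2D const &, BorderTreatmentMode) below).
    A minimal sketch, reusing the Sobel kernel from the usage example above:

    \code
    // same convolution, but reflect the image at its borders
    vigra::convolveImage(srcImageRange(src), destImage(dest),
                         kernel2d(sobel, vigra::BORDER_TREATMENT_REFLECT));
    \endcode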
00451 00452 */ 00453 template <class SrcIterator, class SrcAccessor, 00454 class DestIterator, class DestAccessor, 00455 class KernelIterator, class KernelAccessor> 00456 void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc, 00457 DestIterator dest_ul, DestAccessor dest_acc, 00458 KernelIterator ki, KernelAccessor ak, 00459 Diff2D kul, Diff2D klr, BorderTreatmentMode border) 00460 { 00461 vigra_precondition((border == BORDER_TREATMENT_CLIP || 00462 border == BORDER_TREATMENT_AVOID || 00463 border == BORDER_TREATMENT_REFLECT || 00464 border == BORDER_TREATMENT_REPEAT || 00465 border == BORDER_TREATMENT_WRAP), 00466 "convolveImage():\n" 00467 " Border treatment must be one of follow treatments:\n" 00468 " - BORDER_TREATMENT_CLIP\n" 00469 " - BORDER_TREATMENT_AVOID\n" 00470 " - BORDER_TREATMENT_REFLECT\n" 00471 " - BORDER_TREATMENT_REPEAT\n" 00472 " - BORDER_TREATMENT_WRAP\n"); 00473 00474 vigra_precondition(kul.x <= 0 && kul.y <= 0, 00475 "convolveImage(): coordinates of " 00476 "kernel's upper left must be <= 0."); 00477 vigra_precondition(klr.x >= 0 && klr.y >= 0, 00478 "convolveImage(): coordinates of " 00479 "kernel's lower right must be >= 0."); 00480 00481 // use traits to determine SumType as to prevent possible overflow 00482 typedef typename 00483 PromoteTraits<typename SrcAccessor::value_type, 00484 typename KernelAccessor::value_type>::Promote SumType; 00485 typedef typename 00486 NumericTraits<typename KernelAccessor::value_type>::RealPromote KernelSumType; 00487 typedef 00488 NumericTraits<typename DestAccessor::value_type> DestTraits; 00489 00490 // calculate width and height of the image 00491 int w = src_lr.x - src_ul.x; 00492 int h = src_lr.y - src_ul.y; 00493 00494 // calculate width and height of the kernel 00495 int kernel_width = klr.x - kul.x + 1; 00496 int kernel_height = klr.y - kul.y + 1; 00497 00498 vigra_precondition(w >= kernel_width && h >= kernel_height, 00499 "convolveImage(): kernel larger than image."); 00500 00501 KernelSumType norm = NumericTraits<KernelSumType>::zero(); 00502 if(border == BORDER_TREATMENT_CLIP) 00503 { 00504 // calculate the sum of the kernel elements for renormalization 00505 KernelIterator yk = ki + klr; 00506 00507 // determine sum within kernel (= norm) 00508 for(int y = 0; y < kernel_height; ++y, --yk.y) 00509 { 00510 KernelIterator xk = yk; 00511 for(int x = 0; x < kernel_width; ++x, --xk.x) 00512 { 00513 norm += ak(xk); 00514 } 00515 } 00516 vigra_precondition(norm != NumericTraits<KernelSumType>::zero(), 00517 "convolveImage(): Cannot use BORDER_TREATMENT_CLIP with a DC-free kernel"); 00518 } 00519 00520 // create iterators for the interior part of the image (where the kernel always fits into the image) 00521 DestIterator yd = dest_ul + Diff2D(klr.x, klr.y); 00522 SrcIterator ys = src_ul + Diff2D(klr.x, klr.y); 00523 SrcIterator send = src_lr + Diff2D(kul.x, kul.y); 00524 00525 // iterate over the interior part 00526 for(; ys.y < send.y; ++ys.y, ++yd.y) 00527 { 00528 // create x iterators 00529 DestIterator xd(yd); 00530 SrcIterator xs(ys); 00531 00532 for(; xs.x < send.x; ++xs.x, ++xd.x) 00533 { 00534 // init the sum 00535 SumType sum = NumericTraits<SumType>::zero(); 00536 00537 SrcIterator yys = xs - klr; 00538 SrcIterator yyend = xs - kul; 00539 KernelIterator yk = ki + klr; 00540 00541 for(; yys.y <= yyend.y; ++yys.y, --yk.y) 00542 { 00543 typename SrcIterator::row_iterator xxs = yys.rowIterator(); 00544 typename SrcIterator::row_iterator xxe = xxs + kernel_width; 00545 typename 
KernelIterator::row_iterator xk = yk.rowIterator(); 00546 00547 for(; xxs < xxe; ++xxs, --xk) 00548 { 00549 sum += ak(xk) * src_acc(xxs); 00550 } 00551 } 00552 00553 // store convolution result in destination pixel 00554 dest_acc.set(DestTraits::fromRealPromote(sum), xd); 00555 } 00556 } 00557 00558 if(border == BORDER_TREATMENT_AVOID) 00559 return; // skip processing near the border 00560 00561 int interiorskip = w + kul.x - klr.x - 1; 00562 int borderskipx; 00563 int borderskipy; 00564 int borderinc; 00565 if(border == BORDER_TREATMENT_REPEAT) 00566 { 00567 borderskipx = 0; 00568 borderskipy = 0; 00569 borderinc = 0; 00570 } 00571 else if(border == BORDER_TREATMENT_REFLECT) 00572 { 00573 borderskipx = -1; 00574 borderskipy = -1; 00575 borderinc = -1; 00576 } 00577 else if(border == BORDER_TREATMENT_WRAP) 00578 { 00579 borderskipx = -w+1; 00580 borderskipy = -h+1; 00581 borderinc = 1; 00582 } 00583 00584 // create iterators for the entire image 00585 yd = dest_ul; 00586 ys = src_ul; 00587 00588 // work on entire image (but skip the already computed points in the loop) 00589 for(int y = 0; y < h; ++y, ++ys.y, ++yd.y) 00590 { 00591 int top = std::max(static_cast<IntBiggest>(-klr.y), 00592 static_cast<IntBiggest>(src_ul.y - ys.y)); 00593 int bottom = std::min(static_cast<IntBiggest>(-kul.y), 00594 static_cast<IntBiggest>(src_lr.y - ys.y - 1)); 00595 00596 // create x iterators 00597 DestIterator xd(yd); 00598 SrcIterator xs(ys); 00599 00600 for(int x = 0; x < w; ++x, ++xs.x, ++xd.x) 00601 { 00602 // check if we are away from the border 00603 if(y >= klr.y && y < h+kul.y && x == klr.x) 00604 { 00605 // yes => skip the already computed points 00606 x += interiorskip; 00607 xs.x += interiorskip; 00608 xd.x += interiorskip; 00609 continue; 00610 } 00611 if (border == BORDER_TREATMENT_CLIP) 00612 { 00613 internalPixelEvaluationByClip(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, norm); 00614 } 00615 else 00616 { 00617 int left = std::max(-klr.x, src_ul.x - xs.x); 00618 int right = std::min(-kul.x, src_lr.x - xs.x - 1); 00619 00620 // init the sum 00621 SumType sum = NumericTraits<SumType>::zero(); 00622 00623 // create iterators for the part of the kernel that fits into the image 00624 SrcIterator yys = xs + Size2D(0, top); 00625 KernelIterator yk = ki - Size2D(0, top); 00626 00627 int yy; 00628 for(yy = top; yy <= bottom; ++yy, ++yys.y, --yk.y) 00629 { 00630 internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak, 00631 left, right, kul.x, klr.x, borderskipx, borderinc, sum); 00632 } 00633 yys = xs + Size2D(0, top - borderskipy); 00634 yk = ki - Size2D(0, top - 1); 00635 for(yy = top - 1; yy >= -klr.y; --yy, yys.y -= borderinc, ++yk.y) 00636 { 00637 internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak, 00638 left, right, kul.x, klr.x, borderskipx, borderinc, sum); 00639 } 00640 yys = xs + Size2D(0, bottom + borderskipy); 00641 yk = ki - Size2D(0, bottom + 1); 00642 for(yy = bottom + 1; yy <= -kul.y; ++yy, yys.y += borderinc, --yk.y) 00643 { 00644 internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak, 00645 left, right, kul.x, klr.x, borderskipx, borderinc, sum); 00646 } 00647 00648 // store convolution result in destination pixel 00649 dest_acc.set(DestTraits::fromRealPromote(sum), xd); 00650 00651 // internalPixelEvaluationByWrapReflectRepeat(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, border); 00652 } 00653 } 00654 } 00655 } 00656 00657 00658 template <class 
SrcIterator, class SrcAccessor,
00659           class DestIterator, class DestAccessor,
00660           class KernelIterator, class KernelAccessor>
00661 inline
00662 void convolveImage(
00663     triple<SrcIterator, SrcIterator, SrcAccessor> src,
00664     pair<DestIterator, DestAccessor> dest,
00665     tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
00666     BorderTreatmentMode> kernel)
00667 {
00668     convolveImage(src.first, src.second, src.third,
00669                   dest.first, dest.second,
00670                   kernel.first, kernel.second, kernel.third,
00671                   kernel.fourth, kernel.fifth);
00672 }
00673 
00674 
00675 /** \brief Performs a 2-dimensional normalized convolution, i.e. convolution with a mask image.
00676 
00677     This function computes
00678     <a href ="http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/PIRODDI1/NormConv/NormConv.html">normalized
00679     convolution</a> as defined in
00680     Knutsson, H. and Westin, C-F.: <i>Normalized and differential convolution:
00681     Methods for Interpolation and Filtering of incomplete and uncertain data</i>.
00682     Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition, 1993, 515-523.
00683 
00684     The mask image must be binary and encodes which pixels of the original image
00685     are valid. It is used as follows:
00686     Only pixels under the mask are used in the calculations. Whenever a part of the
00687     kernel lies outside the mask, it is ignored, and the kernel is renormalized to its
00688     original norm (analogous to the CLIP \ref BorderTreatmentMode). Thus, a useful convolution
00689     result is computed whenever <i>at least one valid pixel is within the current window</i>.
00690     In particular, destination pixels not under the mask still receive a value if they are <i>near</i>
00691     the mask. Therefore, this algorithm is useful as an interpolator of sparse input data.
00692     If you are only interested in the destination values under the mask, you can perform
00693     a subsequent \ref copyImageIf().
00694 
00695     The KernelIterator must point to the center of the kernel, and
00696     the kernel's size is given by its upper left (x and y of distance <= 0) and
00697     lower right (distance >= 0) corners. The image must always be larger than the
00698     kernel. At those positions where the kernel does not completely fit
00699     into the image, the specified \ref BorderTreatmentMode is
00700     applied. Only BORDER_TREATMENT_CLIP and BORDER_TREATMENT_AVOID are currently
00701     supported.
00702 
00703     The image's pixel type (SrcAccessor::value_type) must be a
00704     linear space over the kernel's value_type (KernelAccessor::value_type),
00705     i.e. addition of source values, multiplication with kernel values,
00706     and NumericTraits must be defined.
00707     The kernel's value_type must be an algebraic field,
00708     i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
00709     be defined.
00710 00711 <b> Declarations:</b> 00712 00713 pass arguments explicitly: 00714 \code 00715 namespace vigra { 00716 template <class SrcIterator, class SrcAccessor, 00717 class MaskIterator, class MaskAccessor, 00718 class DestIterator, class DestAccessor, 00719 class KernelIterator, class KernelAccessor> 00720 void 00721 normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc, 00722 MaskIterator mul, MaskAccessor am, 00723 DestIterator dest_ul, DestAccessor dest_acc, 00724 KernelIterator ki, KernelAccessor ak, 00725 Diff2D kul, Diff2D klr, BorderTreatmentMode border); 00726 } 00727 \endcode 00728 00729 00730 use argument objects in conjunction with \ref ArgumentObjectFactories: 00731 \code 00732 namespace vigra { 00733 template <class SrcIterator, class SrcAccessor, 00734 class MaskIterator, class MaskAccessor, 00735 class DestIterator, class DestAccessor, 00736 class KernelIterator, class KernelAccessor> 00737 inline 00738 void normalizedConvolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src, 00739 pair<MaskIterator, MaskAccessor> mask, 00740 pair<DestIterator, DestAccessor> dest, 00741 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, 00742 BorderTreatmentMode> kernel); 00743 } 00744 \endcode 00745 00746 <b> Usage:</b> 00747 00748 <b>\#include</b> "<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>"<br> 00749 Namespace: vigra 00750 00751 00752 \code 00753 vigra::FImage src(w,h), dest(w,h); 00754 vigra::CImage mask(w,h); 00755 ... 00756 00757 // define 3x3 binomial filter 00758 vigra::Kernel2D<float> binom; 00759 00760 binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = // upper left and lower right 00761 0.0625, 0.125, 0.0625, 00762 0.125, 0.25, 0.125, 00763 0.0625, 0.125, 0.0625; 00764 00765 vigra::normalizedConvolveImage(srcImageRange(src), maskImage(mask), destImage(dest), kernel2d(binom)); 00766 \endcode 00767 00768 <b> Required Interface:</b> 00769 00770 \code 00771 ImageIterator src_ul, src_lr; 00772 ImageIterator mul; 00773 ImageIterator dest_ul; 00774 ImageIterator ik; 00775 00776 SrcAccessor src_accessor; 00777 MaskAccessor mask_accessor; 00778 DestAccessor dest_accessor; 00779 KernelAccessor kernel_accessor; 00780 00781 NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul); 00782 00783 s = s + s; 00784 s = kernel_accessor(ik) * s; 00785 s -= s; 00786 00787 if(mask_accessor(mul)) ...; 00788 00789 dest_accessor.set( 00790 NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul); 00791 00792 NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik); 00793 00794 k += k; 00795 k -= k; 00796 k = k / k; 00797 00798 \endcode 00799 00800 <b> Preconditions:</b> 00801 00802 \code 00803 kul.x <= 0 00804 kul.y <= 0 00805 klr.x >= 0 00806 klr.y >= 0 00807 src_lr.x - src_ul.x >= klr.x + kul.x + 1 00808 src_lr.y - src_ul.y >= klr.y + kul.y + 1 00809 border == BORDER_TREATMENT_CLIP || border == BORDER_TREATMENT_AVOID 00810 \endcode 00811 00812 Sum of kernel elements must be != 0. 
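
    If only the values under the mask are of interest, the interpolated values outside the
    mask can be discarded afterwards with \ref copyImageIf(), as mentioned above. A small
    sketch (assuming a further image <TT>masked_dest</TT> of the same size and
    <b>\#include</b> "vigra/copyimage.hxx"):

    \code
    vigra::normalizedConvolveImage(srcImageRange(src), maskImage(mask), destImage(dest), kernel2d(binom));

    // keep only the destination pixels under the mask
    vigra::copyImageIf(srcImageRange(dest), maskImage(mask), destImage(masked_dest));
    \endcode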
00813 00814 */ 00815 template <class SrcIterator, class SrcAccessor, 00816 class DestIterator, class DestAccessor, 00817 class MaskIterator, class MaskAccessor, 00818 class KernelIterator, class KernelAccessor> 00819 void 00820 normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc, 00821 MaskIterator mul, MaskAccessor am, 00822 DestIterator dest_ul, DestAccessor dest_acc, 00823 KernelIterator ki, KernelAccessor ak, 00824 Diff2D kul, Diff2D klr, BorderTreatmentMode border) 00825 { 00826 vigra_precondition((border == BORDER_TREATMENT_CLIP || 00827 border == BORDER_TREATMENT_AVOID), 00828 "normalizedConvolveImage(): " 00829 "Border treatment must be BORDER_TREATMENT_CLIP or BORDER_TREATMENT_AVOID."); 00830 00831 vigra_precondition(kul.x <= 0 && kul.y <= 0, 00832 "normalizedConvolveImage(): left borders must be <= 0."); 00833 vigra_precondition(klr.x >= 0 && klr.y >= 0, 00834 "normalizedConvolveImage(): right borders must be >= 0."); 00835 00836 // use traits to determine SumType as to prevent possible overflow 00837 typedef typename 00838 NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType; 00839 typedef typename 00840 NumericTraits<typename KernelAccessor::value_type>::RealPromote KSumType; 00841 typedef 00842 NumericTraits<typename DestAccessor::value_type> DestTraits; 00843 00844 // calculate width and height of the image 00845 int w = src_lr.x - src_ul.x; 00846 int h = src_lr.y - src_ul.y; 00847 int kernel_width = klr.x - kul.x + 1; 00848 int kernel_height = klr.y - kul.y + 1; 00849 00850 int x,y; 00851 int ystart = (border == BORDER_TREATMENT_AVOID) ? klr.y : 0; 00852 int yend = (border == BORDER_TREATMENT_AVOID) ? h+kul.y : h; 00853 int xstart = (border == BORDER_TREATMENT_AVOID) ? klr.x : 0; 00854 int xend = (border == BORDER_TREATMENT_AVOID) ? w+kul.x : w; 00855 00856 // create y iterators 00857 DestIterator yd = dest_ul + Diff2D(xstart, ystart); 00858 SrcIterator ys = src_ul + Diff2D(xstart, ystart); 00859 MaskIterator ym = mul + Diff2D(xstart, ystart); 00860 00861 KSumType norm = ak(ki); 00862 int xx, yy; 00863 KernelIterator yk = ki + klr; 00864 for(yy=0; yy<kernel_height; ++yy, --yk.y) 00865 { 00866 KernelIterator xk = yk; 00867 00868 for(xx=0; xx<kernel_width; ++xx, --xk.x) 00869 { 00870 norm += ak(xk); 00871 } 00872 } 00873 norm -= ak(ki); 00874 00875 00876 for(y=ystart; y < yend; ++y, ++ys.y, ++yd.y, ++ym.y) 00877 { 00878 // create x iterators 00879 DestIterator xd(yd); 00880 SrcIterator xs(ys); 00881 MaskIterator xm(ym); 00882 00883 for(x=xstart; x < xend; ++x, ++xs.x, ++xd.x, ++xm.x) 00884 { 00885 // how much of the kernel fits into the image ? 00886 int x0, y0, x1, y1; 00887 00888 y0 = (y<klr.y) ? -y : -klr.y; 00889 y1 = (h-y-1<-kul.y) ? h-y-1 : -kul.y; 00890 x0 = (x<klr.x) ? -x : -klr.x; 00891 x1 = (w-x-1<-kul.x) ? 
w-x-1 : -kul.x; 00892 00893 bool first = true; 00894 // init the sum 00895 SumType sum; 00896 KSumType ksum; 00897 00898 SrcIterator yys = xs + Diff2D(x0, y0); 00899 MaskIterator yym = xm + Diff2D(x0, y0); 00900 KernelIterator yk = ki - Diff2D(x0, y0); 00901 00902 int xx, kernel_width, kernel_height; 00903 kernel_width = x1 - x0 + 1; 00904 kernel_height = y1 - y0 + 1; 00905 for(yy=0; yy<kernel_height; ++yy, ++yys.y, --yk.y, ++yym.y) 00906 { 00907 typename SrcIterator::row_iterator xxs = yys.rowIterator(); 00908 typename SrcIterator::row_iterator xxend = xxs + kernel_width; 00909 typename MaskIterator::row_iterator xxm = yym.rowIterator(); 00910 typename KernelIterator::row_iterator xk = yk.rowIterator(); 00911 00912 for(xx=0; xxs < xxend; ++xxs.x, --xk.x, ++xxm.x) 00913 { 00914 if(!am(xxm)) continue; 00915 00916 if(first) 00917 { 00918 sum = ak(xk) * src_acc(xxs); 00919 ksum = ak(xk); 00920 first = false; 00921 } 00922 else 00923 { 00924 sum += ak(xk) * src_acc(xxs); 00925 ksum += ak(xk); 00926 } 00927 } 00928 } 00929 // store average in destination pixel 00930 if(!first && 00931 ksum != NumericTraits<KSumType>::zero()) 00932 { 00933 dest_acc.set(DestTraits::fromRealPromote((norm / ksum) * sum), xd); 00934 } 00935 } 00936 } 00937 } 00938 00939 00940 template <class SrcIterator, class SrcAccessor, 00941 class DestIterator, class DestAccessor, 00942 class MaskIterator, class MaskAccessor, 00943 class KernelIterator, class KernelAccessor> 00944 inline 00945 void normalizedConvolveImage( 00946 triple<SrcIterator, SrcIterator, SrcAccessor> src, 00947 pair<MaskIterator, MaskAccessor> mask, 00948 pair<DestIterator, DestAccessor> dest, 00949 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, 00950 BorderTreatmentMode> kernel) 00951 { 00952 normalizedConvolveImage(src.first, src.second, src.third, 00953 mask.first, mask.second, 00954 dest.first, dest.second, 00955 kernel.first, kernel.second, kernel.third, 00956 kernel.fourth, kernel.fifth); 00957 } 00958 00959 /** \brief Deprecated name of 2-dimensional normalized convolution, i.e. convolution with a mask image. 00960 00961 See \ref normalizedConvolveImage() for documentation. 
00962 00963 <b> Declarations:</b> 00964 00965 pass arguments explicitly: 00966 \code 00967 namespace vigra { 00968 template <class SrcIterator, class SrcAccessor, 00969 class MaskIterator, class MaskAccessor, 00970 class DestIterator, class DestAccessor, 00971 class KernelIterator, class KernelAccessor> 00972 void 00973 convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc, 00974 MaskIterator mul, MaskAccessor am, 00975 DestIterator dest_ul, DestAccessor dest_acc, 00976 KernelIterator ki, KernelAccessor ak, 00977 Diff2D kul, Diff2D klr, BorderTreatmentMode border); 00978 } 00979 \endcode 00980 00981 00982 use argument objects in conjunction with \ref ArgumentObjectFactories: 00983 \code 00984 namespace vigra { 00985 template <class SrcIterator, class SrcAccessor, 00986 class MaskIterator, class MaskAccessor, 00987 class DestIterator, class DestAccessor, 00988 class KernelIterator, class KernelAccessor> 00989 inline 00990 void convolveImageWithMask(triple<SrcIterator, SrcIterator, SrcAccessor> src, 00991 pair<MaskIterator, MaskAccessor> mask, 00992 pair<DestIterator, DestAccessor> dest, 00993 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, 00994 BorderTreatmentMode> kernel); 00995 } 00996 \endcode 00997 */ 00998 template <class SrcIterator, class SrcAccessor, 00999 class DestIterator, class DestAccessor, 01000 class MaskIterator, class MaskAccessor, 01001 class KernelIterator, class KernelAccessor> 01002 inline void 01003 convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc, 01004 MaskIterator mul, MaskAccessor am, 01005 DestIterator dest_ul, DestAccessor dest_acc, 01006 KernelIterator ki, KernelAccessor ak, 01007 Diff2D kul, Diff2D klr, BorderTreatmentMode border) 01008 { 01009 normalizedConvolveImage(src_ul, src_lr, src_acc, 01010 mul, am, 01011 dest_ul, dest_acc, 01012 ki, ak, kul, klr, border); 01013 } 01014 01015 template <class SrcIterator, class SrcAccessor, 01016 class DestIterator, class DestAccessor, 01017 class MaskIterator, class MaskAccessor, 01018 class KernelIterator, class KernelAccessor> 01019 inline 01020 void convolveImageWithMask( 01021 triple<SrcIterator, SrcIterator, SrcAccessor> src, 01022 pair<MaskIterator, MaskAccessor> mask, 01023 pair<DestIterator, DestAccessor> dest, 01024 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, 01025 BorderTreatmentMode> kernel) 01026 { 01027 normalizedConvolveImage(src.first, src.second, src.third, 01028 mask.first, mask.second, 01029 dest.first, dest.second, 01030 kernel.first, kernel.second, kernel.third, 01031 kernel.fourth, kernel.fifth); 01032 } 01033 01034 //@} 01035 01036 /********************************************************/ 01037 /* */ 01038 /* Kernel2D */ 01039 /* */ 01040 /********************************************************/ 01041 01042 /** \brief Generic 2 dimensional convolution kernel. 01043 01044 This kernel may be used for convolution of 2 dimensional signals. 01045 01046 Convolution functions access the kernel via an ImageIterator 01047 which they get by calling \ref center(). This iterator 01048 points to the center of the kernel. The kernel's size is given by its upperLeft() 01049 (upperLeft().x <= 0, upperLeft().y <= 0) 01050 and lowerRight() (lowerRight().x >= 0, lowerRight().y >= 0) methods. 01051 The desired border treatment mode is returned by borderTreatment(). 01052 (Note that the \ref StandardConvolution "2D convolution functions" don't currently 01053 support all modes.) 
01054 01055 The different init functions create a kernel with the specified 01056 properties. The requirements for the kernel's value_type depend 01057 on the init function used. At least NumericTraits must be defined. 01058 01059 The kernel defines a factory function kernel2d() to create an argument object 01060 (see \ref KernelArgumentObjectFactories). 01061 01062 <b> Usage:</b> 01063 01064 <b>\#include</b> "<a href="stdconvolution_8hxx-source.html">vigra/stdconvolution.hxx</a>"<br> 01065 Namespace: vigra 01066 01067 \code 01068 vigra::FImage src(w,h), dest(w,h); 01069 ... 01070 01071 // define horizontal Sobel filter 01072 vigra::Kernel2D<float> sobel; 01073 01074 sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = // upper left and lower right 01075 0.125, 0.0, -0.125, 01076 0.25, 0.0, -0.25, 01077 0.125, 0.0, -0.125; 01078 01079 vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel)); 01080 \endcode 01081 01082 <b> Required Interface:</b> 01083 01084 \code 01085 value_type v = NumericTraits<value_type>::one(); 01086 \endcode 01087 01088 See also the init functions. 01089 */ 01090 template <class ARITHTYPE> 01091 class Kernel2D 01092 { 01093 public: 01094 /** the kernel's value type 01095 */ 01096 typedef ARITHTYPE value_type; 01097 01098 /** 2D random access iterator over the kernel's values 01099 */ 01100 typedef typename BasicImage<value_type>::traverser Iterator; 01101 01102 /** const 2D random access iterator over the kernel's values 01103 */ 01104 typedef typename BasicImage<value_type>::const_traverser ConstIterator; 01105 01106 /** the kernel's accessor 01107 */ 01108 typedef typename BasicImage<value_type>::Accessor Accessor; 01109 01110 /** the kernel's const accessor 01111 */ 01112 typedef typename BasicImage<value_type>::ConstAccessor ConstAccessor; 01113 01114 struct InitProxy 01115 { 01116 typedef typename 01117 BasicImage<value_type>::ScanOrderIterator Iterator; 01118 01119 InitProxy(Iterator i, int count, value_type & norm) 01120 : iter_(i), base_(i), 01121 count_(count), sum_(count), 01122 norm_(norm) 01123 {} 01124 01125 ~InitProxy() 01126 { 01127 vigra_precondition(count_ == 1 || count_ == sum_, 01128 "Kernel2D::initExplicitly(): " 01129 "Too few init values."); 01130 } 01131 01132 InitProxy & operator,(value_type const & v) 01133 { 01134 if(count_ == sum_) norm_ = *iter_; 01135 01136 --count_; 01137 vigra_precondition(count_ > 0, 01138 "Kernel2D::initExplicitly(): " 01139 "Too many init values."); 01140 01141 norm_ += v; 01142 01143 ++iter_; 01144 *iter_ = v; 01145 01146 return *this; 01147 } 01148 01149 Iterator iter_, base_; 01150 int count_, sum_; 01151 value_type & norm_; 01152 }; 01153 01154 static value_type one() { return NumericTraits<value_type>::one(); } 01155 01156 /** Default constructor. 01157 Creates a kernel of size 1x1 which would copy the signal 01158 unchanged. 01159 */ 01160 Kernel2D() 01161 : kernel_(1, 1, one()), 01162 left_(0, 0), 01163 right_(0, 0), 01164 norm_(one()), 01165 border_treatment_(BORDER_TREATMENT_CLIP) 01166 {} 01167 01168 /** Copy constructor. 01169 */ 01170 Kernel2D(Kernel2D const & k) 01171 : kernel_(k.kernel_), 01172 left_(k.left_), 01173 right_(k.right_), 01174 norm_(k.norm_), 01175 border_treatment_(k.border_treatment_) 01176 {} 01177 01178 /** Copy assignment. 
01179 */ 01180 Kernel2D & operator=(Kernel2D const & k) 01181 { 01182 if(this != &k) 01183 { 01184 kernel_ = k.kernel_; 01185 left_ = k.left_; 01186 right_ = k.right_; 01187 norm_ = k.norm_; 01188 border_treatment_ = k.border_treatment_; 01189 } 01190 return *this; 01191 } 01192 01193 /** Initialization. 01194 This initializes the kernel with the given constant. The norm becomes 01195 v*width()*height(). 01196 01197 Instead of a single value an initializer list of length width()*height() 01198 can be used like this: 01199 01200 \code 01201 vigra::Kernel2D<float> binom; 01202 01203 binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = 01204 0.0625, 0.125, 0.0625, 01205 0.125, 0.25, 0.125, 01206 0.0625, 0.125, 0.0625; 01207 \endcode 01208 01209 In this case, the norm will be set to the sum of the init values. 01210 An initializer list of wrong length will result in a run-time error. 01211 */ 01212 InitProxy operator=(value_type const & v) 01213 { 01214 int size = (right_.x - left_.x + 1) * 01215 (right_.y - left_.y + 1); 01216 kernel_ = v; 01217 norm_ = (double)size*v; 01218 01219 return InitProxy(kernel_.begin(), size, norm_); 01220 } 01221 01222 /** Destructor. 01223 */ 01224 ~Kernel2D() 01225 {} 01226 01227 /** Init the 2D kernel as the cartesian product of two 1D kernels 01228 of type \ref Kernel1D. The norm becomes the product of the two original 01229 norms. 01230 01231 <b> Required Interface:</b> 01232 01233 The kernel's value_type must be a linear algebra. 01234 01235 \code 01236 vigra::Kernel2D<...>::value_type v; 01237 v = v * v; 01238 \endcode 01239 */ 01240 void initSeparable(Kernel1D<value_type> & kx, 01241 Kernel1D<value_type> & ky) 01242 { 01243 left_ = Diff2D(kx.left(), ky.left()); 01244 right_ = Diff2D(kx.right(), ky.right()); 01245 int w = right_.x - left_.x + 1; 01246 int h = right_.y - left_.y + 1; 01247 kernel_.resize(w, h); 01248 01249 norm_ = kx.norm() * ky.norm(); 01250 01251 typedef typename Kernel1D<value_type>::Iterator KIter; 01252 typename Kernel1D<value_type>::Accessor ka; 01253 01254 KIter kiy = ky.center() + left_.y; 01255 Iterator iy = center() + left_; 01256 01257 for(int y=left_.y; y<=right_.y; ++y, ++kiy, ++iy.y) 01258 { 01259 KIter kix = kx.center() + left_.x; 01260 Iterator ix = iy; 01261 for(int x=left_.x; x<=right_.x; ++x, ++kix, ++ix.x) 01262 { 01263 *ix = ka(kix) * ka(kiy); 01264 } 01265 } 01266 } 01267 01268 /** Init the 2D kernel as the cartesian product of two 1D kernels 01269 given explicitly by iterators and sizes. The norm becomes the 01270 sum of the resulting kernel values. 01271 01272 <b> Required Interface:</b> 01273 01274 The kernel's value_type must be a linear algebra. 
01275 01276 \code 01277 vigra::Kernel2D<...>::value_type v; 01278 v = v * v; 01279 v += v; 01280 \endcode 01281 01282 <b> Preconditions:</b> 01283 01284 \code 01285 xleft <= 0; 01286 xright >= 0; 01287 yleft <= 0; 01288 yright >= 0; 01289 \endcode 01290 */ 01291 template <class KernelIterator> 01292 void initSeparable(KernelIterator kxcenter, int xleft, int xright, 01293 KernelIterator kycenter, int yleft, int yright) 01294 { 01295 vigra_precondition(xleft <= 0 && yleft <= 0, 01296 "Kernel2D::initSeparable(): left borders must be <= 0."); 01297 vigra_precondition(xright >= 0 && yright >= 0, 01298 "Kernel2D::initSeparable(): right borders must be >= 0."); 01299 01300 left_ = Point2D(xleft, yleft); 01301 right_ = Point2D(xright, yright); 01302 01303 int w = right_.x - left_.x + 1; 01304 int h = right_.y - left_.y + 1; 01305 kernel_.resize(w, h); 01306 01307 KernelIterator kiy = kycenter + left_.y; 01308 Iterator iy = center() + left_; 01309 01310 for(int y=left_.y; y<=right_.y; ++y, ++kiy, ++iy.y) 01311 { 01312 KernelIterator kix = kxcenter + left_.x; 01313 Iterator ix = iy; 01314 for(int x=left_.x; x<=right_.x; ++x, ++kix, ++ix.x) 01315 { 01316 *ix = *kix * *kiy; 01317 } 01318 } 01319 01320 typename BasicImage<value_type>::iterator i = kernel_.begin(); 01321 typename BasicImage<value_type>::iterator iend = kernel_.end(); 01322 norm_ = *i; 01323 ++i; 01324 01325 for(; i!= iend; ++i) 01326 { 01327 norm_ += *i; 01328 } 01329 } 01330 01331 /** Init the 2D kernel as a circular averaging filter. The norm will be 01332 calculated as 01333 <TT>NumericTraits<value_type>::one() / (number of non-zero kernel values)</TT>. 01334 The kernel's value_type must be a linear space. 01335 01336 <b> Required Interface:</b> 01337 01338 \code 01339 value_type v = vigra::NumericTraits<value_type>::one(); 01340 01341 double d; 01342 v = d * v; 01343 \endcode 01344 01345 <b> Precondition:</b> 01346 01347 \code 01348 radius > 0; 01349 \endcode 01350 */ 01351 void initDisk(int radius) 01352 { 01353 vigra_precondition(radius > 0, 01354 "Kernel2D::initDisk(): radius must be > 0."); 01355 01356 left_ = Point2D(-radius, -radius); 01357 right_ = Point2D(radius, radius); 01358 int w = right_.x - left_.x + 1; 01359 int h = right_.y - left_.y + 1; 01360 kernel_.resize(w, h); 01361 norm_ = NumericTraits<value_type>::one(); 01362 01363 kernel_ = NumericTraits<value_type>::zero(); 01364 double count = 0.0; 01365 01366 Iterator k = center(); 01367 double r2 = (double)radius*radius; 01368 01369 int i; 01370 for(i=0; i<= radius; ++i) 01371 { 01372 double r = (double) i - 0.5; 01373 int w = (int)(VIGRA_CSTD::sqrt(r2 - r*r) + 0.5); 01374 for(int j=-w; j<=w; ++j) 01375 { 01376 k(j, i) = NumericTraits<value_type>::one(); 01377 k(j, -i) = NumericTraits<value_type>::one(); 01378 count += (i != 0) ? 2.0 : 1.0; 01379 } 01380 } 01381 01382 count = 1.0 / count; 01383 01384 for(int y=-radius; y<=radius; ++y) 01385 { 01386 for(int x=-radius; x<=radius; ++x) 01387 { 01388 k(x,y) = count * k(x,y); 01389 } 01390 } 01391 } 01392 01393 /** Init the kernel by an explicit initializer list. 01394 The upper left and lower right corners of the kernel must be passed. 01395 A comma-separated initializer list is given after the assignment operator. 
01396 This function is used like this: 01397 01398 \code 01399 // define horizontal Sobel filter 01400 vigra::Kernel2D<float> sobel; 01401 01402 sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = 01403 0.125, 0.0, -0.125, 01404 0.25, 0.0, -0.25, 01405 0.125, 0.0, -0.125; 01406 \endcode 01407 01408 The norm is set to the sum of the initialzer values. If the wrong number of 01409 values is given, a run-time error results. It is, however, possible to give 01410 just one initializer. This creates an averaging filter with the given constant: 01411 01412 \code 01413 vigra::Kernel2D<float> average3x3; 01414 01415 average3x3.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = 1.0/9.0; 01416 \endcode 01417 01418 Here, the norm is set to value*width()*height(). 01419 01420 <b> Preconditions:</b> 01421 01422 \code 01423 1. upperleft.x <= 0; 01424 2. upperleft.y <= 0; 01425 3. lowerright.x >= 0; 01426 4. lowerright.y >= 0; 01427 5. the number of values in the initializer list 01428 is 1 or equals the size of the kernel. 01429 \endcode 01430 */ 01431 Kernel2D & initExplicitly(Diff2D upperleft, Diff2D lowerright) 01432 { 01433 vigra_precondition(upperleft.x <= 0 && upperleft.y <= 0, 01434 "Kernel2D::initExplicitly(): left borders must be <= 0."); 01435 vigra_precondition(lowerright.x >= 0 && lowerright.y >= 0, 01436 "Kernel2D::initExplicitly(): right borders must be >= 0."); 01437 01438 left_ = Point2D(upperleft); 01439 right_ = Point2D(lowerright); 01440 01441 int w = right_.x - left_.x + 1; 01442 int h = right_.y - left_.y + 1; 01443 kernel_.resize(w, h); 01444 01445 return *this; 01446 } 01447 01448 /** Coordinates of the upper left corner of the kernel. 01449 */ 01450 Point2D upperLeft() const { return left_; } 01451 01452 /** Coordinates of the lower right corner of the kernel. 01453 */ 01454 Point2D lowerRight() const { return right_; } 01455 01456 /** Width of the kernel. 01457 */ 01458 int width() const { return right_.x - left_.x + 1; } 01459 01460 /** Height of the kernel. 01461 */ 01462 int height() const { return right_.y - left_.y + 1; } 01463 01464 /** ImageIterator that points to the center of the kernel (coordinate (0,0)). 01465 */ 01466 Iterator center() { return kernel_.upperLeft() - left_; } 01467 01468 /** ImageIterator that points to the center of the kernel (coordinate (0,0)). 01469 */ 01470 ConstIterator center() const { return kernel_.upperLeft() - left_; } 01471 01472 /** Access kernel entry at given position. 01473 */ 01474 value_type & operator()(int x, int y) 01475 { return kernel_[Diff2D(x,y) - left_]; } 01476 01477 /** Read kernel entry at given position. 01478 */ 01479 value_type operator()(int x, int y) const 01480 { return kernel_[Diff2D(x,y) - left_]; } 01481 01482 /** Access kernel entry at given position. 01483 */ 01484 value_type & operator[](Diff2D const & d) 01485 { return kernel_[d - left_]; } 01486 01487 /** Read kernel entry at given position. 01488 */ 01489 value_type operator[](Diff2D const & d) const 01490 { return kernel_[d - left_]; } 01491 01492 /** Norm of the kernel (i.e. sum of its elements). 01493 */ 01494 value_type norm() const { return norm_; } 01495 01496 /** The kernels default accessor. 01497 */ 01498 Accessor accessor() { return Accessor(); } 01499 01500 /** The kernels default const accessor. 01501 */ 01502 ConstAccessor accessor() const { return ConstAccessor(); } 01503 01504 /** Normalize the kernel to the given value. (The norm is the sum of all kernel 01505 elements.) The kernel's value_type must be a division algebra or 01506 algebraic field. 
01507 01508 <b> Required Interface:</b> 01509 01510 \code 01511 value_type v = vigra::NumericTraits<value_type>::one(); // if norm is not 01512 // given explicitly 01513 01514 v += v; 01515 v = v * v; 01516 v = v / v; 01517 \endcode 01518 */ 01519 void normalize(value_type norm) 01520 { 01521 typename BasicImage<value_type>::iterator i = kernel_.begin(); 01522 typename BasicImage<value_type>::iterator iend = kernel_.end(); 01523 typename NumericTraits<value_type>::RealPromote sum = *i; 01524 ++i; 01525 01526 for(; i!= iend; ++i) 01527 { 01528 sum += *i; 01529 } 01530 01531 sum = norm / sum; 01532 i = kernel_.begin(); 01533 for(; i != iend; ++i) 01534 { 01535 *i = *i * sum; 01536 } 01537 01538 norm_ = norm; 01539 } 01540 01541 /** Normalize the kernel to norm 1. 01542 */ 01543 void normalize() 01544 { 01545 normalize(one()); 01546 } 01547 01548 /** current border treatment mode 01549 */ 01550 BorderTreatmentMode borderTreatment() const 01551 { return border_treatment_; } 01552 01553 /** Set border treatment mode. 01554 Only <TT>BORDER_TREATMENT_CLIP</TT> and <TT>BORDER_TREATMENT_AVOID</TT> are currently 01555 allowed. 01556 */ 01557 void setBorderTreatment( BorderTreatmentMode new_mode) 01558 { 01559 vigra_precondition((new_mode == BORDER_TREATMENT_CLIP || 01560 new_mode == BORDER_TREATMENT_AVOID || 01561 new_mode == BORDER_TREATMENT_REFLECT || 01562 new_mode == BORDER_TREATMENT_REPEAT || 01563 new_mode == BORDER_TREATMENT_WRAP), 01564 "convolveImage():\n" 01565 " Border treatment must be one of follow treatments:\n" 01566 " - BORDER_TREATMENT_CLIP\n" 01567 " - BORDER_TREATMENT_AVOID\n" 01568 " - BORDER_TREATMENT_REFLECT\n" 01569 " - BORDER_TREATMENT_REPEAT\n" 01570 " - BORDER_TREATMENT_WRAP\n"); 01571 01572 border_treatment_ = new_mode; 01573 } 01574 01575 01576 private: 01577 BasicImage<value_type> kernel_; 01578 Point2D left_, right_; 01579 value_type norm_; 01580 BorderTreatmentMode border_treatment_; 01581 }; 01582 01583 /**************************************************************/ 01584 /* */ 01585 /* Argument object factories for Kernel2D */ 01586 /* */ 01587 /* (documentation: see vigra/convolution.hxx) */ 01588 /* */ 01589 /**************************************************************/ 01590 01591 template <class KernelIterator, class KernelAccessor> 01592 inline 01593 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode> 01594 kernel2d(KernelIterator ik, KernelAccessor ak, Diff2D kul, Diff2D klr, 01595 BorderTreatmentMode border) 01596 01597 { 01598 return 01599 tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode> ( 01600 ik, ak, kul, klr, border); 01601 } 01602 01603 template <class T> 01604 inline 01605 tuple5<typename Kernel2D<T>::ConstIterator, 01606 typename Kernel2D<T>::ConstAccessor, 01607 Diff2D, Diff2D, BorderTreatmentMode> 01608 kernel2d(Kernel2D<T> const & k) 01609 01610 { 01611 return 01612 tuple5<typename Kernel2D<T>::ConstIterator, 01613 typename Kernel2D<T>::ConstAccessor, 01614 Diff2D, Diff2D, BorderTreatmentMode>( 01615 k.center(), 01616 k.accessor(), 01617 k.upperLeft(), k.lowerRight(), 01618 k.borderTreatment()); 01619 } 01620 01621 template <class T> 01622 inline 01623 tuple5<typename Kernel2D<T>::ConstIterator, 01624 typename Kernel2D<T>::ConstAccessor, 01625 Diff2D, Diff2D, BorderTreatmentMode> 01626 kernel2d(Kernel2D<T> const & k, BorderTreatmentMode border) 01627 01628 { 01629 return 01630 tuple5<typename Kernel2D<T>::ConstIterator, 01631 typename Kernel2D<T>::ConstAccessor, 01632 Diff2D, Diff2D, 
BorderTreatmentMode>( 01633 k.center(), 01634 k.accessor(), 01635 k.upperLeft(), k.lowerRight(), 01636 border); 01637 } 01638 01639 01640 } // namespace vigra 01641 01642 #endif // VIGRA_STDCONVOLUTION_HXX
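
A short usage sketch (not part of the header itself): Kernel2D::initSeparable() builds a 2D kernel as the cartesian product of two 1D kernels. The example assumes Kernel1D and its initGaussian() from vigra/separableconvolution.hxx, which this header already includes:

    vigra::FImage src(w, h), dest(w, h);
    ...

    // 1D Gaussian with standard deviation 1.5
    vigra::Kernel1D<float> gauss1d;
    gauss1d.initGaussian(1.5);

    // 2D Gaussian as the cartesian product of the two 1D kernels
    vigra::Kernel2D<float> gauss2d;
    gauss2d.initSeparable(gauss1d, gauss1d);

    vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(gauss2d));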
© Ullrich Köthe (koethe@informatik.uni-hamburg.de)