Codebase list libvigraimpex / cc1a1c8
New upstream version 1.11.1+dfsg Daniel Stender 5 years ago
220 changed file(s) with 17801 addition(s) and 6677 deletion(s). Raw diff Collapse all Expand all
2525 include(VigraSetDefaults)
2626 include(VigraCMakeUtils)
2727 INCLUDE_DIRECTORIES(${vigra_SOURCE_DIR}/include)
28
29 if(SUPPRESS_3RD_PARTY_WARNINGS)
30 set(SUPPRESS_WARNINGS SYSTEM)
31 endif()
2832
2933 IF(VIGRA_STATIC_LIB)
3034 SET(LIBTYPE STATIC)
151155 ENDIF()
152156
153157 if(WITH_BOOST AND Boost_FOUND)
154 INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
158 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${Boost_INCLUDE_DIR})
155159 IF(WITH_BOOST_THREAD)
156160 ADD_DEFINITIONS(-DUSE_BOOST_THREAD)
157161 ENDIF()
55
66 Copyright 1998-2013 by Ullrich Koethe
77
8
89 This file is part of the VIGRA computer vision library.
910 You may use, modify, and distribute this software according
1011 to the terms stated in the LICENSE.txt file included in
1112 the VIGRA distribution.
1213
1314 The VIGRA Website is
14 http://hci.iwr.uni-heidelberg.de/vigra/
15 http://ukoethe.github.io/vigra/
1516 Please direct questions, bug reports, and contributions to
1617 ullrich.koethe@iwr.uni-heidelberg.de or
1718 vigra@informatik.uni-hamburg.de
3132 snapshot), you find these instructions in
3233 $VIGRA_PATH/docsrc/installation.dxx
3334 or online at
34 http://hci.iwr.uni-heidelberg.de/vigra/doc/vigra/Installation.html
35 http://ukoethe.github.io/vigra/doc-release/vigra/Installation.html
3536
3637 Documentation
3738 -------------
3839
3940 If you downloaded an official release, the documentation can be found in `$VIGRA_PATH/doc/vigra/`, the start file
40 is `$VIGRA_PATH/doc/vigra/index.html`.
41 is `$VIGRA_PATH/doc/vigra/index.html` or online at http://ukoethe.github.io/vigra/#documentation.
4142
42 When you use the development version from github, you can generate documentation by `make doc`. Up-to-date
43 online documentation for the 'master' branch is regularly pushed to http://ukoethe.github.io/vigra/doc/vigra/
43 When you use the development version from github, you can generate documentation by `make doc`.
4444
4545 Download
4646 --------
4747
48 VIGRA can be downloaded at http://hci.iwr.uni-heidelberg.de/vigra/#download The official development
48 VIGRA can be downloaded at http://ukoethe.github.io/vigra/#download. The official development
4949 repository is at https://github.com/ukoethe/vigra
5050
5151 What is VIGRA
2424
2525 FIND_PATH(OPENEXR_INCLUDE_DIR ImfRgbaFile.h PATH_SUFFIXES OpenEXR)
2626
27 FIND_LIBRARY(OPENEXR_ILMIMF_LIBRARY NAMES IlmImf)
28 FIND_LIBRARY(OPENEXR_IMATH_LIBRARY NAMES Imath)
27 FOREACH(V "" -2_2 -2_1 -2_0 -1_7)
28 if(NOT OPENEXR_ILMIMF_LIBRARY)
29 FIND_LIBRARY(OPENEXR_ILMIMF_LIBRARY NAMES IlmImf${V})
30 if(OPENEXR_ILMIMF_LIBRARY)
31 set(OPENEXR_VERSION ${V})
32 endif()
33 endif()
34 ENDFOREACH(V)
35
36 FIND_LIBRARY(OPENEXR_IMATH_LIBRARY NAMES Imath${OPENEXR_VERSION})
37 FIND_LIBRARY(OPENEXR_IEX_LIBRARY NAMES Iex${OPENEXR_VERSION})
38 FIND_LIBRARY(OPENEXR_ILMTHREAD_LIBRARY NAMES IlmThread${OPENEXR_VERSION})
2939 FIND_LIBRARY(OPENEXR_HALF_LIBRARY NAMES Half)
30 FIND_LIBRARY(OPENEXR_IEX_LIBRARY NAMES Iex)
31 FIND_LIBRARY(OPENEXR_ILMTHREAD_LIBRARY NAMES IlmThread)
3240
3341 INCLUDE(FindPackageHandleStandardArgs)
34 FIND_PACKAGE_HANDLE_STANDARD_ARGS(OPENEXR DEFAULT_MSG
42 FIND_PACKAGE_HANDLE_STANDARD_ARGS(OpenEXR DEFAULT_MSG
3543 OPENEXR_HALF_LIBRARY OPENEXR_IEX_LIBRARY OPENEXR_IMATH_LIBRARY
3644 OPENEXR_ILMIMF_LIBRARY OPENEXR_INCLUDE_DIR
3745 )
3846
39 IF(OPENEXR_FOUND)
47 IF(OpenEXR_FOUND)
4048 SET(OPENEXR_LIBRARIES ${OPENEXR_ILMIMF_LIBRARY}
4149 ${OPENEXR_IMATH_LIBRARY} ${OPENEXR_HALF_LIBRARY}
4250 ${OPENEXR_IEX_LIBRARY} ${OPENEXR_ILMTHREAD_LIBRARY} )
43 ENDIF(OPENEXR_FOUND)
51
52 if(MSVC)
53 execute_process(
54 COMMAND lib /list "${OPENEXR_HALF_LIBRARY}"
55 OUTPUT_VARIABLE OPENEXR_HALF_CONTENTS)
56 if(OPENEXR_HALF_CONTENTS MATCHES "Half.dll")
57 SET(OPENEXR_CPPFLAGS -DOPENEXR_DLL)
58 endif()
59 endif()
60 ELSE()
61 SET(OPENEXR_ILMIMF_LIBRARY OPENEXR_ILMIMF_LIBRARY-NOTFOUND)
62 SET(OPENEXR_IMATH_LIBRARY OPENEXR_IMATH_LIBRARY-NOTFOUND)
63 SET(OPENEXR_IEX_LIBRARY OPENEXR_IEX_LIBRARY-NOTFOUND)
64 SET(OPENEXR_ILMTHREAD_LIBRARY OPENEXR_ILMTHREAD_LIBRARY-NOTFOUND)
65 SET(OPENEXR_HALF_LIBRARY OPENEXR_HALF_LIBRARY-NOTFOUND)
66 ENDIF()
6969 OUTPUT_VARIABLE PYTHON_LIBRARY_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
7070 execute_process(COMMAND ${PYTHON_EXECUTABLE} -c
7171 "from distutils.sysconfig import *; print(get_config_var('LIBDIR'))"
72 OUTPUT_VARIABLE PYTHON_LIBRARY_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE)
72 OUTPUT_VARIABLE PYTHON_LIBRARY_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE)
7373 ENDIF()
7474 FIND_LIBRARY(PYTHON_LIBRARIES ${PYTHON_LIBRARY_NAME} HINTS "${PYTHON_LIBRARY_PREFIX}" "${PYTHON_PREFIX}"
7575 PATH_SUFFIXES lib lib64 libs DOC "Python libraries")
119119
120120 FIND_LIBRARY(Boost_PYTHON_LIBRARY
121121 NAMES ${BOOST_PYTHON_NAMES}
122 NAMES_PER_DIR
122123 HINTS "${Boost_LIBRARY_DIR}"
123124 DOC "boost_python libraries")
124125 ENDIF()
1111
1212 if(RUN_RESULT)
1313 message(FATAL_ERROR "Failed to detect c++ version with a simple test program!\n"
14 "Test program compiled, but did not execute cleanly. Run output is shown below.\n"
15 "${VIGRA_CPP_VERSION}")
14 "Test program compiled, but did not execute cleanly. Run output is shown below.\n"
15 "${VIGRA_CPP_VERSION}")
1616 endif()
1717
1818 message(STATUS "Detected C++ version: ${VIGRA_CPP_VERSION}")
4141 OPTION(WITH_BOOST_THREAD "Use boost::thread instead of std::thread" OFF)
4242
4343 OPTION(TEST_VIGRANUMPY "Consider lack of vigranumpy or failed vigranumpy test an error?" OFF)
44
45 OPTION(SUPPRESS_3RD_PARTY_WARNINGS "Switch-off compiler warnings originating from dependencies?" ON)
4446
4547 IF(TEST_VIGRANUMPY OR NOT DEFINED WITH_VIGRANUMPY)
4648 SET(WITH_VIGRANUMPY "ON")
115117 # executed once in the first configure run.
116118 IF(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANGXX)
117119 IF(NOT CMAKE_CXX_FLAGS)
118 if(NOT MINGW AND NOT MACOSX)
119 SET(CMAKE_CXX_FLAGS "-W -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Wno-unused-variable -Wno-type-limits")
120 elseif(MACOSX)
121 SET(CMAKE_CXX_FLAGS "-W -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Wno-unused-variable")
122 endif()
120 SET(CMAKE_CXX_FLAGS "-W -Wall -Wextra")
123121 ENDIF()
124122 IF(NOT CMAKE_C_FLAGS)
125123 SET(CMAKE_C_FLAGS "-W -Wall -Wextra -pedantic -std=c99 -Wno-sign-compare")
0 @echo off
1
2 SET MyPath=%PATH%
3 rem echo %MyPath%
4 rem echo --
5
6 setlocal EnableDelayedExpansion
7
8 SET TempPath="%MyPath:;=";"%"
9 SET var=
10 FOR %%a IN (%TempPath%) DO (
11 IF exist %%~sa (
12 SET "var=!var!;%%~sa"
13 ) ELSE (
14 rem echo %%a does not exist
15 )
16 )
17
18 rem echo --
19 echo !var:~1!
531531 # directories like "/usr/src/myproject". Separate the files or directories
532532 # with spaces.
533533
534 INPUT = @PROJECT_SOURCE_DIR@/include \
535 @PROJECT_SOURCE_DIR@/docsrc
534 INPUT = @PROJECT_SOURCE_DIR@/docsrc \
535 @PROJECT_SOURCE_DIR@/include
536536
537537 # This tag can be used to specify the character encoding of the source files
538538 # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
555555 installation.dxx \
556556 tutorial.dxx \
557557 image_processing.dxx \
558 parallel_processing.dxx \
558559 *.hxx \
559560 viff.h
560561
616617 # directories that contain image that are included in the documentation (see
617618 # the \image command).
618619
619 IMAGE_PATH = @PROJECT_SOURCE_DIR@/src/images
620 IMAGE_PATH = @PROJECT_SOURCE_DIR@/src/images \
621 @PROJECT_SOURCE_DIR@/docsrc/documents
620622
621623 # The INPUT_FILTER tag can be used to specify a program that doxygen should
622624 # invoke to filter for each input file. Doxygen will invoke the filter program
11861188
11871189 PREDEFINED = DOXYGEN \
11881190 __cplusplus \
1189 VIGRA_EXPORT=
1191 VIGRA_EXPORT= \
1192 WITH_LEMON
11901193
11911194 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
11921195 # this tag can be used to specify a list of macro names that should be expanded.
1919 <li> Gunnar Kedenburg
2020 (<a href="mailto:gunnar@haquebright.de">gunnar@haquebright.de</a>)
2121 completely rewrote the image import/export library and implemented
22 much of the \ref vigra::MultiArray functionality.
22 the initial version of the \ref vigra::MultiArray functionality.
2323
2424 <li> Yevgen Reznichenko
2525 (<a href="mailto:rezniche@kogs.informatik.uni-hamburg.de">rezniche@kogs.informatik.uni-hamburg.de</a>)
2727
2828 <li> Christian Dennis Rahn
2929 (<a href="mailto:rahn@informatik.uni-hamburg.de">rahn@informatik.uni-hamburg.de</a>)
30 implemented much of the \ref MultiArrayConvolutionFilters.
30 implemented initial versions of multi-dimensional convolution filters.
3131
3232 <li> Kasim Terzic, Florian Heinrich and Benjamin Seppke
3333 implemented image analysis functionality for 3- and higher-dimensional data.
6464
6565 <li> Benjamin Seppke contributed various \ref Registration "image registration" algorithms.
6666
67 <li> John Kirkham figured out how to configure Travis to run tests for Python3 and on OS X.
68
69 <li> Philip Schill implemented version 3 of the Random Forest.
70
71 <li> David St&ouml;ckel contributed the 3D convex hull functionality.
72
6773 <li> Numerous people reported and fixed bugs and made suggestions.
6874 </ul>
6975
7076 Many thanks to all!
7177 <p>
7278
79 <b> Changes from Version 1.11.0 to 1.11.1</b>
80
81 <ul>
82 <li> Added 3D convex hull computation and features (David St&ouml;ckel).
83
84 <li> Added Random Forest version 3, inspired by LEMON's graph API, to simplify customization of RF variants (Philip Schill).
85
86 <li> Improved hierarchical clustering (Constantin Pape).
87
88 <li> Minor improvements and bug fixes in the code and documentation.
89 </ul>
90
7391 <b> Changes from Version 1.10.0 to 1.11.0</b>
7492
7593 <ul>
87105
88106 <li> Added many \ref Registration "image registration" functions.
89107
90 <li> Extended the collection of \ref MultiArrayDistanceTransform "multi-dimensional distance transform" algorithms by vectorial DT, boundary DT, and eccentricity transform.
108 <li> Extended the collection of \ref DistanceTransform "multi-dimensional distance transform" algorithms by vectorial DT, boundary DT, and eccentricity transform.
91109
92110 <li> Added \ref skeletonizeImage(), nonLocalMean(), multi-dimensional integral images.
93111
191209
192210 <li> Added \ref vigra::StridedScanOrderIterator and corresponding \ref vigra::MultiArrayView::begin().
193211
194 <li> Extended \ref vigra::MultiArrayView. Added \ref vigra::Shape1 ... \ref vigra::Shape5 convenience typedefs.
212 <li> Extended \ref vigra::MultiArrayView. Added vigra::Shape1 ... vigra::Shape5 convenience typedefs.
195213
196214 <li> Implemented \ref MultiMathModule (arithmetic and algebraic functions for multi-dimensional arrays).
197215
289307 <ul>
290308 <li> Added functions for arrays of arbitrary dimensions:
291309 <ul>
292 <li> \ref MultiArrayDistanceTransform "Euclidean distance transform"
310 <li> \ref DistanceTransform "Euclidean distance transform"
293311 <li> \ref MultiArrayMorphology "separable morphology"
294312 <li> \ref resizeMultiArraySplineInterpolation()
295313 </ul>
466484 <b> Changes from Version 1.2.0 to 1.3.0</b>
467485
468486 <ul>
469 <li> Added algorithms for \ref MultiDimensionalArrays :
470 \ref MultiPointoperators and \ref MultiArrayConvolutionFilters
487 <li> Added algorithms for multi-dimensional arrays: see
488 \ref MultiPointoperators and \ref ConvolutionFilters
471489 and the \link vigra::MultiArrayNavigator navigator utility\endlink.
472490
473491 <li> Extended \ref convolveImage() (non-separable convolution)
542560 "metaprogramming.hxx", "tuple.hxx". "utilities.hxx" now includes
543561 these other files.
544562
545 <li> Added \ref MultiDimensionalArrays and \ref VolumeImpex
563 <li> Added multi-dimensional arrays and \ref VolumeImpex
546564
547565 <li> Redesigned \ref vigra::TinyVector, added \ref vigra::TinyVectorView
548566 </ul>
588606 <li> Added \ref FourierTransform "Fourier transform" support,
589607 and \ref vigra::FFTWComplex "FFTWComplex" complex number type.
590608
591 <li> Added convolution convenience functions (see \ref CommonConvolutionFilters).
609 <li> Added convolution convenience functions (see \ref ConvolutionFilters).
592610
593611 <li> Added \ref vigra::IteratorAdaptor template for quick and
594612 easy generation of iterator adaptors.
0 /*
1
2 CollapsibleLists.js
3
4 An object allowing lists to dynamically expand and collapse
5
6 Created by Stephen Morley - http://code.stephenmorley.org/ - and released under
7 the terms of the CC0 1.0 Universal legal code:
8
9 http://creativecommons.org/publicdomain/zero/1.0/legalcode
10
11 */
12
13 // create the CollapsibleLists object
14 var CollapsibleLists =
15 new function(){
16
17 /* Makes all lists with the class 'collapsibleList' collapsible. The
18 * parameter is:
19 *
20 * doNotRecurse - true if sub-lists should not be made collapsible
21 */
22 this.apply = function(doNotRecurse){
23
24 // loop over the unordered lists
25 var uls = document.getElementsByTagName('ul');
26 for (var index = 0; index < uls.length; index ++){
27
28 // check whether this list should be made collapsible
29 if (uls[index].className.match(/(^| )collapsibleList( |$)/)){
30
31 // make this list collapsible
32 this.applyTo(uls[index], true);
33
34 // check whether sub-lists should also be made collapsible
35 if (!doNotRecurse){
36
37 // add the collapsibleList class to the sub-lists
38 var subUls = uls[index].getElementsByTagName('ul');
39 for (var subIndex = 0; subIndex < subUls.length; subIndex ++){
40 subUls[subIndex].className += ' collapsibleList';
41 }
42
43 }
44
45 }
46
47 }
48
49 };
50
51 /* Makes the specified list collapsible. The parameters are:
52 *
53 * node - the list element
54 * doNotRecurse - true if sub-lists should not be made collapsible
55 */
56 this.applyTo = function(node, doNotRecurse){
57
58 // loop over the list items within this node
59 var lis = node.getElementsByTagName('li');
60 for (var index = 0; index < lis.length; index ++){
61
62 // check whether this list item should be collapsible
63 if (!doNotRecurse || node == lis[index].parentNode){
64
65 // prevent text from being selected unintentionally
66 if (lis[index].addEventListener){
67 lis[index].addEventListener(
68 'mousedown', function (e){ e.preventDefault(); }, false);
69 }else{
70 lis[index].attachEvent(
71 'onselectstart', function(){ event.returnValue = false; });
72 }
73
74 // add the click listener
75 if (lis[index].addEventListener){
76 lis[index].addEventListener(
77 'click', createClickListener(lis[index]), false);
78 }else{
79 lis[index].attachEvent(
80 'onclick', createClickListener(lis[index]));
81 }
82
83 // close the unordered lists within this list item
84 toggle(lis[index]);
85
86 }
87
88 }
89
90 };
91
92 /* Returns a function that toggles the display status of any unordered
93 * list elements within the specified node. The parameter is:
94 *
95 * node - the node containing the unordered list elements
96 */
97 function createClickListener(node){
98
99 // return the function
100 return function(e){
101
102 // ensure the event object is defined
103 if (!e) e = window.event;
104
105 // find the list item containing the target of the event
106 var li = (e.target ? e.target : e.srcElement);
107 while (li.nodeName != 'LI') li = li.parentNode;
108
109 // toggle the state of the node if it was the target of the event
110 if (li == node) toggle(node);
111
112 };
113
114 }
115
116 /* Opens or closes the unordered list elements directly within the
117 * specified node. The parameter is:
118 *
119 * node - the node containing the unordered list elements
120 */
121 function toggle(node){
122
123 // determine whether to open or close the unordered lists
124 var open = node.className.match(/(^| )collapsibleListClosed( |$)/);
125
126 // loop over the unordered list elements with the node
127 var uls = node.getElementsByTagName('ul');
128 for (var index = 0; index < uls.length; index ++){
129
130 // find the parent list item of this unordered list
131 var li = uls[index];
132 while (li.nodeName != 'LI') li = li.parentNode;
133
134 // style the unordered list if it is directly within this node
135 if (li == node) uls[index].style.display = (open ? 'block' : 'none');
136
137 }
138
139 // remove the current class from the node
140 node.className =
141 node.className.replace(
142 /(^| )collapsibleList(Open|Closed)( |$)/, '');
143
144 // if the node contains unordered lists, set its class
145 if (uls.length > 0){
146 node.className += ' collapsibleList' + (open ? 'Open' : 'Closed');
147 }
148 else {
149 node.className += ' lastItem';
150 }
151
152 }
153
154 }();
44 <link rel="shortcut icon" href="vigra-icon.ico" />
55 <script type="text/javascript">
66 function toggleHiddenDocumentation( textID, toggleID, toggleMessage )
7 {
7 {
88 if( document.getElementById(textID).style.display == 'none' )
99 {
1010 document.getElementById(textID).style.display = 'block';
1818 return false;
1919 }
2020 </script>
21 <script>
22 var runOnLoad=function(c,o,d,e){function x(){for(e=1;c.length;)c.shift()()}o[d]?(document[d]('DOMContentLoaded',x,0),o[d]('load',x,0)):o.attachEvent('onload',x);return function(t){e?o.setTimeout(t,0):c.push(t)}}([],window,'addEventListener');
23 </script>
24 <script type="text/javascript" src="documents/CollapsibleLists.js"></script>
25 <script type="text/javascript">
26 runOnLoad(function(){ CollapsibleLists.apply(); });
27 </script>
28 <style type="text/css">
29 .collapsibleList li.collapsibleListOpen{
30 list-style-image:url('documents/pfeil.gif');
31 cursor:pointer;
32 }
33
34 .collapsibleList li.collapsibleListClosed{
35 list-style-image:url('documents/bullet.gif');
36 cursor:pointer;
37 }
38
39 .collapsibleList li.lastItem{
40 list-style-image:url('documents/diamond.gif');
41 }
42 </style>
2143 </head>
2244 <body bgcolor="#f8f0e0" link="#0040b0" vlink="#a00040">
2345 <basefont face="Helvetica,Arial,sans-serif" size=3>
2446 <p align=right>
25 [ <a href="http://hci.iwr.uni-heidelberg.de/vigra/">VIGRA Homepage</a> |
47 [ <a href="http://ukoethe.github.io/vigra/">VIGRA Homepage</a> |
2648 <a href="functionindex.html">Function Index</a> |
2749 <a href="classes.html">Class Index</a> |
2850 <a href="namespaces.html">Namespaces</a> |
00 /** \page ImageProcessingTutorial Image Processing
11
22 <h2>Section Contents</h2>
3
3
44 In this chapter we'll use VIGRA's methods for some applications of Image Processing.
55
6 <ul style="list-style-image:url(documents/bullet.gif)">
6 <ul style="list-style-image:url(documents/diamond.gif)">
77 <li> \ref CallingConventions
88 <li> \ref ImageInversion
99 <li> \ref ImageBlending
1010 <li> \ref CompositeImage
1111 <li> \ref SmoothingTutorial
12 <ul type="disc">
12 <ul>
1313 <li> \ref Convolve2DTutorial
1414 <li> \ref SeparableConvolveTutorial
15 <li> \ref ParallelConvolveTutorial
1516 </ul>
1617 </ul>
1718
1819 \section CallingConventions Calling Conventions
19
20
2021 VIGRA's image processing functions follow a uniform calling convention: The argument list starts with the input images or arrays, followed by the output images or arrays, followed by the function's parameters (if any). Some functions additionally accept an option object that allows more fine-grained control of the function's actions and must be passed as the last argument. Most functions assume that the output arrays already have the appropriate shape.
21
22
2223 All functions working on arrays expect their arguments to be passed as \ref vigra::MultiArrayView instances. Functions that only support 2-dimensional images usually contain the term "Image" in their name, whereas functions that act on arbitrary many dimensions usually contain the term "Multi" in their name. <br/>
23 Examples:
24 Examples:
2425 \code
2526 // determine the connected components in a binary image, using the 8-neighborhood
2627 MultiArray<2, UInt8> image(width, height);
2728 MultiArray<2, UInt32> labels(width, height);
2829 ... // fill image
2930 labelImage(image, labels, true);
30
31
3132 // smooth a 3D array with a gaussian filter with sigma=2.0
3233 MultiArray<3, float> volume(Shape3(300, 200, 100)),
3334 smoothed(Shape3(300, 200, 100));
3435 ... // fill volume
3536 gaussianSmoothMultiArray(volume, smoothed, 2.0);
36
37
3738 // compute the determinant of a 5x5 matrix
3839 MultiArray<2, float> matrix(Shape2(5, 5));
3940 ... // fill matrix with data
4041 float det = linalg::determinant(matrix);
4142 \endcode
42
43
4344 For historical reasons, VIGRA also supports two alternative APIs in terms of iterators. These APIs used to be considerably faster, but meanwhile compilers and processors have improved to the point where the much simpler MultiArrayView API no longer imposes a significant abstraction penalty. While there are no plans to remove support for the old APIs, they should not be used in new code.
44
45
4546 <ul>
4647 <li> Functions on 2-dimensional images may support an \ref ImageIterators API. These iterators are best passed to the functions via the convenience functions <tt>srcImageRange(array)</tt>, <tt>srcImage(array)</tt>, and <tt>destImage(array)</tt>. A detailed description of the convenience functions can be found in section \ref ArgumentObjectFactories. Example:
4748 \code
5152 ... // fill input with data
5253 transformImage(srcImageRange(input), destImage(result), &sqrt); // deprecated API
5354 \endcode
54
55
5556 <li> Functions for arbitrary-dimensional arrays may support hierarchical \ref MultiIteratorPage. These iterators are best passed to the functions via the convenience functions <tt>srcMultiArrayRange(array)</tt>, <tt>srcMultiArray(array)</tt>, and <tt>destMultiArray(array)</tt>. A detailed description of these convenience functions can also be found in section \ref ArgumentObjectFactories. Example:
5657 \code
5758 // compute the element-wise square root of a 4-dimensional array
6061 ... // fill input with data
6162 transformMultiArray(srcMultiArrayRange(input), destMultiArray(result), &sqrt); // deprecated API
6263 \endcode
63
64
6465 \section ImageInversion Inverting an Image
65
66 Inverting an (gray scale) image is quite easy. We just need to subtract every pixel's
67 value from white (255). This simple task doesn't need an explicit function call at all, but is best solved with an arithmetic expression implemented in namespace \ref MultiMathModule. To avoid possible overload ambiguities,
66
67 Inverting an (gray scale) image is quite easy. We just need to subtract every pixel's
68 value from white (255). This simple task doesn't need an explicit function call at all, but is best solved with an arithmetic expression implemented in namespace \ref MultiMathModule. To avoid possible overload ambiguities,
6869 you must explicitly activate array arithmetic via the command <tt>using namespace vigra::multi_math</tt> before use. To invert <tt>imageArray</tt> and overwrite its original contents, you write:
6970
7071 \code
8485 </Table>
8586
8687 \section ImageBlending Image Blending
87
88 In this example, we have two input images and want to blend them into one another.
88
89 In this example, we have two input images and want to blend them into one another.
8990 In the combined output image every pixel value is the mean of the two appropriate original pixels. This is also best solved with array arithmetic:
9091
9192 \code
9495 \endcode
9596
9697 Since it is not guaranteed that the two input images have the same shape, we first
97 determine the maximum possible shape of the blended image, which equals the minimum
98 size along each axis. With the help of subarray-method we just blend the appropriate
99 parts of the two images. These parts (subimages) are aligned around the centers
98 determine the maximum possible shape of the blended image, which equals the minimum
99 size along each axis. With the help of subarray-method we just blend the appropriate
100 parts of the two images. These parts (subimages) are aligned around the centers
100101 of the original images.
101102
102103 Here's the code:
112113 </table>
113114
114115 \section CompositeImage Creating a Composite Image
115
116 Let's come to a little gimmick. Given one input image we want to create a composite image
116
117 Let's come to a little gimmick. Given one input image we want to create a composite image
117118 of 4 images reflected with respect to each other. The result resembles the effect of a
118 kaleidoscope. Two of VIGRA's functions are sufficient for this purpose: \ref MultiArray_subarray
119 and \ref reflectImage(). Input and output images of reflectImage() are specified by MultiArrayViews.
119 kaleidoscope. Two of VIGRA's functions are sufficient for this purpose: \ref MultiArray_subarray
120 and \ref reflectImage(). Input and output images of reflectImage() are specified by MultiArrayViews.
120121 The third parameter specifies the desired reflection axis. The axis can either
121122 be horizontal, vertical or both (as in this example):
122123
136137 </Table>
137138
138139 \section SmoothingTutorial Smoothing
139
140
140141 \subsection Convolve2DTutorial 2-dimensional Convolution
141
142 There are many different ways to smooth an image. Before we use VIGRA's methods, we
143 want to write a smoothing code of our own. The idea is to choose each pixel in turn and
142
143 There are many different ways to smooth an image. Before we use VIGRA's methods, we
144 want to write a smoothing code of our own. The idea is to choose each pixel in turn and
144145 replace it with the mean of itself and the pixels in 5x5 window around it.
145 To calculate the mean in a window, we can just divide the sum of the pixel values
146 within the corresponding subarray by their number. MultiArrayView provides two useful
147 methods for doing this: <tt>sum</tt> and <tt>size</tt>.
148 In our code we iterate over every pixel, construct the surrounding 5x5 window via
146 To calculate the mean in a window, we can just divide the sum of the pixel values
147 within the corresponding subarray by their number. MultiArrayView provides two useful
148 methods for doing this: <tt>sum</tt> and <tt>size</tt>.
149 In our code we iterate over every pixel, construct the surrounding 5x5 window via
149150 <tt>subarray</tt>, and write the average of the window into the corresponding output pixel.
150151 Near the borders of the image we truncate the window appropriately so that it remains
151152 inside the image, and only take the average over the actually existing neighbours of the pixel.
161162 </TR>
162163 </Table>
163164
164 The technical term for this kind of operation is <i>convolution</i>. VIGRA provides
165 <dfn>convolveImage</dfn> as a comfortable way to perform 2-dimensional convolutions
165 The technical term for this kind of operation is <i>convolution</i>. VIGRA provides
166 <dfn>convolveImage</dfn> as a comfortable way to perform 2-dimensional convolutions
166167 with arbitrary filters. You may use it as follows:
167168
168169 \code
170171 \endcode
171172
172173 The filter or <i>convolution kernel</i> is given as argument object by <dfn>kernel2d()</dfn>.
173 To implement the above smoothing by taking averages in 3x3 windows, you need an averaging
174 kernel with radius 1. Kernel truncation near the image borders is performed when the
174 To implement the above smoothing by taking averages in 3x3 windows, you need an averaging
175 kernel with radius 1. Kernel truncation near the image borders is performed when the
175176 filter's border treatment mode is set to <tt>BORDER_TREATMENT_CLIP</tt>:
176
177
177178 \code
178179 Kernel2D<double> filter;
179180 filter.initAveraging(1);
180181 filter.setBorderTreatment(BORDER_TREATMENT_CLIP);
181182 \endcode
182
183
183184 By default, VIGRA's convolution functions use <tt>BORDER_TREATMENT_REFLECT</tt> (i.e. the
184185 image is virtually enlarged by reflecting the pixel values about the border), which usually
185 leads to superior results. The strength of smoothing can be controlled by increasing the filter
186 leads to superior results. The strength of smoothing can be controlled by increasing the filter
186187 radius.
187
188 Another improvement over simple averaging can be achieved when one takes a <i>weighted
188
189 Another improvement over simple averaging can be achieved when one takes a <i>weighted
189190 average</i> such that pixels near the center have more influence on the result.
190191 A popular choice here is the 5x5 binomial filter. VIGRA allows to specify arbitrary filter
191192 shapes and coefficients via the <tt>Kernel2D::initExplicitly()</tt>:
192
193
193194 \code
194195 Kernel2D<float> filter;
195
196
196197 // specify filter shape (lower right corner is inclusive here!)
197198 filter.initExplicitly(Shape2(-2,-2), Shape2(2,2));
198
199
199200 // specify filter coefficients
200201 filter = 1.0/256.0, 4.0/256.0, 6.0/256.0, 4.0/256.0, 1.0/256.0,
201202 4.0/256.0, 16.0/256.0, 24.0/256.0, 16.0/256.0, 4.0/256.0,
202203 6.0/256.0, 24.0/256.0, 36.0/256.0, 24.0/256.0, 6.0/256.0,
203204 4.0/256.0, 16.0/256.0, 24.0/256.0, 16.0/256.0, 4.0/256.0,
204205 1.0/256.0, 4.0/256.0, 6.0/256.0, 4.0/256.0, 1.0/256.0;
205
206
206207 // apply filter
207208 convolveImage(inputImage, resultImage, filter);
208209 \endcode
209
210 <tt>initExplicitly()</tt> receives the upper left and lower right corners of the
210
211 <tt>initExplicitly()</tt> receives the upper left and lower right corners of the
211212 filter window. Note that the lower right corner here is <i>included</i> in the window,
212 in contrast to <tt>MultiArray::subarray()</tt> where the end point is not included.
213
214 The filter weights are provided in a comma separated list. Normally, the sum of the
215 coefficients should be 1 in order to preserve the average intensity of the image.
216 You must provide either as many coefficients as needed for the given filter size,
217 or exactly one value which will be used for all filter coefficients. Thus, the 3x3
213 in contrast to <tt>MultiArray::subarray()</tt> where the end point is not included.
214
215 The filter weights are provided in a comma separated list. Normally, the sum of the
216 coefficients should be 1 in order to preserve the average intensity of the image.
217 You must provide either as many coefficients as needed for the given filter size,
218 or exactly one value which will be used for all filter coefficients. Thus, the 3x3
218219 averaging filter can also be created like this:
219
220
220221 \code
221222 Kernel2D<double> filter;
222223 filter.initExplicitly(Shape2(-1,-1), Shape2(1,1)) = 1.0/9.0;
223224 \endcode
224225
225226 For various theoretical and practical reasons, the Gaussian filter is the best choice
226 in most situations. Its coefficients are chosen according to a Gaussian (i.e.
227 bell-shaped) function with given standard deviation. The kernel class has a
228 convenient <dfn>initGaussian(std_dev)</dfn> method that creates the appropriate
227 in most situations. Its coefficients are chosen according to a Gaussian (i.e.
228 bell-shaped) function with given standard deviation. The kernel class has a
229 convenient <dfn>initGaussian(std_dev)</dfn> method that creates the appropriate
229230 coefficients:
230231
231232 \code
232 vigra::Kernel2D<float> filter;
233 vigra::Kernel2D<float> filter;
233234 filter.initGaussian(1.5);
234235 convolveImage(inputImage, resultImage, filter);
235236 \endcode
236
237
237238 A complete example using these possibilities can be found in <a href="smooth_convolve_8cxx-example.html">smooth_convolve.cxx</a>.
238239
239240 <hr>
240241
241242 \subsection SeparableConvolveTutorial Separable Convolution in 2D and nD Images
242
243 When filtering is implemented with 2-dimensional windows as in the previous section,
244 we need as many multiplications per pixel as there are coefficients in the filter.
245 Fortunately, many important filters (including averaging and Gaussian smoothing)
246 have the property of being <i>separable</i>, which allows a much more efficient
247 implementation in terms of 1-dimensional windows. A 2-dimensional filter is
248 separable if its coefficients \f$f_{ij}\f$ can be expressed as an outer product
243
244 When filtering is implemented with 2-dimensional windows as in the previous section,
245 we need as many multiplications per pixel as there are coefficients in the filter.
246 Fortunately, many important filters (including averaging and Gaussian smoothing)
247 have the property of being <i>separable</i>, which allows a much more efficient
248 implementation in terms of 1-dimensional windows. A 2-dimensional filter is
249 separable if its coefficients \f$f_{ij}\f$ can be expressed as an outer product
249250 of two 1-dimensional filters \f$h_i\f$ and \f$c_j\f$:
250251
251252 \f[
252253 f_{ij} = h_i \cdot c_j
253254 \f]
254
255 For example, the 3x3 averaging filter (with coefficients 1/9) is obtained as the outer
255
256 For example, the 3x3 averaging filter (with coefficients 1/9) is obtained as the outer
256257 product of two 3x1 filters (with coefficients 1/3):
257
258 \f[ \left( \begin{array}{ccc} \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \\[1ex]
259 \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \\[1ex]
260 \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \end{array} \right) =
258
259 \f[ \left( \begin{array}{ccc} \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \\[1ex]
260 \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \\[1ex]
261 \frac{1}{9} & \frac{1}{9} & \frac{1}{9} \end{array} \right) =
261262 \left( \begin{array}{c} \frac{1}{3} \\[1ex] \frac{1}{3} \\[1ex] \frac{1}{3} \end{array} \right) \cdot
262263 \left( \begin{array}{ccc} \frac{1}{3} & \frac{1}{3} & \frac{1}{3} \end{array} \right)
263264 \f]
264
265
265266 The convolution with separable filters can be implemented by two consecutive 1-dimensional
266267 convolutions: first, one filters all rows of the image with the horizontal filter, and then
267268 all columns of the result with the vertical filter. Instead of the (n x m) operations required
268269 for a 2-dimensional window, we now only need (n + m) operations for the two 1-dimensional ones.
269270 Already for a 5x5 window, this reduces the number of operations from 25 to 10, and the difference
270271 becomes even bigger with increasing window size.
271
272
272273 To construct and apply 1-dimensional filters, VIGRA provides the class \ref vigra::Kernel1D and
273274 the functions separableConvolveX() resp. separableConvolveY(). To compute a 2D Gaussian filter
274275 we use the following code:
275276
276277 \code
277 Kernel1D<double> filter;
278 Kernel1D<double> filter;
278279 filter.initGaussian(1.5);
279
280
280281 MultiArray<2, float> tmpImage(inputImage.shape());
281282 separableConvolveX(inputImage, tmpImage, filter);
282283 separableConvolveY(tmpImage, resultImage, filter);
286287 The same result is more conveniently achieved by the functions \ref convolveImage() and
287288 \ref gaussianSmoothing() (see <a href="smooth_convolve_8cxx-example.html">smooth_convolve.cxx</a>
288289 for a working example):
289
290 \code
291 // apply 'filter' to both the x- and y-axis
290
291 \code
292 // apply 'filter' to both the x- and y-axis
292293 // (calls separableConvolveX() and separableConvolveY() internally)
293294 convolveImage(inputImage, resultImage, filter, filter);
294
295
295296 // smooth image with Gaussian filter with sigma=1.5
296297 // (calls convolveImage() with Gaussian filter internally)
297298 gaussianSmoothing(inputImage, resultImage, 1.5);
298299 \endcode
299300
300 It is, of course, also possible to apply different filters in the x- and y-directions.
301 It is, of course, also possible to apply different filters in the x- and y-directions.
301302 This is especially useful for derivative filters which are commonly used to compute
302303 image features, for example \ref gaussianGradient() and \ref gaussianGradientMagnitude().
303 For more information see \ref CommonConvolutionFilters and \ref Convolution.
304
304 For more information see \ref ConvolutionFilters.
305
305306 Separable filters are also the key for efficient convolution of higher-dimensional images
306 and arrays: An n-dimensional filter is simply implemented by n consecutive 1-dimensional
307 filter applications, regardless of the size of n. This is the basis for VIGRA's
307 and arrays: An n-dimensional filter is simply implemented by n consecutive 1-dimensional
308 filter applications, regardless of the size of n. This is the basis for VIGRA's
308309 multi-dimensional filter functions. For example, Gaussian smoothing in arbitrary many
309310 dimensions is implemented in \ref gaussianSmoothMultiArray():
310
311
311312 \code
312313 MultiArray<3, UInt8> inputArray(Shape3(100, 100, 100));
313314 ... // fill inputArray with data
314
315
315316 MultiArray<3, float> resultArray(inputArray.shape());
316
317
317318 // perform isotropic Gaussian smoothing at scale 1.5
318319 gaussianSmoothMultiArray(inputArray, resultArray, 1.5);
319320 \endcode
320
321
321322 More information about VIGRA's multi-dimensional convolution functions can be found in
322 the reference manual under \ref MultiArrayConvolutionFilters .
323 the reference manual under \ref ConvolutionFilters.
324
325 \subsection ParallelConvolveTutorial Parallel Execution of Gaussian Filters
326
327 The computation of Gaussian filters and their derivatives can be accelerated significantly
328 when rectangular blocks of a large image are processed in parallel.
329 This is easily achieved in VIGRA by passing the option object
330 \ref vigra::BlockwiseConvolutionOptions to the convolution functions:
331
332 \code
333 // create a big array
334 MultiArray<3, UInt8> inputArray(Shape3(1000, 1000, 100));
335 ... // fill inputArray with data
336
337 MultiArray<3, float> resultArray(inputArray.shape());
338
339 // perform isotropic Gaussian smoothing at scale 1.5 in parallel
340 gaussianSmoothMultiArray(inputArray, resultArray, 1.5,
341 BlockwiseConvolutionOptions<3>());
342 \endcode
343
344 This call will spawn the standard number of threads for the present platform
345 (as returned by <tt>std::thread::hardware_concurrency()</tt>) and distributes
346 the work across these threads in blocks with a suitable default shape.
347 You can customize the number of threads and the block shape via the option
348 object:
349
350 \code
351 gaussianSmoothMultiArray(inputArray, resultArray, 1.5,
352 BlockwiseConvolutionOptions<3>().numThreads(6)
353 .blockShape(Shape3(128, 128, 100)));
354 \endcode
355
356 The same works for Gaussian derivative filters such as \ref gaussianGradientMultiArray(),
357 \ref gaussianGradientMagnitude(), and \ref hessianOfGaussianMultiArray(). Refer to section
358 \ref ConvolutionFilters for more details.
323359 */
324360
325361 /** \example invert_tutorial.cxx
351387 <br>
352388 Usage: <TT>smooth_convolve infile outfile</TT>
353389 */
390
391 /** \page ImageSegmentationTutorial Image Segmentation
392
393 <h2>Section Contents</h2>
394
395 <ul style="list-style-image:url(documents/diamond.gif)">
396 <li> \ref SuperpixelsTutorial
397 <li> \ref RAGTutorial
398 <li> \ref ClusteringTutorial
399 </ul>
400
401 The complete code of the example described here can be found in <a href="graph_agglomerative_clustering_8cxx-example.html">graph_agglomerative_clustering.cxx</a>.
402
403 \section SuperpixelsTutorial Computing Superpixels
404
405 Hierarchical or agglomerative clustering can be applied either to the pixels directly
406 by using a \ref vigra::GridGraph, or on an initial oversegmentation into superpixels
407 whose region adjacency graph is represented in a \ref vigra::AdjacencyListGraph. We
408 describe the second variant here, as it offers more possibilities, but the first
409 works essentially in the same way.
410
411 Before computing superpixels, it is useful to transform the data from the RGB colorspace
412 into the Lab colorspace, because distances in the Lab space are perceptually more meaningful:
413
414 \code
415 // read the input image
416 ImageImportInfo info(filename);
417 MultiArray<2, TinyVector<float, 3> > imageArrayRGB(info.shape());
418 importImage(info, imageArrayRGB);
419
420 // convert into Lab color space
421 MultiArray<2, TinyVector<float, 3> > imageArrayLab(imageArrayRGB.shape());
422 transformMultiArray(imageArrayRGB, imageArrayLab, RGB2LabFunctor<float>());
423 \endcode
424
425 VIGRA offers two superpixel algorithms: watersheds and SLIC superpixels. We use
426 the watershed algorithm here, see \ref slicSuperpixels() for more information on
427 the alternative. To run the watershed algorithm, we first need an edge indicator
428 (i.e. an image with big values along edges and small values elsewhere) like the
429 gradient magnitude:
430
431 \code
432 // detect edges by the Gaussian gradient magnitude
433 MultiArray<2, float> gradMag(imageArrayLab.shape());
434
435 float sigmaGradMag = 3.0f;
436 gaussianGradientMagnitude(imageArrayLab, gradMag, sigmaGradMag);
437 \endcode
438
439 <Table cellspacing = "10">
440 <TR valign = "bottom">
441 <TD> \image html bears.jpg "input image" </TD>
442 <TD> \image html bears_gradient.png "gradient magnitude" </TD>
443 </TR>
444 </Table>
445
446 The watershed algorithm initiates a superpixel at every local minimum of the gradient
447 image and then grows these seeds along increasing gradients until they meet at the
448 gradient ridges (called "watersheds" because we can interpret the gradient as the
449 altitude of a landscape) which partly correspond to true image edges, but are also located
450 elsewhere. The goal of the subsequent hierarchical clustering is to identify the
451 true edges and delete the spurious ones. The superpixels are represented in a label
452 image that assigns the superpixel ID to every pixel. To visualize the superpixels,
453 it is useful to display the watershed lines as an overlay on an enlarged version
454 of the input image (Enlarging the image is necessary because the watersheds are
455 actually between pixels, i.e. at half-integer coordinates. Doubling maps these
456 to odd-valued coordinates in the enlarged image, so that rounding is avoided.).
457 In this example, we use the fast union-find watershed algorithm, which is also
458 available in a parallel version in function \ref unionFindWatershedsBlockwise():
459
460 \code
461 // create watershed superpixels with the fast union-find algorithm
462 MultiArray<2, unsigned int> labelArray(gradMag.shape());
463 unsigned int max_label = watershedsMultiArray(gradMag, labelArray, DirectNeighborhood,
464 WatershedOptions().unionFind());
465
466 // double the image resolution and create watershed overlay
467 MultiArray<2, TinyVector<float, 3> > imageArrayBig(imageArrayRGB.shape()*2-Shape2(1));
468 resizeMultiArraySplineInterpolation(imageArrayRGB, imageArrayBig);
469 regionImageToCrackEdgeImage(labelArray, imageArrayBig,
470 RGBValue<float>(255, 0, 0), EdgeOverlayOnly);
471 \endcode
472
473 \image html bears_superpixels.png
474
475 \section RAGTutorial Constructing the Region Adjacency Graph and its Feature Maps
476
477 Next, we invoke \ref makeRegionAdjacencyGraph() to construct the region adjacency
478 graph (RAG) of the superpixels. This function takes any graph along with a connected
479 components labeling and creates a new graph that has exactly one node per connected
480 component, and nodes are connected by an edge whenever the corresponding components are
481 neighbors in the original graph, i.e. the original graph contains at least one edge
482 with one end point in the first and the other in the second component. In general,
483 each pair of components has several edges with this property, and all of them are
484 mapped onto a single edge in the RAG. To keep track of this mapping,
485 makeRegionAdjacencyGraph() accepts an additional parameter \a affiliatedEdges, which
486 is a map from edge IDs in the RAG to vectors of edge IDs in the original graph.
487 In our case, the input graph is a \ref vigra::GridGraph whose labeling is stored
488 in the \a labelArray, and the output graph is a \ref vigra::AdjacencyListGraph.
489 The mapping \a affiliatedEdges is best constructed by using embedded types of
490 these graph classes:
491
492 \code
493 // create grid-graph of appropriate size
494 typedef GridGraph<2, undirected_tag> ImageGraph;
495 ImageGraph imageGraph(labelArray.shape());
496
497 // construct an empty graph to hold the region adjacency graph for the superpixels
498 typedef AdjacencyListGraph RAG;
499 RAG rag;
500
501 // create the mapping 'affiliatedEdges' from RAG edges to
502 // corresponding imageGraph edges and build the RAG
503 RAG::EdgeMap<std::vector<ImageGraph::Edge>> affiliatedEdges(rag);
504 makeRegionAdjacencyGraph(imageGraph, labelArray, rag, affiliatedEdges);
505 \endcode
506
507 Note that VIGRA's graph classes conform to the elegant API defined in the
508 <a href="https://lemon.cs.elte.hu/">LEMON Graph Library</a>. Although VIGRA
509 doesn't use LEMON's implementation of this API, it is worth reading their
510 <a href="http://lemon.cs.elte.hu/pub/tutorial/index.html">tutorial</a>
511 because VIGRA's graph classes behave in exactly the same way.
512
513 To control agglomerative clustering, i.e. to define the order in which
514 edges are contracted and nodes merged, we need some features that describe
515 the dissimilarity of superpixels. The more dissimilar two superpixels are,
516 the more likely they will remain separated, i.e. belong to different regions
517 of the final segmentation. We distinguish two kinds of features: edge weights
518 and node features.
519
520 Edge weights should be high when two superpixels are separated by an object
521 edge, i.e. when the gradient magnitude along the common superpixel boundary is high.
522 We define the edge weight as the average gradient magnitude along the boundary,
523 i.e. as the average over the grid edges that correspond to the present RAG edge.
524 However, as pointed out earlier, watershed boundaries are located between pixels,
525 i.e. on half-integer coordinates, whereas the gradient has been computed on pixels,
526 i.e. at integer coordinates. We solve this problem by linear interpolation: the
527 gradient of a grid edge is the average gradient of its two end points.
528
529 \code
530 // create edge maps for weights and lengths of the RAG edges (zero initialized)
531 RAG::EdgeMap<float> edgeWeights(rag),
532 edgeLengths(rag);
533
534 // iterate over all RAG edges (this loop follows a standard LEMON idiom)
535 for(RAG::EdgeIt rag_edge(rag); rag_edge != lemon::INVALID; ++rag_edge)
536 {
537 // iterate over all grid edges that constitute the present RAG edge
538 for(unsigned int k = 0; k < affiliatedEdges[*rag_edge].size(); ++k)
539 {
540 // look up the current grid edge and its end points
541 auto const & grid_edge = affiliatedEdges[*rag_edge][k];
542 auto start = imageGraph.u(grid_edge),
543 end = imageGraph.v(grid_edge);
544
545 // compute gradient by linear interpolation between end points
546 double grid_edge_gradient = 0.5 * (gradMag[start] + gradMag[end]);
547 // aggregate the total
548 edgeWeights[*rag_edge] += grid_edge_gradient;
549 }
550
551 // the length of the RAG edge equals the number of constituent grid edges
552 edgeLengths[*rag_edge] = affiliatedEdges[*rag_edge].size();
553 // define edge weights by the average gradient
554 edgeWeights[*rag_edge] /= edgeLengths[*rag_edge];
555 }
556 \endcode
557
558 Node features are defined by the average Lab color of each superpixel.
559 Hierarchical clustering will later turn this into a node dissimilarity by
560 computing the Euclidean distance between the average colors of neighboring
561 superpixels, possibly weighted by the superpixels' sizes. To compute these
562 features, we invoke VIGRA's \ref FeatureAccumulators framework:
563
564 \code
565 // determine size and average Lab color of each superpixel
566 using namespace acc;
567 AccumulatorChainArray<CoupledArrays<2, TinyVector<float, 3>, unsigned int>,
568 Select<DataArg<1>, LabelArg<2>, // where to look for data and region labels
569 Count, Mean> > // what statistics to compute
570 features;
571 extractFeatures(imageArrayLab, labelArray, features);
572 \endcode
573
574 To be understood by hierarchicalClustering(), we must copy the features into
575 node property maps which are compatible with the RAG data structure:
576
577 \code
578 // copy superpixel features into NodeMaps to be passed to hierarchicalClustering()
579 RAG::NodeMap<TinyVector<float, 3>> meanColor(rag);
580 RAG::NodeMap<unsigned int> regionSize(rag);
581 for(unsigned int k=0; k<=max_label; ++k) // max_label was returned from watershedsMultiArray()
582 {
583 meanColor[k] = get<Mean>(features, k);
584 regionSize[k] = get<Count>(features, k);
585 }
586 \endcode
587
588 \section ClusteringTutorial Perform Hierarchical Clustering
589
590 Now we have collected all information needed to perform agglomerative clustering.
591 We pass the superpixel adjacency graph and its feature maps to the clustering
592 function, and it returns the cluster assignment in a new property map
593 \a nodeLabels that assigns to every RAG node the ID of the cluster the node belongs
594 to. Thus, \a nodeLabels plays exactly the same role for the RAG as \a labelArray
595 did for the grid graph. Cluster IDs are identical to the node IDs of arbitrarily
596 chosen cluster representatives, i.e. they form a sparse subset of the original IDs.
597
598 \code
599 // customize parameters of the clustering algorithm
600 float beta = 0.5f; // importance of node features relative to edge weights
601 float wardness = 0.8f; // importance of cluster size
602 int numClusters = 30; // desired number of resulting regions (clusters)
603
604 // create a node map for the new (clustered) region labels and perform
605 // clustering to remove unimportant watershed edges
606 RAG::NodeMap<unsigned int> nodeLabels(rag);
607 hierarchicalClustering(rag, // input: the superpixel adjacency graph
608 edgeWeights, edgeLengths, meanColor, regionSize, // features
609 nodeLabels, // output: a cluster labeling of the RAG
610 ClusteringOptions().minRegionCount(numClusters)
611 .nodeFeatureImportance(beta)
612 .sizeImportance(wardness)
613 .nodeFeatureMetric(metrics::L2Norm)
614 );
615 \endcode
616
617 The details of the clustering algorithm can be customized by the option object
618 \ref vigra::ClusteringOptions. Here, we set the termination criterion \a numClusters,
619 the relative importance of node features and sizes (\a beta and \a wardness) and the
620 norm to be used to compute node feature dissimilarity. Option objects like this are
621 a common idiom in the VIGRA library, because code readability matters.
622
623 Finally, we replace the original superpixel labels in \a labelArray with the new cluster
624 labels from \a nodeLabels and visualize the resulting region boundaries, this time
625 with a green overlay on the enlarged input image:
626
627 \code
628 // update label image with the new labels
629 transformMultiArray(labelArray, labelArray,
630 [&nodeLabels](unsigned int oldlabel)
631 {
632 return nodeLabels[oldlabel];
633 });
634
635 // visualize the salient edges as a green overlay
636 regionImageToCrackEdgeImage(labelArray, imageArrayBig,
637 RGBValue<float>( 0, 255, 0), EdgeOverlayOnly);
638 \endcode
639
640 \image html bears_segmentation.png
641 */
00 /** \mainpage VIGRA Reference Manual
11
2 <UL style="list-style-image:url(documents/bullet.gif)">
2 <UL class="collapsibleList">
3
34 <LI> \ref Installation
45 <BR>&nbsp;&nbsp;&nbsp;<em>how to get started</em>
5 <LI> \ref Tutorial
6 <LI> \ref Tutorial
67 <BR>&nbsp;&nbsp;&nbsp;<em>first steps with VIGRA</em>
7 <LI> \ref Concepts
8 <LI> <span style="font-weight:bold; color:#0040b0">Data Structures</span>
9 <BR>&nbsp;&nbsp;&nbsp;<em>multi-dimensional arrays, graphs, numeric types etc.</em>
10 <UL>
11 <LI> \ref vigra::TinyVector
12 <BR>&nbsp;&nbsp;&nbsp;<em>fixed-size arrays</em>
13 <LI> \ref vigra::MultiArrayView
14 <BR>&nbsp;&nbsp;&nbsp;<em>multi-dimensional array interface</em>
15 <LI> \ref vigra::MultiArray
16 <BR>&nbsp;&nbsp;&nbsp;<em>multi-dimensional array class that holds the actual memory</em>
17 <LI> \ref vigra::NumpyArray and \ref vigra::NumpyAnyArray
18 <BR>&nbsp;&nbsp;&nbsp;<em>wrappers offering VIGRA's multi-array API for Python arrays</em>
19 <LI> \ref vigra::linalg::Matrix
20 <BR>&nbsp;&nbsp;&nbsp;<em>array specialization for linear algebra</em>
21 <LI> \ref ImageContainers
22 <BR>&nbsp;&nbsp;&nbsp;<em>classes to manage multiple images and pyramids</em>
23 <LI> \ref vigra::SplineImageView
24 <BR>&nbsp;&nbsp;&nbsp;<em>on-the-fly interpolation of 2D images and their derivatives</em>
25 <LI> \ref ChunkedArrayClasses
26 <BR>&nbsp;&nbsp;&nbsp;<em>big data (potentially larger than RAM) stored as a collection of rectangular blocks</em>
27 <LI> \ref vigra::AdjacencyListGraph
28 <BR>&nbsp;&nbsp;&nbsp;<em>general graph class</em>
29 <LI> \ref vigra::GridGraph
30 <BR>&nbsp;&nbsp;&nbsp;<em>direct and indirect neighbor graphs on arbitrary dimensional grids</em>
31 <LI> \ref vigra::BinaryForest
32 <BR>&nbsp;&nbsp;&nbsp;<em>a collection of binary trees</em>
33 <LI> <span style="font-weight:bold; color:#0040b0">Pixel and Number Types</span>
34 <BR>&nbsp;&nbsp;&nbsp;<em>rationals, complex numbers, RGB tuples etc.</em>
35 <UL>
36 <LI> \ref FixedSizeInt
37 <LI> \ref vigra::RGBValue
38 <LI> \ref RGBValueTraits
39 <LI> \ref vigra::TinyVector
40 <LI> \ref vigra::TinyVectorView
41 <LI> \ref TinyVectorTraits
42 <LI> \ref vigra::FFTWComplex
43 <LI> \ref FFTWComplexTraits
44 <LI> \ref vigra::Rational
45 <LI> \ref vigra::FixedPoint
46 <LI> \ref vigra::FixedPoint16
47 </UL>
48 <LI> \ref vigra::BucketQueue and \ref vigra::MappedBucketQueue
49 <BR>&nbsp;&nbsp;&nbsp;<em>efficient priority queues for integer priorities</em>
50 <LI> \ref vigra::Any
51 <BR>&nbsp;&nbsp;&nbsp;<em>typesafe storage of arbitrary values</em>
52 <LI> \ref vigra::ArrayVector
53 <BR>&nbsp;&nbsp;&nbsp;<em>replacement for std::vector with stronger guarantees (deprecated as of C++ 11)</em>
54 <LI> \ref vigra::BasicImage
55 <BR>&nbsp;&nbsp;&nbsp;<em>deprecated type for 2D images</em>
56 <LI> \ref vigra::BasicImageView
57 <BR>&nbsp;&nbsp;&nbsp;<em>deprecated type for 2D images that use external memory</em>
58 </UL>
59 <LI> <span style="font-weight:bold; color:#0040b0">Image and Array I/O</span>
60 <BR>&nbsp;&nbsp;&nbsp;<em>read and write data from/to disk</em>
61 <UL>
62 <LI> \ref VigraImpex
63 <BR>&nbsp;&nbsp;&nbsp;<em>highlevel import/export interface for 2D images</em>
64 <LI> \ref VolumeImpex
65 <BR>&nbsp;&nbsp;&nbsp;<em>import/export interface for volume data</em>
66 <LI> \ref VigraHDF5Impex
67 <BR>&nbsp;&nbsp;&nbsp;<em>import/export of arbitrary-dimensional arrays in the
68 <a href="http://www.hdfgroup.org/HDF5/">HDF5</a> format</em>
69 <LI> \ref vigra::ChunkedArrayHDF5
70 <BR>&nbsp;&nbsp;&nbsp;<em>automated memory management of huge datasets via HDF5</em>
71 <LI> \ref TIFFImpex
72 <BR>&nbsp;&nbsp;&nbsp;<em>image import/export interface to call libtiff functions directly</em>
73 </UL>
74 <LI> \ref ParallelProcessing
75 <BR>&nbsp;&nbsp;&nbsp;<em>using std::thread</em>
76 <LI> <span style="font-weight:bold; color:#0040b0">Image Processing</span>
77 <BR>&nbsp;&nbsp;&nbsp;<em>array arithmetic, convolution filters, morphology, color conversion, registration etc.</em>
78 <UL>
79 <LI> \ref MultiMathModule
80 <BR>&nbsp;&nbsp;&nbsp;<em>arithmetic and algebraic expressions for multi-dimensional arrays</em>
81 <LI> \ref MultiPointoperators
82 <BR>&nbsp;&nbsp;&nbsp;<em>point operators on multi-dimensional arrays</em>
83 <LI> \ref ColorConversions
84 <BR>&nbsp;&nbsp;&nbsp;<em>convert between RGB and other color spaces like L*u*v* and Y'PbPr</em>
85 <LI> <span style="font-weight:bold; color:#0040b0">Filters</span>
86 <BR>&nbsp;&nbsp;&nbsp;<em>Gaussian filters, smoothing and sharpening, gradients etc.</em>
87 <UL>
88 <LI> \ref ConvolutionFilters
89 <BR>&nbsp;&nbsp;&nbsp;<em>arbitrary-dimensional filters in the spatial and Fourier domains</em>
90 <LI> \ref RecursiveConvolution
91 <BR>&nbsp;&nbsp;&nbsp;<em>recursive filters (1st and 2nd order)</em>
92 <LI> \ref ResamplingConvolutionFilters
93 <BR>&nbsp;&nbsp;&nbsp;<em>filters changing the array size</em>
94 <LI> \ref GaborFilter
95 <BR>&nbsp;&nbsp;&nbsp;<em>Gabor filter creation and related functionality</em>
96 <LI> \ref TensorImaging
97 <BR>&nbsp;&nbsp;&nbsp;<em>tensor-valued pixel features</em>
98 <LI> \ref vigra::Kernel1D and \ref vigra::Kernel2D
99 <BR>&nbsp;&nbsp;&nbsp;<em>generic discrete convolution kernels</em>
100 <LI> \ref SeparableConvolution
101 <BR>&nbsp;&nbsp;&nbsp;<em>1D convolution and separable filters in 2 dimensions</em>
102 <LI> \ref BorderTreatmentMode
103 </UL>
104 <LI> \ref GeometricTransformations "Geometric Transformations and Resizing"
105 <BR>&nbsp;&nbsp;&nbsp;<em>arbitrary-dimensional resize and interpolation, mirroring, rotation, affine warping</em>
106 <LI> \ref vigra::SplineImageView
107 <BR>&nbsp;&nbsp;&nbsp;<em>on-the-fly interpolation of 2D images and their derivatives</em>
108 <LI> \ref FourierTransform
109 <BR>&nbsp;&nbsp;&nbsp;<em>fast Fourier transform for arrays of arbitrary dimension</em>
110 <LI> \ref Correlation
111 <BR>&nbsp;&nbsp;&nbsp;<em>estimate the correlation between images</em>
112 <LI> \ref Registration
113 <BR>&nbsp;&nbsp;&nbsp;<em>transform images into a common coordinate system</em>
114 <LI> \ref NonLinearDiffusion
115 <BR>&nbsp;&nbsp;&nbsp;<em>edge-preserving smoothing and denoising</em>
116 <LI> \ref DistanceTransform
117 <BR>&nbsp;&nbsp;&nbsp;<em>distance transforms in arbitrary dimensions</em>
118 <LI> \ref MultiArrayMorphology
119 <BR>&nbsp;&nbsp;&nbsp;<em>separable morphology with parabola structuring functions in arbitrary dimensions</em>
120 <LI> \ref Morphology
121 <BR>&nbsp;&nbsp;&nbsp;<em>2D erosion, dilation, and median with disc structuring functions</em>
122 <LI> \ref NoiseNormalization
123 <BR>&nbsp;&nbsp;&nbsp;<em>transform intensity-dependent noise into additive Gaussian noise</em>
124 <LI> \ref SlantedEdgeMTF
125 <BR>&nbsp;&nbsp;&nbsp;<em>determine the magnitude transfer function (MTF) of a camera using the slanted edge method</em>
126 </UL>
127 <LI> <span style="font-weight:bold; color:#0040b0">Image Analysis</span>
128 <BR>&nbsp;&nbsp;&nbsp;<em>from pixels to structured data</em>
129 <UL>
130 <LI> \ref FeatureAccumulators
131 <BR>&nbsp;&nbsp;&nbsp;<em>compute multi-dimensional statistics for regions or the entire image</em>
132 <LI> \ref InspectAlgo and \ref InspectFunctor
133 <BR>&nbsp;&nbsp;&nbsp;<em>outdated statistical analysis functions</em>
134 <LI> \ref vigra::Threshold
135 <BR>&nbsp;&nbsp;&nbsp;<em>good old thresholding</em>
136 <LI> \ref Labeling
137 <BR>&nbsp;&nbsp;&nbsp;<em>connected components labeling</em>
138 <LI> \ref LocalMinMax
139 <BR>&nbsp;&nbsp;&nbsp;<em>Including extremal plateaus larger than 1 pixel</em>
140 <LI> \ref DistanceTransform
141 <BR>&nbsp;&nbsp;&nbsp;<em>including vector distance, eccentricity transforms, and skeletons</em>
142 <LI> \ref EdgeDetection
143 <BR>&nbsp;&nbsp;&nbsp;<em>edge detectors based on first and second derivatives</em>
144 <LI> \ref CornerDetection
145 <BR>&nbsp;&nbsp;&nbsp;<em>measure the 'cornerness' at each pixel </em>
146 <LI> \ref SymmetryDetection
147 <BR>&nbsp;&nbsp;&nbsp;<em>measure the local symmetry at each pixel </em>
148 <LI> \ref Superpixels
149 <BR>&nbsp;&nbsp;&nbsp;<em>watersheds, SLIC superpixels, and seeded region growing</em>
150 <LI> \ref GraphDataStructures
151 <BR>&nbsp;&nbsp;&nbsp;<em>graph-based segmentation algorithms</em>
152 </UL>
153 <LI> \ref GraphDataStructures
154 <BR>&nbsp;&nbsp;&nbsp;<em>graph classes and graph-based algorithms</em>
155 <LI> \ref MachineLearning
156 <BR>&nbsp;&nbsp;&nbsp;<em>classification algorithms</em>
157 <LI> <span style="font-weight:bold; color:#0040b0">Mathematical Tools</span>
158 <BR>&nbsp;&nbsp;&nbsp;<em>number types, special functions, linear algebra and optimization etc.</em>
159 <UL>
160 <LI> \ref NumericPromotionTraits
161 <BR>&nbsp;&nbsp;&nbsp;<em>meta-information about arithmetic types</em>
162 <LI> \ref MathConstants
163 <BR>&nbsp;&nbsp;&nbsp;<em>M_PI, M_SQRT2</em>
164 <LI> \ref MathFunctions
165 <BR>&nbsp;&nbsp;&nbsp;<em>and functors</em>
166 <LI> <span style="font-weight:bold; color:#0040b0">Number Types</span>
167 <UL>
168 <LI> \ref vigra::Rational
169 <LI> \ref vigra::TinyVector
170 <LI> \ref vigra::autodiff::DualVector
171 <LI> \ref vigra::FFTWComplex
172 <LI> \ref vigra::FixedPoint16
173 <LI> \ref vigra::Quaternion
174 </UL>
175 <LI> \ref RandomNumberGeneration
176 <BR>&nbsp;&nbsp;&nbsp;<em>Mersenne twister class and random number functors</em>
177 <LI> \ref Polynomials
178 <BR>&nbsp;&nbsp;&nbsp;<em>Polynomials and root determination</em>
179 <LI> \ref vigra::linalg::Matrix
180 <BR>&nbsp;&nbsp;&nbsp;<em>array specialization for linear algebra</em>
181 <LI> \ref LinearAlgebraModule "Linear Algebra"
182 <BR>&nbsp;&nbsp;&nbsp;<em>matrix algebra, solution of linear systems, eigenvalue calculation etc.</em>
183 <LI> \ref Unsupervised_Decomposition "Unsupervised Decomposition"
184 <BR>&nbsp;&nbsp;&nbsp;<em>Unsupervised matrix decomposition methods (pLSA)</em>
185 <LI> \ref Optimization "Optimization and Regression"
186 <BR>&nbsp;&nbsp;&nbsp;<em>ordinary and non-negative least squares, ridge regression, least angle regression (LARS and LASSO)</em>
187 <LI> \ref AlgebraicConcepts
188 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for types that implement arithmetic operations</em>
189 </UL>
190 <LI> <span style="font-weight:bold; color:#0040b0">Utilities</span>
191 <BR>&nbsp;&nbsp;&nbsp;<em>error handling, speed measurement</em>
192 <UL>
193 <LI> \ref ErrorReporting
194 <BR>&nbsp;&nbsp;&nbsp;<em>exceptions and assertions</em>
195 <LI> \ref TimingMacros
196 <BR>&nbsp;&nbsp;&nbsp;<em>macros for taking execution speed measurements</em>
197 <LI> \ref VIGRA_FINALLY
198 <BR>&nbsp;&nbsp;&nbsp;<em>emulation of the 'finally' keyword from Python</em>
199 <LI> \ref vigra::Any
200 <BR>&nbsp;&nbsp;&nbsp;<em>typesafe storage of arbitrary values</em>
201 <LI> \ref vigra::BucketQueue and \ref vigra::MappedBucketQueue
202 <BR>&nbsp;&nbsp;&nbsp;<em>efficient priority queues for integer priorities</em>
203 <LI> \ref RangesAndPoints
204 <BR>&nbsp;&nbsp;&nbsp;<em>2-D and N-D positions, extents, and boxes</em>
205 <LI> \ref vigra::FilterIterator
206 <BR>&nbsp;&nbsp;&nbsp;<em>skip elements in a range that don't fulfill a predicate</em>
207 <LI> \ref vigra::IteratorAdaptor
208 <BR>&nbsp;&nbsp;&nbsp;<em>quickly create STL-compatible 1D iterator adaptors</em>
209 <LI> \ref TupleTypes
210 <BR>&nbsp;&nbsp;&nbsp;<em>pair, triple, tuple4, tuple5</em>
211 <LI> \ref PixelNeighborhood
212 <BR>&nbsp;&nbsp;&nbsp;<em>2D neighborhood definitions (deprecated, use \ref vigra::GridGraph instead)</em>
213 <LI> \ref VoxelNeighborhood
214 <BR>&nbsp;&nbsp;&nbsp;<em>3D neighborhood definitions (deprecated, use \ref vigra::GridGraph instead)</em>
215 <LI> \ref vigra::ArrayVector
216 <BR>&nbsp;&nbsp;&nbsp;<em>replacement for std::vector with stronger guarantees (deprecated as of C++ 11)</em>
217 </UL>
218 <LI> <span style="font-weight:bold; color:#0040b0">Concepts</span>
8219 <BR>&nbsp;&nbsp;&nbsp;<em>generic interface definitions</em>
9 <LI> \ref Utilities
10 <BR>&nbsp;&nbsp;&nbsp;<em>Basic helper functionality needed throughout</em>
11 <LI> \ref ErrorReporting
12 <BR>&nbsp;&nbsp;&nbsp;<em>Exceptions and assertions</em>
13 <LI> \ref MathFunctionality
14 <BR>&nbsp;&nbsp;&nbsp;<em>Number types, mathematical constants and functions, linear algebra etc.</em>
15 <LI> \ref PixelTypes
16 <BR>&nbsp;&nbsp;&nbsp;<em>Non-scalar types such as RGBValue and TinyVector</em>
17 <LI> \ref ImageDataStructures
18 <BR>&nbsp;&nbsp;&nbsp;<em>Images, image iterators, and supporting types and functions</em>
19 <LI> \ref MultiDimensionalArrays
20 <BR>&nbsp;&nbsp;&nbsp;<em>Arrays, iterators, and supporting types and functions
21 for arbitrary dimensions</em>
22 <LI> \ref ChunkedArrayClasses
23 <BR>&nbsp;&nbsp;&nbsp;<em>Store big data (potentially larger than RAM) as a collection of rectangular blocks</em>
24 <LI> \ref GraphDataStructures
25 <BR>&nbsp;&nbsp;&nbsp;<em>Graph-based algorithms (e.g. segmentation) and underlying graph classes (e.g. grid graphs for arbitrary dimensions)</em>
26 <LI> \ref ImportExport
27 <BR>&nbsp;&nbsp;&nbsp;<em>Conversion from and to other image data types</em>
28 <LI> \ref ColorConversions
29 <BR>&nbsp;&nbsp;&nbsp;<em>Convert between RGB and other color spaces, such as L*u*v*, Y'PbPr</em>
30 <LI> \ref ImageProcessing
31 <BR>&nbsp;&nbsp;&nbsp;<em>Point operators, image arithmetic, convolution, morphology</em>
32 <LI> \ref Registration
33 <BR>&nbsp;&nbsp;&nbsp;<em>Transforming different images into a common coordinate system</em>
34 <LI> \ref ImageAnalysis
35 <BR>&nbsp;&nbsp;&nbsp;<em>Segmentation and feature extraction algorithms</em>
36 <LI> \ref MachineLearning
37 <BR>&nbsp;&nbsp;&nbsp;<em>Classification algorithms</em>
220 <UL>
221 <LI> \ref AlgebraicConcepts
222 <BR>&nbsp;&nbsp;&nbsp;<em>requirements for types that implement arithmetic operations</em>
223 <LI> \ref ImageIterators
224 <BR>&nbsp;&nbsp;&nbsp;<em>2D iterator API</em>
225 <LI> \ref MultiIteratorPage
226 <BR>&nbsp;&nbsp;&nbsp;<em>multi-dimensional iterator API</em>
227 <LI> \ref vigra::MultiArrayNavigator
228 <BR>&nbsp;&nbsp;&nbsp;<em>navigator utility for multi-dimensional arrays</em>
229 <LI> \ref vigra::FunctorTraits
230 <BR>&nbsp;&nbsp;&nbsp;<em>requirements for functor traits</em>
231 <LI> \ref CrackEdgeImage
232 <BR>&nbsp;&nbsp;&nbsp;<em>topologically correct treatment of interpixel contours</em>
233 <LI> \ref DataAccessors
234 <BR>&nbsp;&nbsp;&nbsp;<em>requirements for data accessors (deprecated)</em>
235 </UL>
38236 <LI> \ref ExampleList
39 <BR>&nbsp;&nbsp;&nbsp;<em>Demonstration programs for VIGRA's usage </em>
237 <BR>&nbsp;&nbsp;&nbsp;<em>demonstration programs for VIGRA's usage </em>
40238 <LI> \ref VigraMatlab
41 <BR>&nbsp;&nbsp;&nbsp;<em>VIGRA Matlab bindings</em>
42 <LI> <b><a href="../vigranumpy/index.html">vigranumpy</a></b>
239 <BR>&nbsp;&nbsp;&nbsp;<em>VIGRA Matlab bindings (unsupported)</em>
240 <LI> <a href="../vigranumpy/index.html" style="text-decoration:none; font-weight:bold; color:#0040b0">vigranumpy</a>
43241 <BR>&nbsp;&nbsp;&nbsp;<em>VIGRA Python bindings</em>
44242 <LI> \ref CreditsChangelog
45 <BR>&nbsp;&nbsp;&nbsp;<em>Who contributed what?</em>
243 <BR>&nbsp;&nbsp;&nbsp;<em>who contributed what?</em>
46244 </UL>
47245
48246 \anchor _details
84282 VIGRA is subject to this <a href="LICENSE.txt">LICENSE</a>.
85283
86284 You can also subscribe to the <a href="https://mailhost.informatik.uni-hamburg.de/mailman/listinfo/vigra">VIGRA Mailing List</a> to get instant information about new releases, discuss VIGRA's features and development, and ask the experts for help.
87
88
89
90 */
91
92 /** \page Concepts Concepts
93
94 <DL>
95 <DT>
96 Description of the generic interface concepts used within VIGRA.
97 <DD>
98 <UL style="list-style-image:url(documents/bullet.gif)">
99 <LI> \ref AlgebraicConcepts
100 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for types that implement arithmetic operations</em>
101 <LI> \ref ImageIterators
102 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for 2D iterators</em>
103 <LI> \ref MultiIteratorPage
104 <BR>&nbsp;&nbsp;&nbsp;<em>Iterators for multi-dimensional arrays</em>
105 <LI> \ref DataAccessors
106 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for data accessors</em>
107 <LI> \ref vigra::FunctorTraits
108 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for functor traits</em>
109 <LI> \ref CrackEdgeImage
110 </UL>
111 </DL>
112 */
113
114 /** \page MathFunctionality Mathematical Tools
115
116 <b>Number types, mathematical constants, special functions, linear algebra</b>
117 <p>
118 <UL style="list-style-image:url(documents/bullet.gif)">
119 <LI> \ref AlgebraicConcepts
120 <BR>&nbsp;&nbsp;&nbsp;<em>Requirements for types that implement arithmetic operations</em>
121 <LI> \ref NumericPromotionTraits
122 <BR>&nbsp;&nbsp;&nbsp;<em>Meta-information about arithmetic types</em>
123 <LI> \ref MathConstants
124 <BR>&nbsp;&nbsp;&nbsp;<em>M_PI, M_SQRT2</em>
125 <LI> <b>Grid Neighborhood Specification</b>
126 <UL style="list-style-image:url(documents/bullet.gif)">
127 <LI> \ref PixelNeighborhood "2-dimensional" (4- and 8-neighborhood)
128 <LI> \ref VoxelNeighborhood "3-dimensional" (6- and 26-neighborhood)
129 </UL>
130 <LI> <b>Number Types</b>
131 <UL style="list-style-image:url(documents/bullet.gif)">
132 <LI> \ref vigra::Rational
133 <LI> \ref vigra::TinyVector
134 <LI> \ref vigra::autodiff::DualVector
135 <LI> \ref vigra::FFTWComplex
136 <LI> \ref vigra::FixedPoint16
137 <LI> \ref vigra::Quaternion
138 </UL>
139 <LI> \ref RandomNumberGeneration
140 <BR>&nbsp;&nbsp;&nbsp;<em>Mersenne twister class and random number functors</em>
141 <LI> \ref Polynomials
142 <BR>&nbsp;&nbsp;&nbsp;<em>Polynomials and root determination</em>
143 <LI> \ref MathFunctions
144 <BR>&nbsp;&nbsp;&nbsp;<em>and functors</em>
145 <LI> \ref vigra::linalg::Matrix "Matrix class"
146 <BR>&nbsp;&nbsp;&nbsp;<em>the matrix class</em>
147 <LI> \ref LinearAlgebraModule "Linear Algebra"
148 <BR>&nbsp;&nbsp;&nbsp;<em>matrix algebra, solution of linear systems, eigenvalue calculation etc.</em>
149 <LI> \ref Unsupervised_Decomposition "Unsupervised Decomposition"
150 <BR>&nbsp;&nbsp;&nbsp;<em>Unsupervised matrix decomposition methods (pLSA)</em>
151 <LI> \ref Optimization "Optimization and Regression"
152 <BR>&nbsp;&nbsp;&nbsp;<em>ordinary and non-negative least squares, ridge regression, least angle regression (LARS and LASSO)</em>
153 </UL>
154
155 */
156
157 /** \page PixelTypes Pixel Types
158
159 <DL>
160 <DT>
161 <b>Scalar types</b>
162 <DD>
163 <UL style="list-style-image:url(documents/bullet.gif)">
164 <LI> \ref FixedSizeInt
165 <LI> \ref vigra::Rational
166 <LI> \ref vigra::FixedPoint
167 <LI> \ref vigra::FixedPoint16
168 </UL>
169 <p>
170 <DT>
171 <b>RGB colors and related functionality</b>
172 <DD>
173 <UL style="list-style-image:url(documents/bullet.gif)">
174 <LI> \ref vigra::RGBValue
175 <LI> \ref RGBValueTraits
176 <LI> \ref RGBValueOperators
177 <LI> \ref RGBValueAccessors
178 </UL>
179 <p>
180 <DT>
181 <b>Fixed-size vectors and related functionality</b>
182 <DD>
183 <UL style="list-style-image:url(documents/bullet.gif)">
184 <LI> \ref vigra::TinyVector
185 <LI> \ref vigra::TinyVectorView
186 <LI> \ref TinyVectorTraits
187 <LI> \ref TinyVectorOperators
188 </UL>
189 <p>
190 <DT>
191 <b>Complex Numbers</b>
192 <DD>
193 <UL style="list-style-image:url(documents/bullet.gif)">
194 <LI> \ref vigra::FFTWComplex
195 <LI> \ref FFTWComplexTraits
196 <LI> \ref FFTWComplexOperators
197 <LI> \ref FFTWComplexAccessors
198 </UL>
199 </DL>
200 */
201
202 /** \page ImageDataStructures Image Data Structures and Iterators
203
204 <UL style="list-style-image:url(documents/bullet.gif)">
205 <LI> \ref vigra::BasicImage
206 <BR>&nbsp;&nbsp;&nbsp;<em>Fundamental class template for images </em>
207 <LI> \ref vigra::BasicImageView
208 <BR>&nbsp;&nbsp;&nbsp;<em>Class template for images that use external memory</em>
209 <LI> \ref StandardImageTypes
210 <BR>&nbsp;&nbsp;&nbsp;<em>The most common instantiations of \ref vigra::BasicImage</em>
211 <LI> \ref vigra::SplineImageView
212 <BR>&nbsp;&nbsp;&nbsp;<em>Wrap a discrete image as a continuous function</em>
213 <LI> \ref VigraImpex
214 <BR>&nbsp;&nbsp;&nbsp;<em>Image import/export</em>
215 <LI> \ref ImageContainers
216 <BR>&nbsp;&nbsp;&nbsp;<em>Classes to manage multiple images (ImageArray..)</em>
217 <LI> \ref PixelNeighborhood
218 <BR>&nbsp;&nbsp;&nbsp;<em>Easy access to the 4- and 8-neighbors of a pixel</em>
219 <LI> \ref ImageIterators
220 <BR>&nbsp;&nbsp;&nbsp;<em>Basic image iterator implementations </em>
221 <LI> \ref ImageIteratorAdapters
222 <BR>&nbsp;&nbsp;&nbsp;<em>Iterate over rows, columns, and other image subsets </em>
223 <LI> \ref DataAccessors
224 <BR>&nbsp;&nbsp;&nbsp;<em>Basic templates to encapsulate access to the data of an iterator</em>
225 <LI> \ref ArgumentObjectFactories
226 <BR>&nbsp;&nbsp;&nbsp;<em>Factory functions to create argument objects which simplify long argument lists </em>
227 </UL>
228 */
229
230 /** \page MultiDimensionalArrays Multi-Dimensional Arrays and Iterators
231
232 <UL style="list-style-image:url(documents/bullet.gif)">
233 <LI> \ref vigra::MultiArrayView
234 <BR>&nbsp;&nbsp;&nbsp;<em>Interface for multi-dimensional arrays </em>
235 <LI> \ref vigra::MultiArray
236 <BR>&nbsp;&nbsp;&nbsp;<em>Array class that holds the actual memory</em>
237 <LI> \ref MultiMathModule
238 <BR>&nbsp;&nbsp;&nbsp;<em>Arithmetic and algebraic expressions for multi-dimensional arrays</em>
239 <LI> \ref MultiArrayTags
240 <BR>&nbsp;&nbsp;&nbsp;<em>Meta-programming tags to mark arrays as strided or unstrided</em>
241 <LI> \ref MultiIteratorPage
242 <BR>&nbsp;&nbsp;&nbsp;<em>Iterators for multi-dimensional arrays</em>
243 <LI> \ref vigra::MultiArrayNavigator
244 <BR>&nbsp;&nbsp;&nbsp;<em>Navigator utility for multi-dimensional arrays</em>
245 <LI> \ref VolumeImpex
246 <BR>&nbsp;&nbsp;&nbsp;<em>Import/export of volume data.</em>
247 <LI> \ref MultiPointoperators
248 <BR>&nbsp;&nbsp;&nbsp;<em>Point operators on multi-dimensional arrays</em>
249 <LI> \ref MultiArrayConvolutionFilters
250 <BR>&nbsp;&nbsp;&nbsp;<em>Convolution filters in arbitrary dimensions</em>
251 <LI> \ref FourierTransform
252 <BR>&nbsp;&nbsp;&nbsp;<em>Fast Fourier transform for arrays of arbitrary dimension</em>
253 <LI> \ref resizeMultiArraySplineInterpolation()
254 <BR>&nbsp;&nbsp;&nbsp;<em>Interpolation of arrays in arbitrary dimensions</em>
255 <LI> \ref MultiArrayDistanceTransform
256 <BR>&nbsp;&nbsp;&nbsp;<em>Separable distance transform for arrays of arbitrary dimension</em>
257 <LI> \ref MultiArrayMorphology
258 <BR>&nbsp;&nbsp;&nbsp;<em>Separable morphology with parabola structuring function for arrays of arbitrary dimension</em>
259 <LI> \ref labelVolume(), \ref seededRegionGrowing3D(), \ref watersheds3D(), \ref localMinima3D(), \ref localMaxima3D(),
260 <BR>&nbsp;&nbsp;&nbsp;<em>3-dimensional image (i.e. volume) analysis</em>
261 <LI> \ref VoxelNeighborhood
262 <BR>&nbsp;&nbsp;&nbsp;<em>Easy access to the 6- and 26-neighbors of a voxel</em>
263 <LI> \ref vigra::NumpyArray and \ref vigra::NumpyAnyArray
264 <BR>&nbsp;&nbsp;&nbsp;<em>Provide the VIGRA multi array interface for Python arrays</em>
265 </UL>
266 */
285 */
286
287 /** \addtogroup ConvolutionFilters Convolution Filters
288
289 The functions in this group implement separable convolutions (e.g. smoothing and sharpening,
290 Gaussian derivatives) and related filters (like the gradient magnitude) on
291 arbitrary-dimensional arrays. In addition, non-separable filters are supported for 2D images.
292 All functions accept the \ref vigra::MultiArrayView API (which can be wrapped around a wide
293 variety of data structures) as well as a number of deprecated APIs such as \ref ImageIterators.
294 */
295
296 /** \addtogroup GeometricTransformations Geometric Transformations
297
298 Resize or warp an array using various interpolation schemes or just pixel repetition.
299
300 See also: \ref Registration, \ref vigra::SplineImageView
301 */
302
303 /** \addtogroup DistanceTransform Distance Transform
304
305 The functions in this group perform Euclidean distance transforms in arbitrary dimensions,
306 as well as Manhattan and chessboard distance transforms in 2D (this can easily be extended to nD
307 if need arises). In addition, a number of related transforms such as vector distance transforms,
308 eccentricity transforms, and 2D skeleton computations are offered.
309 */
310
311 /** \addtogroup Superpixels Superpixel Creation
312
313 Watersheds, SLIC superpixels, and seeded region growing.
314 */
315
316 /** \addtogroup ParallelProcessing Parallel Processing
317
318 \sa These algorithms and data structures also support parallel processing:
319 */
267320
268321 /** \page ImportExport Image Import and Export
269322
302355
303356 */
304357
305 /** \page ImageProcessing Image Processing
306
307 <UL style="list-style-image:url(documents/bullet.gif)">
308 <LI> \ref PointOperators
309 <BR>&nbsp;&nbsp;&nbsp;<em>algorithms and functors for image arithmetic, inspection, transformations etc.</em>
310 <LI> \ref MultiMathModule
311 <BR>&nbsp;&nbsp;&nbsp;<em>Arithmetic and algebraic expressions for multi-dimensional arrays</em>
312 <LI> \ref FunctorExpressions
313 <BR>&nbsp;&nbsp;&nbsp;<em>Expression templates for automated functor creation</em>
314 <LI> \ref GeometricTransformations "Resize and Other Geometric Image Transformations"
315 <BR>&nbsp;&nbsp;&nbsp;<em>resize and interpolation, image mirroring, rotation, arbitrary affine transformations</em>
316 <LI> \ref vigra::SplineImageView
317 <BR>&nbsp;&nbsp;&nbsp;<em>Wrap a discrete image as a continuous function</em>
318 <LI> \ref Convolution
319 <BR>&nbsp;&nbsp;&nbsp;<em>1D, 2D, and nD filters, including separable and recursive convolution</em>
320 <LI> \ref NonLinearDiffusion
321 <BR>&nbsp;&nbsp;&nbsp;<em>Edge-preserving smoothing and denoising</em>
322 <LI> \ref Correlation
323 <BR>&nbsp;&nbsp;&nbsp;<em>Fast and slow algorithms to estimate the correlation between images</em>
324 <LI> \ref Registration
325 <BR>&nbsp;&nbsp;&nbsp;<em>Transforming different images into a common coordinate system</em>
326 <LI> \ref FourierTransform
327 <BR>&nbsp;&nbsp;&nbsp;<em>forward and backward FFT, cosine transform, and related
328 functionality</em>
329 <LI> \ref GaborFilter
330 <BR>&nbsp;&nbsp;&nbsp;<em>Gabor filter generation and related
331 functionality</em>
332 <LI> \ref TensorImaging
333 <BR>&nbsp;&nbsp;&nbsp;<em>Tensor image processing</em>
334 <LI> \ref Morphology
335 <BR>&nbsp;&nbsp;&nbsp;<em>erosion, dilation, and median with disc structuring functions</em>
336 <LI> \ref NoiseNormalization
337 <BR>&nbsp;&nbsp;&nbsp;<em>transform intensity-dependent noise into additive Gaussian noise</em>
338 <LI> \ref SlantedEdgeMTF
339 <BR>&nbsp;&nbsp;&nbsp;<em>determine the magnitude transfer function (MTF) of a camera using the slanted edge method</em>
340 </UL>
341 */
342
343 /** \page ImageAnalysis Image Analysis
344
345 <UL style="list-style-image:url(documents/bullet.gif)">
346 <LI> \ref InspectAlgo and \ref InspectFunctor
347 <BR>&nbsp;&nbsp;&nbsp;<em>Statistical analysis of images and regions</em>
348 <LI> \ref FeatureAccumulators
349 <BR>&nbsp;&nbsp;&nbsp;<em>Computation of global and per-region statistics of multi arrays via accumulators framework</em>
350 <LI> \ref vigra::Threshold
351 <BR>&nbsp;&nbsp;&nbsp;<em>Good old thresholding</em>
352 <LI> \ref Labeling
353 <BR>&nbsp;&nbsp;&nbsp;<em>Connected components labeling using 4 or 8 connectivity </em>
354 <LI> \ref LocalMinMax
355 <BR>&nbsp;&nbsp;&nbsp;<em>Including extremal plateaus larger than 1 pixel</em>
356 <LI> \ref DistanceTransform
357 <BR>&nbsp;&nbsp;&nbsp;<em>Distance transform using Euclidean, Manhattan, or chessboard metrics </em>
358 <LI> \ref TensorImaging
359 <BR>&nbsp;&nbsp;&nbsp;<em>Tensor image analysis</em>
360 <LI> \ref EdgeDetection
361 <BR>&nbsp;&nbsp;&nbsp;<em>Edge detectors based on first and second derivatives</em>
362 <LI> \ref CornerDetection
363 <BR>&nbsp;&nbsp;&nbsp;<em>Measure the 'cornerness' at each pixel </em>
364 <LI> \ref SymmetryDetection
365 <BR>&nbsp;&nbsp;&nbsp;<em>Measure the local symmetry at each pixel </em>
366 <LI> \ref SeededRegionGrowing
367 <BR>&nbsp;&nbsp;&nbsp;<em>Region growing, watersheds, and Voronoi tessellation</em>
368 </UL>
369 */
370
371358 /** \page AlgebraicConcepts Algebraic Concepts
372359
373360 The algebraic concepts describe requirements for algebraic types, that is
00 /** \page Tutorial Tutorial
1
1
22 This tutorial will help you to learn VIGRA's most important concepts by means of simple examples. The tutorial consists of the following parts:
3
4 <ul style="list-style-image:url(documents/bullet.gif)">
3
4 <ul style="list-style-image:url(documents/diamond.gif)">
55 <li> \ref MultiDimensionalArrayTutorial
66 <BR>&nbsp;&nbsp;&nbsp;<em>VIGRA's most important data structure</em>
7 <ul style="list-style-image:url(documents/bullet.gif)">
7 <ul style="list-style-image:url(documents/diamond.gif)">
88 <li> \ref MultiArrayBasics
99 <li> \ref MultiArrayIndexing
1010 <li> \ref MultiArrayScanOrder
1818 <li> \ref MultiArray_unstrided
1919 </ul>
2020 </ul>
21
21
2222 <li> \ref MultiArrayArithmeticTutorial
2323 <BR>&nbsp;&nbsp;&nbsp;<em>mathematical operations on MultiArrays</em>
24 <ul style="list-style-image:url(documents/bullet.gif)">
24 <ul style="list-style-image:url(documents/diamond.gif)">
2525 <li> \ref MultiMathModule "Array Expressions"
2626 <li> \ref LinearAlgebraModule "Linear Algebra"
2727 <li> \ref MultiPointoperators "STL-style transformation algorithms"
2828 <li> \ref FeatureAccumulators
2929 </ul>
30
30
3131 <li> \ref ImageInputOutputTutorial
3232 <BR>&nbsp;&nbsp;&nbsp;<em>importing and exporting images and arbitrary-dimensional arrays</em>
33 <ul style="list-style-image:url(documents/bullet.gif)">
33 <ul style="list-style-image:url(documents/diamond.gif)">
3434 <li> \ref Impex2D
3535 <li> \ref ImpexND
3636 </ul>
3737
3838 <li> \ref ImageProcessingTutorial
3939 <BR>&nbsp;&nbsp;&nbsp;<em>basic applications of VIGRA's functions</em>
40 <ul style="list-style-image:url(documents/bullet.gif)">
40 <ul style="list-style-image:url(documents/diamond.gif)">
4141 <li> \ref CallingConventions
4242 <li> \ref ImageInversion
4343 <li> \ref ImageBlending
4646 <ul type="disc">
4747 <li> \ref Convolve2DTutorial
4848 <li> \ref SeparableConvolveTutorial
49 <li> \ref ParallelConvolveTutorial
4950 </ul>
5051 </ul>
5152
53 <li> \ref ImageSegmentationTutorial
54 <BR>&nbsp;&nbsp;&nbsp;<em>extracting meaningful regions from pixels</em>
55 <ul style="list-style-image:url(documents/diamond.gif)">
56 <li> \ref SuperpixelsTutorial
57 <li> \ref RAGTutorial
58 <li> \ref ClusteringTutorial
59 </ul>
60
5261 <li> \ref OwnFunctionsTutorial
5362 <BR>&nbsp;&nbsp;&nbsp;<em>... without getting confused by templates</em>
5463
5564 <li> \ref PythonBindingsTutorial
5665 <BR>&nbsp;&nbsp;&nbsp;<em>scripting with VIGRA made easy</em>
57
66
5867 </ul>
5968 */
6069
6170 /** \page MultiDimensionalArrayTutorial Multi-Dimensional Arrays
6271
6372 <h2>Section Contents</h2>
64
65 <ul style="list-style-image:url(documents/bullet.gif)">
73
74 <ul style="list-style-image:url(documents/diamond.gif)">
6675 <li> \ref MultiArrayBasics
6776 <li> \ref MultiArrayIndexing
6877 <li> \ref MultiArrayScanOrder
7685 <li> \ref MultiArray_unstrided
7786 </ul>
7887 </ul>
79
88
8089 \section MultiArrayBasics Basic MultiArray Usage
81
82 \ref vigra::MultiArray is the most fundamental data structure in VIGRA. It holds a rectangular block of values in arbitrary many dimensions. Most VIGRA functions operate on top of MultiArray or the associated class MultiArrayView (see \ref MultiArrayViewBasics).
83
90
91 \ref vigra::MultiArray is the most fundamental data structure in VIGRA. It holds a rectangular block of values in arbitrary many dimensions. Most VIGRA functions operate on top of MultiArray or the associated class MultiArrayView (see \ref MultiArrayViewBasics).
92
8493 A 2D image can be interpreted as a matrix, i.e. a 2-dimensional array, where each element holds the information of a specific pixel. Internally, the data are stored in a single 1-dimensional piece of memory, and MultiArray encapsulates the entire mapping between our familiar 2-dimensional notation and the raw memory. Pixels in an image are identified by a coordinate pair (x,y), where indexing starts at 0. That is, the pixels in an 800x600 image are indexed by <tt>x = 0,...,799 and y = 0,...,599</tt>. The principle analogously extends to higher dimensions.
85
86 The structure of a multidimensional array is given by its <tt>shape</tt> vector, and the length of the shape vector is the array's <i>dimension</i>. The dimension must be fixed as a template parameter at compile time, while the shape is passed to the array's constructor. The second important template parameter is the pixel's <tt>value_type</tt>, as you know it from <tt>std::vector</tt>.
87
88 To represent the data of a gray scale image, we just need to store one value per pixel, so we
89 choose a 2-dimensional array, where each element has the <tt> unsigned char </tt> type
94
95 The structure of a multidimensional array is given by its <tt>shape</tt> vector, and the length of the shape vector is the array's <i>dimension</i>. The dimension must be fixed as a template parameter at compile time, while the shape is passed to the array's constructor. The second important template parameter is the pixel's <tt>value_type</tt>, as you know it from <tt>std::vector</tt>.
96
97 To represent the data of a gray scale image, we just need to store one value per pixel, so we
98 choose a 2-dimensional array, where each element has the <tt> unsigned char </tt> type
9099 (in VIGRA, this type is also available as \ref vigra::UInt8). We instantiate a gray scale image object like this:
91100
92101 \code
93102 #include <vigra/multi_array.hxx>
94
103
95104 using namespace vigra; // for brevity in the examples - don't do this in header files!
96
105
97106 int width = ..., height = ...;
98107 MultiArray<2, UInt8> image(Shape2(width, height));
99108 \endcode
100
109
101110 By default, VIGRA arrays are <b>always zero-initialized</b>. Another initial value can be provided in the constructor, or later via the <tt>init()</tt> function or the assignment operator:
102
111
103112 \code
104113 MultiArray<2, UInt8> image(Shape2(width, height), 255); // init with value 255
105
114
106115 image.init(128); // same effect, different initial value
107116 image = 100; // yet another way
108117 \endcode
109
110 The <tt>Shape2</tt> typedef also exists for higher dimensions up to five as <tt>Shape3</tt> etc. If you need even more dimensions, use <tt>MultiArrayShape<N>::type</tt> instead, where N is the number of dimensions:
118
119 The <tt>Shape2</tt> typedef also exists for higher dimensions up to five as <tt>Shape3</tt> etc. If you need even more dimensions, use <tt>MultiArrayShape<N>::type</tt> instead, where N is the number of dimensions:
111120
112121 \code
113122 // dimension 0 runs from 0, 1, ..., 299
114123 // dimension 1 runs from 0, 1, ..., 199
115124 // dimension 2 runs from 0, 1, ..., 99
116125 MultiArray<3, double> volume(Shape3(300, 200, 100));
117
126
118127 MultiArray<7, float> array7D(MultiArrayShape<7>::type(20, 10, ...));
119 \endcode
128 \endcode
120129
121130 When storing RGB images we obviously can't simply use the unsigned char type because every pixel contains 3 numbers: values for red, green and blue. Mathematically, you want to store a data vector for each pixel. To this end, VIGRA
122131 provides the <tt>vigra::RGBValue<ValueType></tt> class. So for RGB-images just use: </p>
129138
130139 Alternatively you can use a 3-dimensional array <tt>vigra::MultiArray<3, unsigned
131140 char></tt> to represent a color image. The third dimension has size 3 and contains the
132 information for the red, green and blue channel.
133
141 information for the red, green and blue channel.
142
134143 \code
135144 MultiArray<3, UInt8> rgb_array(Shape3(256, 128, 3));
136145 \endcode
154163 \endcode
155164
156165 <B>Important Remark:</B> Notice that VIGRA follows the mathematical convention of the index order: dimension 0 corresponds to the x (horizontal) coordinate, dimension 1 to the y (vertical) coordinate, and so on. Accordingly, dimension 0 is changing fastest in memory: when we increase x by one, we get to the next memory location. In matrix jargon, this is also known as <i>Fortran order</i>. Many image processing libraries (e.g. <a href="http://www.imagemagick.org/">Image Magick</a>, <a href="http://opencv.willowgarage.com/">OpenCV</a>, and <a href="http://qt-project.org/">Qt</a>) use the same convention. However, others like Matlab and numpy, use the reverse order (so called <i>C order</i>). Don't be confused!
157
166
158167 Internally, shape objects are implemented in terms of the powerful \ref vigra::TinyVector class. This means that shape objects support the usual mathematical operations like addition, multiplication and scalar products. Coordinate computations can thus be performed on entire coordinate objects at once - there is no need to manipulate the individual coordinates separately.
159
168
160169 Nonetheless, in some circumstances it is more convenient to provide coordinates individually rather than in a shape object. This is possible with round brackets (x,y):
161170
162171 \code
163172 // access via individual coordinates
164173 image(1,2) = 22;
165174 \endcode
166
175
167176 This kind of indexing is supported for dimensions up to five, and only if the array's dimension is known (this is not always the case: in a generic function where the dimension is a template parameter, you must use shape objects).
168 In combination with the method <tt>shape(n)</tt>, that returns the length of the n-th dimension,
177 In combination with the method <tt>shape(n)</tt>, that returns the length of the n-th dimension,
169178 we can use the coordinates to set the element of an entire row or column:
170179
171180 \code
183192 // bind x=2
184193 p[0] = 2;
185194 // iterate over row 2
186 for(p[1]=0; p[1]<image.shape(1); ++p[1])
195 for(p[1]=0; p[1]<image.shape(1); ++p[1])
187196 image[p] = 7;
188197 \endcode
189198
190199 We will discuss more powerful methods to access certain parts of an array in section \ref MultiArrayMethods.
191200
192201 \section MultiArrayScanOrder One-dimensional Indexing and Scan-Order Iterator
193
194 Regardless of the array's dimension, it is always possible to access elements with 1-dimensional index, its <i>scan-order index</i>, via the normal indexing operator. For example, <tt>array[1]</tt> refers to the index of the second array element. Defining a scan order is often called <i>flattening</i> of an array, because a high-dimensional data structure is accessed like a 1-dimensional vector. Notice that scan-order access in VIGRA does not require the data to be copied.
195
196 VIGRA defines scan-order by considering the dimensions from front to back. Thus, items are accessed such that only the x coordinate is incremented, while y (and possibly further coordinates) are held fixed at 0. When x is exhausted, y is incremented by one and the iteration starts again at x=0. To control iteration, the function <tt>array.size()</tt> returns the total number of elements:
202
203 Regardless of the array's dimension, it is always possible to access elements with 1-dimensional index, its <i>scan-order index</i>, via the normal indexing operator. For example, <tt>array[1]</tt> refers to the index of the second array element. Defining a scan order is often called <i>flattening</i> of an array, because a high-dimensional data structure is accessed like a 1-dimensional vector. Notice that scan-order access in VIGRA does not require the data to be copied.
204
205 VIGRA defines scan-order by considering the dimensions from front to back. Thus, items are accessed such that only the x coordinate is incremented, while y (and possibly further coordinates) are held fixed at 0. When x is exhausted, y is incremented by one and the iteration starts again at x=0. To control iteration, the function <tt>array.size()</tt> returns the total number of elements:
197206
198207 \code
199208 MultiArray<2, int> intArray(Shape2(3,2));
200209
201210 for(int k=0; k<intArray.size(); ++k)
202211 intArray[k] = k+1;
203
212
204213 // the array now contains the values
205214 //
206215 // 1 2 3
207216 // 4 5 6
208217 \endcode
209
218
210219 Alternatively, scan-order access can be achieved with an STL-compatible iterator pair obtained by calling <tt>array.begin()</tt> and <tt>array.end()</tt>. Continuing with the example above, we can write:
211220
212221 \code
217226 for (Iter i = intArray.begin(); i != intArray.end(); ++i)
218227 std::cout << *i << " ";
219228 std::cout << std::endl;
220
229
221230 // output: 1 2 3 4 5 6
222231 \endcode
223232
224 The iterator is implemented by class <tt>StridedScanOrderIterator</tt> which encapsulates all the bookkeeping necessary to get the elements in the correct order, even when the array was transposed (see below).
225
226 Scan-order access is useful to implement pointwise operations, e.g. the addition of two matrices. The following code adds two matrices and stores the result in the first one:
233 The iterator is implemented by class <tt>StridedScanOrderIterator</tt> which encapsulates all the bookkeeping necessary to get the elements in the correct order, even when the array was transposed (see below).
234
235 Scan-order access is useful to implement pointwise operations, e.g. the addition of two matrices. The following code adds two matrices and stores the result in the first one:
227236
228237 \code
229238 MultiArray<2, int> matrix1(Shape2(3,3)),
233242 // use indexing
234243 for (int i=0; i < matrix1.size(); ++i)
235244 matrix1[i] += matrix2[i];
236
245
237246 // use iterators
238247 for (Iter i = matrix1.begin(), j = matrix2.begin(); i != matrix1.end(); ++i, ++j)
239248 *i += *j;
259268
260269 For more information on mathematical operations on arrays see the \ref MultiMathModule "multi_math" module.
261270
262 As mentioned, VIGRA's scan order is similar to the NumPy-method <tt>array.flatten()</tt>. You use it,
271 As mentioned, VIGRA's scan order is similar to the NumPy-method <tt>array.flatten()</tt>. You use it,
263272 to copy a multi-dimensional array into an one-dimensional array, or to access elements in flattened order. The only
264273 difference is that NumPy uses "C-order" , i.e. the rightmost dimension takes priority, whereas
265274 VIGRA uses Fortran-order, i.e. the leftmost dimension takes priority. A method like flatten can be implemented in VIGRA as:
269278
270279 for(int k=0; k<intArray.size(); ++k)
271280 intArray[k] = k+1;
272
281
273282 // create 1D-array of appropriate size
274283 std::vector<int> flatArray(intArray.size());
275
284
276285 // copy 2D-array into 1D-array using the STL
277286 std::copy(intArray.begin(), intArray.end(), flatArray.begin());
278
287
279288 // print 1D-array on console
280 // (same output as printing from the StridedScanOrderIterator directly)
289 // (same output as printing from the StridedScanOrderIterator directly)
281290 for (std::vector<int>::iterator i = flatArray.begin(); i != flatArray.end(); ++i)
282291 std::cout << *i << " ";
283292 std::cout << std::endl;
287296 used C-order in the code above:
288297
289298 \verbatim
290 flatArray - index 0 1 2 3 4 5
299 flatArray - index 0 1 2 3 4 5
291300 -----------------------------------------------------------------
292301 VIGRA-output: 1 2 3 4 5 6
293302 intArray - index [0,0] [1,0] [2,0] [0,1] [1,1] [2,1]
298307
299308 To change the axis priorities of the StridedScanOrderIterator, look at the transpose-function
300309 in the next section.
301
310
302311 \section MultiArrayMethods Important MultiArray Methods
303312
304313 This part of the tutorial explains important methods of MultiArray. However, before we proceed, we need to introduce the class \ref vigra::MultiArrayView.
305
314
306315 \subsection MultiArrayViewBasics The MultiArrayView Interface
307
316
308317 A \ref vigra::MultiArrayView has the same interface as a MultiArray (with the exception of <tt>reshape()</tt> and <tt>swap()</tt>), but it doesn't own its data. Instead, it provides a <i>view</i> onto the data of some other array. In contrast, a MultiArray owns its data and is responsible for freeing it in the destructor. MultiArrays are automatically converted into MultiArrayViews when needed.
309
318
310319 The point of this distinction is that MultiArrayViews can be used to access and manipulate the same data in many different ways <i>without any need for creating copies</i>. For example, we can work with a 2-dimensional slice of a volume dataset (i.e. a lower dimensional part of a 3D array) without first copying the slice into a 2D image. This is possible whenever the desired view can be realized by just manipulating the internal <i>mapping</i> from indices and shapes to memory locations, and not the memory layout itself.
311
320
312321 This possibility -- which is similarly implemented in other packages like Matlab and numpy -- is a key ingredient for efficient computations with multi-dimensional arrays. Thus, most VIGRA functions actually receive MultiArrayViews to maximize flexibility. This section describes the most important ways to create new MultiArrayViews from an existing array or view. The complete documentation is available in the \ref vigra::MultiArrayView reference.
313322
314323 <hr>
315324
316325 \subsection MultiArray_subarray subarray(p,q)
317
326
318327 This method creates a rectangular subarray of your array between the points p and q, where p (the starting point of the subregion) is included, q (the ending point) is not. <tt>subarray</tt> does not change the dimension of the array (this is the task of the various <tt>bind</tt>-methods).
319
328
320329 To give an example, we create a 4x4 array that consists of a checkerboard with 2x2 squares:
321330
322331 \code
323332 MultiArray<2, float> array_4x4(Shape2(4,4)); // zero (black) initialized
324
333
325334 // paint the upper left 2x2 square white
326335 array_4x4.subarray(Shape2(0,0), Shape2(2,2)) = 1.0;
327
328 // likewise for the lower right 2x2 square, but this time we
336
337 // likewise for the lower right 2x2 square, but this time we
329338 // store the array view explicitly for illustration
330339 MultiArrayView<2, float> lower_right_square = array_4x4.subarray(Shape2(2,2), Shape2(4,4));
331340 lower_right_square = 1.0;
332
341
333342 // contents of array_4x4 now:
334343 // 1 1 0 0
335344 // 1 1 0 0
344353 \skip // read image
345354 \until exportImage
346355
347 After reading the (here: gray scale) image data to an array we need to calculate the
348 coordinates of our subimage. In this case we want to cut out the middle part of the image.
356 After reading the (here: gray scale) image data to an array we need to calculate the
357 coordinates of our subimage. In this case we want to cut out the middle part of the image.
349358 Afterwards we write the subimage into a new array. Look at the result:
350359
351360 <Table cellspacing = "10">
358367 <hr>
359368
360369 \subsection MultiArray_bind bind<M>(i) and bindAt(M, i)
361
362 These methods bind axis M to the index i and thus reduce the dimension of the array by one. The only difference between the two forms is that the axis to be bound must be known at compile time in the first form, whereas it can be specified at runtime in the second.
363
370
371 These methods bind axis M to the index i and thus reduce the dimension of the array by one. The only difference between the two forms is that the axis to be bound must be known at compile time in the first form, whereas it can be specified at runtime in the second.
372
364373 Binding is useful when we want to access and/or manipulate a particular row or column of an image, or a single slice of a volume. In principle, the same can also be achieved by explicit loops, but use of <tt>bind</tt> often leads to more elegant and more generic code. Consider the following code to initialize the third column of an image with the constant 5:
365374
366375 \code
370379 // initialize column 2 with value 5 using a loop
371380 for(int y=0; y<array2d.shape(1); ++y)
372381 array2d(2, y) = 5;
373
382
374383 // the same using bind
375384 array2d.bind<0>(2) = 5;
376385 \endcode
387396 MultiArray<1, int> array1d = array2d.bind<0>(2);
388397 \endcode
389398
390 The array <tt>array1d</tt> contains the elements of the 3rd column of <tt>array2d</tt>. This behavior nicely illustrates the difference between a copy and a view: <tt>array1d</tt> contains a copy of the 3rd column, whereas the <tt>bind</tt> function only creates a new view to the existing data in <tt>array2d</tt>.
391
392 At this point we have to distinguish between the classes <tt> MultiArray </tt> and
399 The array <tt>array1d</tt> contains the elements of the 3rd column of <tt>array2d</tt>. This behavior nicely illustrates the difference between a copy and a view: <tt>array1d</tt> contains a copy of the 3rd column, whereas the <tt>bind</tt> function only creates a new view to the existing data in <tt>array2d</tt>.
400
401 At this point we have to distinguish between the classes <tt> MultiArray </tt> and
393402 <tt> MultiArrayView </tt>. MultiArray inherits from MultiArrayView and contains the
394 memory management of the array. With MultiArrayView we can view the data stored in a
403 memory management of the array. With MultiArrayView we can view the data stored in a
395404 MultiArray. The code above produces a copy of the 3rd column of <tt>array2d</tt>. If we change the
396405 elements of <tt>array1d</tt> nothing happens to <tt> array2d </tt>.
397406
401410
402411 // initialize new 1D array with 3rd column of a 2D array
403412 MultiArray<1, int> array1d = array2d.bind<0>(2);
404
413
405414 // overwrite element [0] of array1d
406415 array1d[0] = 1;
407
416
408417 // this has no effect on the original array2d
409418 // output: 0 1
410419 std::cout << array2d(2, 0) << " " << array1d[0] << std::endl;
411
420
412421 // initialize a view and overwrite element [0]
413422 MultiArrayView<1, int> array_view = array2d.bind<0>(2);
414423 array_view[0] = 2;
415
424
416425 // now, the original array2d has changed as well
417426 // output: 2 2
418427 std::cout << array2d(2, 0) << " " << array_view[0] << std::endl;
419428 \endcode
420429
421 Moving on to image processing we'll give an example how you can flip an image by using
422 bind. We read a gray scale image into a 2-dimensional array called <tt> imageArray </tt>.
430 Moving on to image processing we'll give an example how you can flip an image by using
431 bind. We read a gray scale image into a 2-dimensional array called <tt> imageArray </tt>.
423432 Then we initialize a new array <tt> newImageArray </tt> of the same dimension and size
424 and set the first row of <tt> newImageArray </tt> to the values of the last row of
433 and set the first row of <tt> newImageArray </tt> to the values of the last row of
425434 <tt> imageArray </tt>, the second row to the values of the second last row and so on.
426435 Hence, we flip the image top to bottom.
427436
428437 \dontinclude mirror_tutorial.cxx
429 \skip // mirror the image horizontally
438 \skip // mirror the image horizontally
430439 \until }
431
432 However, you don't need to implement a method like this yourself because VIGRA already provides the
440
441 However, you don't need to implement a method like this yourself because VIGRA already provides the
433442 function \ref reflectImage(). We use this function to flip the image left-to-right:
434
443
435444 \dontinclude mirror_tutorial.cxx
436445 \skip // mirror the image vertically
437446 \until reflectImage
438
447
439448 The complete example can be found in <a href="mirror_tutorial_8cxx-example.html">mirror_tutorial.cxx</a>.
440 (This program needs an infile and an outfile as command-line arguments and contains additional I/O code
449 (This program needs an infile and an outfile as command-line arguments and contains additional I/O code
441450 which will be explained in section \ref ImageInputOutputTutorial.) Here you can see what happens to an input file:
442451
443452 <Table cellspacing = "10">
449458 </Table>
450459
451460 For completeness, there are five additional versions of the bind()-method:
452
461
453462 <DL>
454463 <DT><b> bindInner(i) </b> with scalar or multi-dimensional index i:</DT>
455464 <DD> if i is an <tt> integer </tt>, the innermost dimension (axis 0) is fixed to i, <br>
456 if i is <tt>MultiArrayShape<M>::type</tt> (a shape of size M), then the M innermost
465 if i is <tt>MultiArrayShape<M>::type</tt> (a shape of size M), then the M innermost
457466 dimensions (axes 0...M-1) are fixed to the values in the shape vector </DD>
458467 <DT><b> bindOuter(i) </b> with scalar or multi-dimensional index i:</DT>
459468 <DD> if i is an <tt> integer </tt>, the outmost dimension (axis N-1) is fixed to i, <br>
460 if i is <tt>MultiArrayShape<M>::type</tt> (a shape of size M), then the M outmost dimensions
469 if i is <tt>MultiArrayShape<M>::type</tt> (a shape of size M), then the M outmost dimensions
461470 (axes N-M ... N-1) are fixed to the values in the shape vector </DD>
462471 <DT><b> diagonal() </b>:</DT>
463 <DD> Create a 1-dimensional view to the diagonal elements of the original array
472 <DD> Create a 1-dimensional view to the diagonal elements of the original array
464473 (i.e. <tt>view[i] == array(i,i,i)</tt> for a 3D original array). </DD>
465474 </DL>
466
475
467476 The opposite of binding - inserting a new axis - is also possible. However, since we cannot alter the internal memory layout and thus cannot insert additional data elements, a new axis must be a singleton axis, i.e. an axis with length 1. The argument of <tt>insertSingletonDimension(k)</tt> determines the position of the new axis, with <tt>0 <= k <= N</tt> when the original array has <tt>N</tt> dimensions:
468
477
469478 \code
470479 MultiArray<2, int> array(20,10);
471480 std::cout << array.insertSingletonDimension(1).shape() << "\n"; // prints "(20, 1, 10)"
472481 \endcode
473
482
474483 <hr>
475484
476485 \subsection MultiArray_vector_elements expandElements(k) and bindElementChannel(i)
477
486
478487 When the array elements are vectors (i.e. \ref vigra::TinyVector or \ref vigra::RGBValue), we can expand these elements into an additional array dimension:
479488 \code
480489 MultiArray<2, TinyVector<int, 3> > vector_array(20, 10);
481490 std::cout << vector_array.shape() << "\n"; // prints "(20, 10)"
482
491
483492 MultiArrayView<3, int> expanded(vector_array.expandElements(2));
484493 std::cout << expanded.shape() << "\n"; // prints "(20, 10, 3)"
485494 \endcode
486
487 The argument <tt>k</tt> of <tt>expandElements(k)</tt> determines the desired position of the channel axis, i.e. the index that refers to the vector elements. When the original vector array has <tt>N</tt> dimensions (not counting the channel axis), it is required that <tt>0 <= k <= N</tt>.
488
495
496 The argument <tt>k</tt> of <tt>expandElements(k)</tt> determines the desired position of the channel axis, i.e. the index that refers to the vector elements. When the original vector array has <tt>N</tt> dimensions (not counting the channel axis), it is required that <tt>0 <= k <= N</tt>.
497
489498 Often, we are only interested in a single channel of a vector-valued array. This can be achieved with the function <tt>bindElementChannel(i)</tt>. For example, we can extract the green channel (i.e. channel 1) from an RGB image like this:
490499 \code
491500 MultiArray<2, RGBValue<UInt8> > rgb_array(20, 10);
547556 Shape of array5D: (1, 2, 3, 4, 5)
548557 Shape of array5D view after default transpose(): (5, 4, 3, 2, 1)
549558 \endverbatim
550
559
551560 Finally, <tt>MultiArrayView::transpose()</tt> can also be called with a shape object that specifies the desired permutation of the axes: When <tt>permutation[k] = j</tt>, axis <tt>j</tt> of the original array becomes axis <tt>k</tt> of the transposed array (remember, that VIGRA counts the axes from 0):
552
561
553562 \dontinclude transpose.cxx
554563 \skip transpose to an explicitly specified axis permutation
555564 \until applied permutation
556
565
557566 The permutation in the example is 2,1,3,4,0. Thus, original dimension 0 appears in the last position of the new view, original dimension 2 appears in the first position, and so on as demonstrated by the output of the example:
558
567
559568 \verbatim
560569 Shape of array5D view after user-defined transpose(): (3, 2, 4, 5, 1)
561570 (applied permutation 2 => 0, 1 => 1, 3 => 2, 4 => 3, 0 => 4 to the axes)
580589
581590 <b>Important note:</b> Transposing an array also changes the direction of the StridedScanOrderIterator. Imagine a 3x4-
582591 matrix. Scan-order means that we iterate from left to right, row by row. Now, let's transpose the matrix to a 4x3 view. Then, scan-order in the new view is again left to right, row by row. However, in the original matrix this now corresponds to a transposed scan: from top to bottom, column by column. The same applies to the array's index operator with integer argument.
583
592
584593 <hr>
585594
586595 \subsection MultiArray_unstrided isUnstrided(k)
587
596
588597 A MultiArray always accesses its elements in consecutive memory order, i.e. <tt>&array[i] == &array.data()[i]</tt> for all <tt>i</tt> in the range <tt>[0, array.size())</tt>. However, this does in general not hold for MultiArrayViews, because changing array access is the whole point of view creation. Sometimes, it is necessary to find out if a view still has consecutive, unstrided memory access, for example when you want to pass on the view's data to an external library that only accepts plain C arrays: When the view happens to be unstrided, you can avoid creating a copy of the data. You can determine this with the function <tt>isUnstrided(k)</tt> which returns <tt>true</tt> when the array is unstrided up to dimension <tt>k</tt> (<tt>k</tt> defaults to <tt>N-1</tt>, i.e. the entire array must be unstrided):
589 \code
598 \code
590599 MultiArray<2, int> array(20,10);
591600 std::cout << array.isUnstrided() << " " << array.transpose().isUnstrided() << "\n"; // prints "true false"
592601 \endcode
617626 Usage: <TT>subimage_tutorial infile outfile</TT>
618627 */
619628
620
621 /** \page ImageInputOutputTutorial Image Input and Output
629 /** \example graph_agglomerative_clustering.cxx
630 Segment an image by hierarchical clustering on top of watershed superpixels
631 <br>
632 Usage: <TT>graph_agglomerative_clustering infile outfile</TT>
633 */
634
635
636 /** \page ImageInputOutputTutorial Image Input and Output
622637
623638 <h2>Section Contents</h2>
624 <ul style="list-style-image:url(documents/bullet.gif)">
639 <ul style="list-style-image:url(documents/diamond.gif)">
625640 <li> \ref Impex2D
626641 <li> \ref ImpexND
627642 </ul>
628643
629644 \section Impex2D Two-Dimensional Images
630
645
631646 In this section we'll show you how to import and export an image with VIGRA. If you
632 want to import an image from disk and enquire about its properties, you must use an
633 object of the <tt>vigra::ImageImportInfo</tt> class. It reads the header of the image file.
634 The constructor expects the file name, the file type will be determined automatically.
635
647 want to import an image from disk and enquire about its properties, you must use an
648 object of the <tt>vigra::ImageImportInfo</tt> class. It reads the header of the image file.
649 The constructor expects the file name, the file type will be determined automatically.
650
636651 The <tt>vigra::ImageImportInfo</tt> class currently recognizes the following file formats:
637652
638653 <DL>
669684 \dontinclude imageImportInfo_tutorial.cxx
670685 \skip read image
671686 \until numBands
672
687
673688
674689 As you can see, the <tt> ImageImportInfo </tt> object contains a lot of information,
675690 some of it is printed in the example. Using this image
692707 \dontinclude imageIO_tutorial.cxx
693708 \skip read image
694709 \until importImage(imageInfo
695
710
696711 If you already know the type of data in the file, you can also just pass the filename and a MultiArray, which will automatically be resized as appropriate:
697712
698713 \dontinclude imageIO_tutorial.cxx
699714 \skip if you don't need
700715 \until importImage(in_filename
701
716
702717 Writing the image data from an array to a file is quite similar. For this purpose, you use the function \ref vigra::exportImage(), which takes a 2D MultiArrayView and an \ref vigra::ImageExportInfo object or a string (the filename). The ImageExportInfo object also needs a file name, but gives you more control over how the image is written. The desired file format is guessed from the file name's extension (but can be overridden with the method <tt>ImageExportInfo::setFileType</tt>). Recognized extensions are: '.bmp', '.exr', '.gif', '.jpeg', '.jpg', '.p7', '.png', '.pbm', '.pgm', '.pnm', '.ppm', '.ras', '.tif', '.tiff', '.xv', '.hdr' (as for reading, '.exr' requires libopenexr, '.jpg' requires libjpeg, '.png' requires libpng and '.tif' requires libtiff). In the following example, we create and save a 160x160 pixels image, where the image is a checkerboard. The image is saved as "testimage.gif" in the same folder as the executed code.
703718
704719 \include imageExportInfo_tutorial.cxx
720735 \include imageIO_tutorial.cxx
721736
722737 The input image and the resulting output image are:
723
738
724739 <Table cellspacing = "10">
725740 <TR valign="bottom">
726741 <TD> \image html lenna_small.gif "input image" </TD>
737752 In this case, it is better to import and convert the data into a <tt>float</tt> array (i.e. <tt>vigra::MultiArray<2, float></tt>) instead of the simple <tt>unsigned char</tt> type in order to minimize rounding errors. When a file is imported into such an array, the conversion is automatically performed by the importImage() function. When an array is to be exported, the handling of <tt>float</tt> depends on the file format: If the file format supports float (currently: TIFF and VIFF), the data are written verbatim (unless this is explicitly overridden, see below). Otherwise, the data are mapped to <tt>unsigned char</tt> via a linear transform of the original range, followed by rounding (use \ref vigra::linearRangeMapping() to override this behavior by an explicit user-defined mapping).
738753
739754 The <tt>ImageExportInfo</tt> class provides a number of additional methods to <b>customize data export</b>, including:
740
755
741756 <DL>
742757 <DT><b>setCompression():</b></dt>
743758 <DD>Request compressed storage if the file format supports it.</DD>
748763 <DT><b> setXResolution(), setYResolution:</b></dt>
749764 <DD>Store resolution information for the two axes (ignored if unsupported by the file format).</DD>
750765 </DL>
751
766
752767 See \ref vigra::ImageExportInfo for a complete list and more details.
753768
754769 \section ImpexND Higher Dimensional Arrays
755
770
756771 The recommended file format for arrays of arbitrary dimension is <a href="http://www.hdfgroup.org/HDF5/">HDF5</a>. It supports all possible pixel types, arbitrary dimensions, on-the-fly compression, arbitrary many arrays per file, and flexible metadata storage along with arrays. See \ref VigraHDF5Impex for more information.
757772
758773 The functions \ref importVolume() and \ref exportVolume() support three additional methods to read and write 3D volume data:
782797 */
783798
784799 /** \page MultiArrayArithmeticTutorial Mathematics with Multi-Dimensional Arrays
785
800
786801 VIGRA supports various ways to perform mathematical operations (arithmetic and algebraic functions, linear algebra) on arrays. Most of these functions operate element-wise.
787
788 <ul style="list-style-image:url(documents/bullet.gif)">
802
803 <ul style="list-style-image:url(documents/diamond.gif)">
789804 <li> \ref MultiMathModule "Array Expressions"
790805 <BR>The \ref MultiMathModule "vigra::multi_math" module overloads the usual arithmetic operators and algebraic functions for array arguments, similar to Matlab and numpy. This leads to very efficient and readable code.
791806 <li> \ref LinearAlgebraModule "Linear Algebra"
794809 <BR>VIGRA also provides functions like <tt>transformMultiArray()</tt> that generalize the corresponding STL functions to multiple dimensions. The functors needed for these functions are most easily created with the \ref FunctorExpressions module, VIGRA's "lambda library". This approach offers more flexibility than the array expressions above.
795810 <li> \ref FeatureAccumulators
796811 <BR>The \ref FeatureAccumulators "vigra::acc" module provides powerful and efficient methods to compute statistics accross entire arrays or arbitrary subparts of them.
797 </ul>
812 </ul>
798813 */
799814
800815 /** \page OwnFunctionsTutorial Writing your own Functions
801
802 Sooner or later, you will want to implement your own functions on the basis of VIGRA's functionality. Some people believe that this is very difficult because one needs to provide a lot of template magic and full genericity. However, this is <i>not</i> true: Your VIGRA functions need not be templated at all -- function arguments can simply be hard-wired. In other cases, it makes sense to template on the pixel type, but leave everything else fixed. Full genericity should only be implemented step-by-step as needed.
803
804 As an example, consider again the image smoothing example program <a href="smooth_explicitly_8cxx-example.html">smooth_explicitly.cxx</a>. It makes sense to encapsulate the smoothing algorithm into a function of its own. When we only need to support <tt>float</tt> images, the function is simply a verbatim copy of the algorithm. In contrast to the original version, we now allow an arbitrary window radius to be passed to the algorithm, so that the amount of smoothing can be controlled (this also nicely illustrates the use of <tt>vigra_precondition()</tt> for \ref ErrorReporting):
805
816
817 Sooner or later, you will want to implement your own functions on the basis of VIGRA's functionality. Some people believe that this is very difficult because one needs to provide a lot of template magic and full genericity. However, this is <i>not</i> true: Your VIGRA functions need not be templated at all -- function arguments can simply be hard-wired. In other cases, it makes sense to template on the pixel type, but leave everything else fixed. Full genericity should only be implemented step-by-step as needed.
818
819 As an example, consider again the image smoothing example program <a href="smooth_explicitly_8cxx-example.html">smooth_explicitly.cxx</a>. It makes sense to encapsulate the smoothing algorithm into a function of its own. When we only need to support <tt>float</tt> images, the function is simply a verbatim copy of the algorithm. In contrast to the original version, we now allow an arbitrary window radius to be passed to the algorithm, so that the amount of smoothing can be controlled (this also nicely illustrates the use of <tt>vigra_precondition()</tt> for \ref ErrorReporting):
820
806821 \code
807822 void smooth(MultiArrayView<2, float> input, MultiArrayView<2, float> result, int radius)
808823 {
809824 vigra_precondition(radius >= 0, "smooth(): window radius must not be negative.");
810
825
811826 Shape2 current;
812827 for(current[1] = 0; current[1] < input.shape(1); ++current[1])
813828 {
821836 }
822837 }
823838 \endcode
824
825 If we don't need to support any higher dimension or other pixel type, we can just leave it at this -- no templates are required then.
826
839
840 If we don't need to support any higher dimension or other pixel type, we can just leave it at this -- no templates are required then.
841
827842 But suppose now that we want to generalize this code for arbitrary dimensional arrays. To do so, we specify the dimension <tt>N</tt> as a template parameter. Then we can no longer use <tt>Shape2</tt> because this class only works for 2-dimensional arrays. Instead, we use the <tt>MultiArrayShape</tt> traits class to ask for the appropriate shape object. Moreover, we cannot iterate over the array with two explicitly nested loops because the number of loops must correspond to the (unknown) number of dimensions. We can solve this problems by means of a \ref vigra::MultiCoordinateIterator from <tt>multi_iterator_coupled.hxx</tt> that iterates over all coordinates of an array, regardless of dimension. The current coordinate is returned by dereferencing the iterator:
828
843
829844 \code
830845 #include <vigra/multi_iterator_coupled.hxx>
831
846
832847 template <unsigned int N>
833848 void smooth(MultiArrayView<N, float> input, MultiArrayView<N, float> result, int radius)
834849 {
835850 vigra_precondition(radius >= 0, "smooth(): window radius must not be negative.");
836
851
837852 typedef typename MultiArrayShape<N>::type Shape;
838
853
839854 typename MultiCoordinateIterator<N> current(input.shape()),
840855 end = current.getEndIterator();
841
856
842857 for(; current != end; ++current)
843858 {
844859 Shape windowStart = max(Shape(0), *current - Shape(radius));
848863 }
849864 }
850865 \endcode
851
866
852867 Another useful generalization is in terms of the array's value_type. For one, we want to be able to smooth color images as well. Furthermore, most images are stored with pixel type <tt>unsigned char</tt>, and we don't want to force the user to convert them into <tt>float</tt> images before smoothing. We therefore specify the value_types as template parameters as well (notice that we allow input and result to have different types). In addition, we have to make the type of the sum in <tt>window.sum<...>()</tt> generic. However, there is a caveat: We cannot simply use the input value_type here, because this might lead to overflow. This is easily seen when the value_type is <tt>unsigned char</tt>: This type already overflows when the sum exceeds the value 255, which is very likely to happen even if the window is only 3x3. In situations like this, a suitable temporary type for the sum can be obtained from the <tt>RealPromote</tt> type in VIGRA's \ref NumericTraits "NumericTraits" class:
853
868
854869 \code
855870 template <unsigned int N, class InputValue, class ResultValue>
856 void smooth(MultiArrayView<N, InputValue> input,
857 MultiArrayView<N, ResultValue> result,
871 void smooth(MultiArrayView<N, InputValue> input,
872 MultiArrayView<N, ResultValue> result,
858873 int radius)
859874 {
860875 vigra_precondition(radius >= 0, "smooth(): window radius must not be negative.");
861
876
862877 typedef typename MultiArrayShape<N>::type Shape;
863878 typedef typename NumericTraits<InputValue>::RealPromote SumType;
864
879
865880 typename MultiCoordinateIterator<N> current(input.shape()),
866881 end = current.getEndIterator();
867
882
868883 for(; current != end; ++current)
869884 {
870885 Shape windowStart = max(Shape(0), *current - Shape(radius));
874889 }
875890 }
876891 \endcode
877
892
878893 These simple tricks already get you a long way in the advanced use of VIGRA. You will notice that many existing VIGRA functions are not implemented in terms of \ref vigra::MultiArrayView, but in terms of \ref ImageIterators "image iterators" and \ref MultiIteratorGroup "hierarchical iterators". However, these iterators are more difficult to use, so the MultiArrayView approach is recommended for new code.
879894 */
880895
881896 /** \page PythonBindingsTutorial VIGRA Python Bindings
882
897
883898 See also the full <a href="../vigranumpy/index.html">vigranumpy reference</a>!
884
899
885900 When you configure VIGRA with the option <tt>-DWITH_VIGRANUMPY=1</tt> while running cmake, a Python module <tt>vigra</tt> will be compiled and installed. It exposes most of VIGRA's functionality for easy scripting and prototyping in Python. Most importantly, VIGRA's Python bindings are fully integrated with the popular 'numpy' package so that you can call vigra functions directly with numpy <tt>ndarrays</tt>. No explicit or implicit conversion of data formats is required.
886
901
887902 The syntax of the Python version is usually very similar to the C++ syntax, with one important difference: You do not have to pass pre-allocated result arrays to the functions. That is, while the call to <tt>gaussianSmoothing()</tt> in C++ is written like this
888
889 \code
890 MultiArray<2, float> inputImage(Shape2(width, height)),
903
904 \code
905 MultiArray<2, float> inputImage(Shape2(width, height)),
891906 resultImage(inputImage.shape()); // pre-allocate result with correct shape
892907 ... // fill inputImage
893
908
894909 // smooth image with Gaussian filter with sigma=1.5
895910 // (pre-allocated resultImage must be passed to the function)
896911 gaussianSmoothing(inputImage, resultImage, 1.5);
897912 \endcode
898
913
899914 the corresponding Python call is
900
915
901916 \code
902917 >>> import numpy, vigra
903
918
904919 >>> inputImage = numpy.zeros((width, height), dtype=numpy.float32)
905920 ... # fill inputImage
906
921
907922 >>> resultImage = vigra.filters.gaussianSmoothing(inputImage, 1.5);
908923 \endcode
909
924
910925 The result image is automatically allocated and returned by the function. Nonetheless, it is still possible to pass a result array of appropriate shape explicitly by means of the <tt>out</tt> parameter:
911
926
912927 \code
913928 >>> resultImage = numpy.zeros(inputImage.shape, dtype=numpy.float32)
914929 >>> vigra.filters.gaussianSmoothing(inputImage, 1.5, out=resultImage)
915930 \endcode
916
931
917932 This is, for example, useful when the same result image should be reused in several calls of the same function to avoid the repeated creation of new result arrays. Another possible use is the application of a function to only a rectangular region-of-interest: When the full result array is already allocated, you can pass a view of the appropriate subarray to the <tt>out</tt> parameter in order to fill just the desired ROI.
918
933
919934 When a C++ function provides options, they are exposed on the Python side as keyword arguments:
920
935
921936 \code
922937 >>> labeling, max_label = vigra.analysis.watersheds(inputImage, seeds=seedImage, method='UnionFind')
923938 \endcode
924
939
925940 In general, the correspondence between a Python function and its C++ counterpart is straightforward, and the Python documentation frequently refers to the C++ documentation for details. However, there is a crucial difference: the default axis interpretation is different in VIGRA's <tt>MultiArray</tt> (which interprets axes as x, y, z, so-called 'Fortran' order) and in numpy's <tt>ndarray</tt> (which interprets them as z, y, x, so-called 'C'-order). To help you deal with this difficulty, vigranumpy provides a subclass <a href="../vigranumpy/index.html#axistags-and-the-vigraarray-data-structure">VigraArray</a> of <tt>ndarray</tt> and the concept of <a href="../vigranumpy/index.html#more-on-the-motivation-and-use-of-axistags">axistags</a>. Please take the time to read this material in order to avoid surprises.
926
941
927942 The full <a href="../vigranumpy/index.html">vigranumpy reference</a> is available via HTML or can be obtained directly at the Python prompt by the <tt>help()</tt> command:
928
943
929944 \code
930945 >>> help(vigra.filters.gaussianSmoothing)
931946 \endcode
932
947
933948 Another important difference between C++ and Python is that vigranumpy exposes most functions only for a restricted set of pixel types. This restriction is necessary because support for all possible type combinations would result in a combinatorial explosion and unreasonably large Python modules. In general, all functions are implemented for <tt>float</tt> pixel types (called <tt>numpy.float32</tt> on the Python side), and some provide <tt>uint8</tt> and/or <tt>uint32</tt> versions in addition. If you call a function with an unsupported pixel type, an error message listing the supported types will be printed:
934
949
935950 \code
936951 >>> a = vigra.ScalarImage((20,20), dtype=numpy.float64)
937952
939954 ArgumentError: Python argument types in
940955 vigra.filters.gaussianSmoothing(numpy.ndarray, int)
941956 did not match C++ signature:
942 gaussianSmoothing(class vigra::NumpyArray<3,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> array, class boost::python::api::object sigma, class vigra::NumpyArray<3,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> out=None, class boost::python::api::object sigma_d=0.0, class boost::python::api::object step_size=1.0, double window_size=0.0, class boost::python::api::object roi=None)
957 gaussianSmoothing(class vigra::NumpyArray<3,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> array, class boost::python::api::object sigma, class vigra::NumpyArray<3,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> out=None, class boost::python::api::object sigma_d=0.0, class boost::python::api::object step_size=1.0, double window_size=0.0, class boost::python::api::object roi=None)
943958
944959 gaussianSmoothing(class vigra::NumpyArray<4,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> array, class boost::python::api::object sigma, class vigra::NumpyArray<4,struct vigra::Multiband<float>,struct vigra::StridedArrayTag> out=None, class boost::python::api::object sigma_d=0.0, class boost::python::api::object step_size=1.0, double window_size=0.0, class boost::python::api::object roi=None)
945960 \endcode
946
947 The error message is automatically generated by boost::python and therefore rather technical. It says that <tt>%gaussianSmoothing()</tt> supports 3- and 4-dimensional arrays where the rightmost dimension is interpreted as a channel axis, and the pixel type must be <tt>float</tt> (these properties are indicated by the type specifications <tt>NumpyArray<3,struct vigra::Multiband<float></tt> and <tt>NumpyArray<4,struct vigra::Multiband<float></tt> respectively). Thus, the input array must be a <tt>float32</tt> image or volume with either no explicit channel axis (in which case a singleton channel axis will be inserted automatically) or with arbitrary many channels (e.g. RGB).
948
961
962 The error message is automatically generated by boost::python and therefore rather technical. It says that <tt>%gaussianSmoothing()</tt> supports 3- and 4-dimensional arrays where the rightmost dimension is interpreted as a channel axis, and the pixel type must be <tt>float</tt> (these properties are indicated by the type specifications <tt>NumpyArray<3,struct vigra::Multiband<float></tt> and <tt>NumpyArray<4,struct vigra::Multiband<float></tt> respectively). Thus, the input array must be a <tt>float32</tt> image or volume with either no explicit channel axis (in which case a singleton channel axis will be inserted automatically) or with arbitrary many channels (e.g. RGB).
963
949964 <a href="../vigranumpy/index.html#more-on-the-motivation-and-use-of-axistags">Axistags</a> allow vigranumpy to distinguish if a given 3-dimensional array is to be interpreted as a 2D image with multiple channels, or as a 3D volume with only a single channel. If no axistags are attached to the array, it is unspecified which version of an algorithm will be called. Axistags are automatically specified when arrays are created with one of the factory functions in the <tt>vigra</tt> module, for example:
950
965
951966 \code
952967 >>> a = vigra.ScalarImage((30, 20))
953968 >>> print("%s \n %r" % (a.shape, a.axistags))
969984 (30L, 20L, 10L, 3L)
970985 x y z c
971986 \endcode
972
987
973988 Axistags are encoded 'x', 'y', 'z' for the three spatial axes, 'c' for a channel axis, and 't' for a time axis. If the channel axis is missing, vigranumpy will assume that the array has only a single channel. That is, arrays with shape (30, 20, 1) and axistags 'x y c' are equivalent to arrays with shape (30, 20) and axistags 'x y'. Functions that change the order of the axes (such as <tt>%array.transpose()</tt>) or reduce the number of axes (e.g. <tt>array[:, 1, :]</tt>) also modify the axistags accordingly, so that you can always ask for the axis meaning by simply calling <tt>array.axistags</tt>.
974989 */
101101 class RegionCircularity; // compare perimeter of a 2D region with a circle of same area
102102 class RegionEccentricity; // ecentricity of a 2D region from major and minor axis
103103
104 class ConvexHull; // base class for convex hull features
104 #ifdef WITH_LEMON
105 class ConvexHull; // base class for convex hull computation
106 class ConvexHullFeatures; // base class for convex hull features
107 #endif
105108
106109 /*
107110 Quantiles other than minimum and maximum require more thought:
488491
489492 // ignore all modifiers of RegionContour and related features
490493 VIGRA_REDUCE_MODFIER(template <class> class A, A<RegionContour>, RegionContour)
494 #ifdef WITH_LEMON
491495 VIGRA_REDUCE_MODFIER(template <class> class A, A<ConvexHull>, ConvexHull)
496 VIGRA_REDUCE_MODFIER(template <class> class A, A<ConvexHullFeatures>, ConvexHullFeatures)
497 #endif // WITH_LEMON
492498 VIGRA_REDUCE_MODFIER(template <class> class A, A<RegionPerimeter>, RegionPerimeter)
493499 VIGRA_REDUCE_MODFIER(template <class> class A, A<RegionCircularity>, RegionCircularity)
494500 VIGRA_REDUCE_MODFIER(template <class> class A, A<RegionEccentricity>, RegionEccentricity)
5252 #include "eigensystem.hxx"
5353 #include "histogram.hxx"
5454 #include "polygon.hxx"
55 #ifdef WITH_LEMON
56 #include "polytope.hxx"
57 #endif
5558 #include "functorexpression.hxx"
5659 #include "labelimage.hxx"
60 #include "multi_labeling.hxx"
5761 #include <algorithm>
5862 #include <iostream>
5963
372376 */
373377
374378
375 /** This namespace contains the accumulator classes, fundamental statistics and modifiers. See \ref FeatureAccumulators for examples of usage.
379 /** \brief Efficient computation of object statistics.
380
381 This namespace contains the accumulator classes, fundamental statistics and modifiers. See \ref FeatureAccumulators for examples of usage.
376382 */
377383 namespace acc {
378384
709715 struct CollectAccumulatorNames<void>
710716 {
711717 template <class BackInsertable>
712 static void exec(BackInsertable & a, bool skipInternals=true)
718 static void exec(BackInsertable &, bool /* skipInternals */ = true)
713719 {}
714720 };
715721
739745 struct ApplyVisitorToTag<void>
740746 {
741747 template <class Accu, class Visitor>
742 static bool exec(Accu & a, std::string const & tag, Visitor const & v)
748 static bool exec(Accu &, std::string const &, Visitor const &)
743749 {
744750 return false;
745751 }
775781 struct SetHistogramBincount
776782 {
777783 template <class Accu>
778 static void exec(Accu & a, HistogramOptions const & options)
784 static void exec(Accu &, HistogramOptions const &)
779785 {}
780786 };
781787
793799 struct ApplyHistogramOptions
794800 {
795801 template <class Accu>
796 static void exec(Accu & a, HistogramOptions const & options)
802 static void exec(Accu &, HistogramOptions const &)
797803 {}
798804 };
799805
801807 struct ApplyHistogramOptions<StandardQuantiles<TAG> >
802808 {
803809 template <class Accu>
804 static void exec(Accu & a, HistogramOptions const & options)
810 static void exec(Accu &, HistogramOptions const &)
805811 {}
806812 };
807813
990996 struct DecoratorImpl
991997 {
992998 template <class T>
993 static void exec(A & a, T const & t)
999 static void exec(A &, T const &)
9941000 {}
9951001
9961002 template <class T>
997 static void exec(A & a, T const & t, double weight)
1003 static void exec(A &, T const &, double)
9981004 {}
9991005 };
10001006
23262332
23272333 template<class IGNORED_DATA>
23282334 void
2329 updatePassN(const IGNORED_DATA & ignoreData,
2335 updatePassN(const IGNORED_DATA &,
23302336 const CoordType & coord,
23312337 unsigned int p)
23322338 {
36373643
36383644 static const unsigned int workInPass = 2;
36393645
3640 void operator+=(Impl const & o)
3646 void operator+=(Impl const &)
36413647 {
36423648 vigra_precondition(false,
36433649 "Central<...>::operator+=(): not supported.");
36443650 }
36453651
36463652 template <class T>
3647 void update(T const & t)
3653 void update(T const &)
36483654 {
36493655 ImplType::update(getDependency<Centralize>(*this));
36503656 }
36513657
36523658 template <class T>
3653 void update(T const & t, double weight)
3659 void update(T const &, double weight)
36543660 {
36553661 ImplType::update(getDependency<Centralize>(*this), weight);
36563662 }
37893795
37903796 static const unsigned int workInPass = 2;
37913797
3792 void operator+=(Impl const & o)
3798 void operator+=(Impl const &)
37933799 {
37943800 vigra_precondition(false,
37953801 "Principal<...>::operator+=(): not supported.");
37963802 }
37973803
37983804 template <class T>
3799 void update(T const & t)
3805 void update(T const &)
38003806 {
38013807 ImplType::update(getDependency<PrincipalProjection>(*this));
38023808 }
38033809
38043810 template <class T>
3805 void update(T const & t, double weight)
3811 void update(T const &, double weight)
38063812 {
38073813 ImplType::update(getDependency<PrincipalProjection>(*this), weight);
38083814 }
39363942 struct Impl
39373943 : public SumBaseImpl<BASE, T, double, double>
39383944 {
3939 void update(T const & t)
3945 void update(T const &)
39403946 {
39413947 ++this->value_;
39423948 }
39433949
3944 void update(T const & t, double weight)
3950 void update(T const &, double weight)
39453951 {
39463952 this->value_ += weight;
39473953 }
43554361 }
43564362 }
43574363
4358 void update(U const & t)
4364 void update(U const &)
43594365 {
43604366 using namespace vigra::multi_math;
43614367 this->value_ += pow(getDependency<Centralize>(*this), 3);
43624368 }
43634369
4364 void update(U const & t, double weight)
4370 void update(U const &, double weight)
43654371 {
43664372 using namespace vigra::multi_math;
43674373 this->value_ += weight*pow(getDependency<Centralize>(*this), 3);
44164422 }
44174423 }
44184424
4419 void update(U const & t)
4425 void update(U const &)
44204426 {
44214427 using namespace vigra::multi_math;
44224428 this->value_ += pow(getDependency<Centralize>(*this), 4);
44234429 }
44244430
4425 void update(U const & t, double weight)
4431 void update(U const &, double weight)
44264432 {
44274433 using namespace vigra::multi_math;
44284434 this->value_ += weight*pow(getDependency<Centralize>(*this), 4);
53255331 value_ = t;
53265332 }
53275333
5328 void update(U const & t, double weight)
5334 void update(U const & t, double)
53295335 {
53305336 update(t);
53315337 }
54245430 }
54255431 }
54265432
5427 void update(U const & t)
5433 void update(U const &)
54285434 {
54295435 vigra_precondition(false, "ArgMinWeight::update() needs weights.");
54305436 }
54995505 }
55005506 }
55015507
5502 void update(U const & t)
5508 void update(U const &)
55035509 {
55045510 vigra_precondition(false, "ArgMaxWeight::update() needs weights.");
55055511 }
58145820 ++this->value_[index];
58155821 }
58165822
5817 void update(int index, double weight)
5823 void update(int, double)
58185824 {
58195825 // cannot compute quantile from weighted integer histograms,
58205826 // so force people to use UserRangeHistogram or AutoRangeHistogram
61376143 }
61386144
61396145 template <class U, class NEXT>
6140 void update(CoupledHandle<U, NEXT> const & t, double weight)
6146 void update(CoupledHandle<U, NEXT> const & t, double)
61416147 {
61426148 update(t);
61436149 }
61446150
6145 void operator+=(Impl const & o)
6151 void operator+=(Impl const &)
61466152 {
61476153 vigra_precondition(false,
61486154 "RegionContour::operator+=(): RegionContour cannot be merged.");
62556261 };
62566262 };
62576263
6258 template <int N>
6259 struct feature_ConvexHull_can_only_be_computed_for_2D_arrays
6260 : vigra::staticAssert::AssertBool<N==2>
6261 {};
6262
6263 /** \brief Compute the contour of a 2D region.
6264
6265 AccumulatorChain must be used with CoupledIterator in order to have access to pixel coordinates.
6264 // Compile only if lemon is available
6265 #ifdef WITH_LEMON
6266
6267 /** \brief Compute the convex hull of a region.
6268
6269 AccumulatorChain must be used with CoupledIterator in order to have access
6270 to pixel coordinates.
6271
6272 The result type is the ConvexPolytop class.
62666273 */
62676274 class ConvexHull
62686275 {
62696276 public:
6270 typedef Select<BoundingBox, RegionContour, RegionCenter> Dependencies;
6277 typedef Select<RegionCenter> Dependencies;
62716278
62726279 static std::string name()
62736280 {
62746281 return std::string("ConvexHull");
6275 // static const std::string n = std::string("ConvexHull");
6276 // return n;
62776282 }
62786283
62796284 template <class T, class BASE>
62806285 struct Impl
62816286 : public BASE
62826287 {
6283 static const unsigned int workInPass = 2;
6284
6285 typedef HandleArgSelector<T, LabelArgTag, BASE> LabelHandle;
6286 typedef TinyVector<double, 2> point_type;
6287 typedef Polygon<point_type> polygon_type;
6288 typedef Impl value_type;
6289 typedef value_type const & result_type;
6290
6291 polygon_type convex_hull_;
6292 point_type input_center_, convex_hull_center_, defect_center_;
6293 double convexity_, rugosity_, mean_defect_displacement_,
6294 defect_area_mean_, defect_area_variance_, defect_area_skewness_, defect_area_kurtosis_;
6295 int convexity_defect_count_;
6296 ArrayVector<MultiArrayIndex> convexity_defect_area_;
6297 bool features_computed_;
6288 static const unsigned int workInPass = 2;
6289 static const unsigned int dimensions = T::dimensions;
6290
6291 typedef ConvexPolytope<dimensions, double> polytope_type;
6292 typedef polytope_type value_type;
6293 typedef value_type const & result_type;
6294 typedef TinyVector<double, dimensions> point_type;
6295 typedef HandleArgSelector<T, CoordArgTag, BASE> coord_handle_type;
6296 typedef typename coord_handle_type::value_type coord_type;
6297
6298 polytope_type convex_hull_;
6299 bool initialized_;
62986300
62996301 Impl()
63006302 : convex_hull_()
6301 , input_center_()
6302 , convex_hull_center_()
6303 , defect_center_()
6304 , convexity_()
6305 , rugosity_()
6306 , mean_defect_displacement_()
6307 , defect_area_mean_()
6308 , defect_area_variance_()
6309 , defect_area_skewness_()
6310 , defect_area_kurtosis_()
6311 , convexity_defect_count_()
6312 , convexity_defect_area_()
6313 , features_computed_(false)
6303 , initialized_(false)
63146304 {}
63156305
63166306 template <class U, class NEXT>
63176307 void update(CoupledHandle<U, NEXT> const & t)
63186308 {
6319 VIGRA_STATIC_ASSERT((feature_ConvexHull_can_only_be_computed_for_2D_arrays<
6320 CoupledHandle<U, NEXT>::dimensions>));
6321 if(!features_computed_)
6309 if (!initialized_)
63226310 {
6323 using namespace functor;
6324 Shape2 start = getDependency<Coord<Minimum> >(*this),
6325 stop = getDependency<Coord<Maximum> >(*this) + Shape2(1);
6326 point_type offset(start);
6327 input_center_ = getDependency<RegionCenter>(*this);
6328 MultiArrayIndex label = LabelHandle::getValue(t);
6329
6330 convex_hull_.clear();
6331 convexHull(getDependency<RegionContour>(*this), convex_hull_);
6332 convex_hull_center_ = centroid(convex_hull_);
6333
6334 convexity_ = getDependency<RegionContour>(*this).area() / convex_hull_.area();
6335 rugosity_ = getDependency<RegionContour>(*this).length() / convex_hull_.length();
6336
6337 MultiArray<2, UInt8> convex_hull_difference(stop-start);
6338 fillPolygon(convex_hull_ - offset, convex_hull_difference, 1);
6339 combineTwoMultiArrays(convex_hull_difference,
6340 LabelHandle::getHandle(t).arrayView().subarray(start, stop),
6341 convex_hull_difference,
6342 ifThenElse(Arg2() == Param(label), Param(0), Arg1()));
6343
6344 MultiArray<2, UInt32> convexity_defects(stop-start);
6345 convexity_defect_count_ =
6346 labelImageWithBackground(convex_hull_difference, convexity_defects, false, 0);
6347
6348 if (convexity_defect_count_ != 0)
6311 initialize();
6312 }
6313 point_type vec(t.point().begin());
6314 convex_hull_.addExtremeVertex(vec);
6315 }
6316
6317 template <class U, class NEXT>
6318 void update(CoupledHandle<U, NEXT> const & t, double)
6319 {
6320 update(t);
6321 }
6322
6323 void initialize()
6324 {
6325 convex_hull_.addVertex(getDependency<RegionCenter>(*this));
6326 for (int dim = 0; dim < dimensions; dim++)
6327 {
6328 coord_type vec;
6329 vec[dim] = .5;
6330 convex_hull_.addVertex(
6331 vec + getDependency<RegionCenter>(*this));
6332 }
6333 initialized_ = true;
6334 }
6335
6336 void operator+=(Impl const &)
6337 {
6338 vigra_precondition(
6339 false,
6340 "ConvexHull::operator+=(): ConvexHull features cannot be merged.");
6341 }
6342
6343 result_type operator()() const
6344 {
6345 return convex_hull_;
6346 }
6347 };
6348 };
6349
6350 /** \brief Compute object features related to the convex hull.
6351
6352 AccumulatorChain must be used with CoupledIterator in order to have access
6353 to pixel coordinates. The convex hull features are only available when
6354 `WITH_LEMON` is set.
6355
6356 Minimal example how to calculate the features:
6357 \code
6358 // "labels" is the array with the region labels
6359 MultiArrayView<2, int> labels = ...;
6360
6361 // Set up the accumulator chain and ignore the zero label
6362 AccumulatorChainArray<
6363 CoupledArrays<2, int>,
6364 Select<LabelArg<1>, ConvexHullFeatures> > chain;
6365 chain.ignoreLabel(0);
6366
6367 // Extract the features
6368 extractFeatures(labels, chain);
6369
6370 // Finalize the calculation for label 1
6371 getAccumulator<ConvexHullFeatures>(chain, 1).finalize();
6372
6373 // Get the features
6374 ... = getAccumulator<ConvexHullFeatures>(chain, 1).inputCenter();
6375 \endcode
6376
6377 */
6378 class ConvexHullFeatures
6379 {
6380 public:
6381 typedef Select<BoundingBox, RegionCenter, Count, ConvexHull> Dependencies;
6382
6383 static std::string name()
6384 {
6385 return std::string("ConvexHullFeatures");
6386 }
6387
6388 /** \brief Result type of the covex hull feature calculation
6389 */
6390 template <class T, class BASE>
6391 struct Impl
6392 : public BASE
6393 {
6394 static const unsigned int workInPass = 3;
6395 static const unsigned int dimensions = T::dimensions;
6396
6397 typedef ConvexPolytope<dimensions, double> polytope_type;
6398 typedef Impl<T, BASE> value_type;
6399 typedef value_type const & result_type;
6400 typedef TinyVector<double, dimensions> point_type;
6401 typedef HandleArgSelector<T, CoordArgTag, BASE> coord_handle_type;
6402 typedef typename coord_handle_type::value_type coord_type;
6403
6404 typedef MultiArray<dimensions, unsigned int> array_type;
6405
6406 array_type label_array_;
6407 point_type hull_center_;
6408 int hull_volume_;
6409 point_type defect_center_;
6410 double defect_displacement_mean_;
6411 double defect_volume_mean_;
6412 double defect_volume_variance_;
6413 double defect_volume_skewness_;
6414 double defect_volume_kurtosis_;
6415 int defect_count_;
6416 bool initialized_;
6417 bool finalized_;
6418 int num_values_;
6419
6420 Impl()
6421 : hull_center_()
6422 , hull_volume_()
6423 , defect_center_()
6424 , defect_volume_mean_()
6425 , defect_volume_variance_()
6426 , defect_volume_skewness_()
6427 , defect_volume_kurtosis_()
6428 , defect_count_()
6429 , initialized_(false)
6430 , finalized_(false)
6431 , num_values_(0)
6432 {}
6433
6434 template <class U, class NEXT>
6435 void update(CoupledHandle<U, NEXT> const & t)
6436 {
6437 vigra_precondition(
6438 finalized_ == false,
6439 "ConvexHullFeatures::update(): "
6440 "Finalize must not be called before update");
6441 if (!initialized_)
6442 {
6443 initialize();
6444 }
6445 const coord_type & coord_min = getDependency<Coord<Minimum> >(*this);
6446 // Update label array
6447 label_array_[coord_handle_type::getValue(t) - coord_min] = 0;
6448 }
6449
6450 template <class U, class NEXT>
6451 void update(CoupledHandle<U, NEXT> const & t, double)
6452 {
6453 update(t);
6454 }
6455
6456 void initialize()
6457 {
6458 // Get hull and bounding box
6459 const polytope_type & hull = getDependency<ConvexHull>(*this);
6460 const coord_type & coord_min = getDependency<Coord<Minimum> >(*this);
6461 coord_type coord_max = getDependency<Coord<Maximum> >(*this);
6462 coord_max += coord_type(1);
6463 // Get offset
6464 point_type offset;
6465 std::copy(coord_min.begin(), coord_min.end(), offset.begin());
6466 // Create the label array
6467 label_array_.reshape(coord_max - coord_min, 0);
6468 hull.fill(label_array_, 1, offset);
6469 // Extract convex hull features
6470 AccumulatorChainArray<
6471 CoupledArrays<dimensions, unsigned int>,
6472 Select<LabelArg<1>, Count, RegionCenter> > hull_acc;
6473 hull_acc.ignoreLabel(0);
6474 extractFeatures(label_array_, hull_acc);
6475 hull_center_ = get<RegionCenter>(hull_acc, 1) + coord_min;
6476 hull_volume_ = get<Count>(hull_acc, 1);
6477 // Set initialized flag
6478 initialized_ = true;
6479 }
6480
6481 /* \brief Finalize the calculation of the convex hull features.
6482
6483 Finalize must be called in order to trigger the calculation of
6484 the convexity defect features.
6485 */
6486 void finalize()
6487 {
6488 if (!finalized_)
6489 {
6490 dofinalize();
6491 finalized_ = true;
6492 }
6493 }
6494
6495 void dofinalize()
6496 {
6497 vigra_precondition(
6498 initialized_,
6499 "ConvexHullFeatures::finalize(): "
6500 "Feature computation was not initialized.");
6501 const coord_type & coord_min = getDependency<Coord<Minimum> >(*this);
6502 // Calculate defect center
6503 AccumulatorChainArray<
6504 CoupledArrays<dimensions, unsigned int>,
6505 Select<LabelArg<1>, RegionCenter, Count> > defect_acc;
6506 extractFeatures(label_array_, defect_acc);
6507 defect_center_ = get<RegionCenter>(defect_acc, 1) + coord_min;
6508 // Calculate defect stats
6509 array_type defects_array(label_array_.shape());
6510 defect_count_ = labelMultiArrayWithBackground(
6511 label_array_,
6512 defects_array);
6513 defect_volume_mean_ = 0.0;
6514 defect_volume_variance_ = 0.0;
6515 defect_volume_skewness_ = 0.0;
6516 defect_volume_kurtosis_ = 0.0;
6517 if (defect_count_ != 0)
6518 {
6519 AccumulatorChainArray<
6520 CoupledArrays<dimensions, unsigned int>,
6521 Select<LabelArg<1>, Count, RegionCenter> > defects_acc;
6522 extractFeatures(defects_array, defects_acc);
6523 ArrayVector<double> defect_volumes;
6524 point_type center = getDependency<RegionCenter>(*this)
6525 -getDependency<Coord<Minimum> >(*this);
6526 for (int k = 1; k <= defect_count_; k++)
63496527 {
6350 AccumulatorChainArray<CoupledArrays<2, UInt32>,
6351 Select<LabelArg<1>, Count, RegionCenter> > convexity_defects_stats;
6352 convexity_defects_stats.ignoreLabel(0);
6353 extractFeatures(convexity_defects, convexity_defects_stats);
6354
6355 double total_defect_area = 0.0;
6356 mean_defect_displacement_ = 0.0;
6357 defect_center_ = point_type();
6358 for (int k = 1; k <= convexity_defect_count_; ++k)
6359 {
6360 double area = get<Count>(convexity_defects_stats, k);
6361 point_type center = get<RegionCenter>(convexity_defects_stats, k) + offset;
6362
6363 convexity_defect_area_.push_back(area);
6364 total_defect_area += area;
6365 defect_center_ += area*center;
6366 mean_defect_displacement_ += area*norm(input_center_ - center);
6367 }
6368 sort(convexity_defect_area_.begin(), convexity_defect_area_.end(),
6369 std::greater<MultiArrayIndex>());
6370 mean_defect_displacement_ /= total_defect_area;
6371 defect_center_ /= total_defect_area;
6372
6373 AccumulatorChain<MultiArrayIndex,
6374 Select<Mean, UnbiasedVariance, UnbiasedSkewness, UnbiasedKurtosis> > defect_area_stats;
6375 extractFeatures(convexity_defect_area_.begin(),
6376 convexity_defect_area_.end(), defect_area_stats);
6377
6378 defect_area_mean_ = convexity_defect_count_ > 0
6379 ? get<Mean>(defect_area_stats)
6380 : 0.0;
6381 defect_area_variance_ = convexity_defect_count_ > 1
6382 ? get<UnbiasedVariance>(defect_area_stats)
6383 : 0.0;
6384 defect_area_skewness_ = convexity_defect_count_ > 2
6385 ? get<UnbiasedSkewness>(defect_area_stats)
6386 : 0.0;
6387 defect_area_kurtosis_ = convexity_defect_count_ > 3
6388 ? get<UnbiasedKurtosis>(defect_area_stats)
6389 : 0.0;
6528 defect_volumes.push_back(get<Count>(defects_acc, k));
6529 defect_displacement_mean_ += get<Count>(defects_acc, k)
6530 * norm(get<RegionCenter>(defects_acc, k) - center);
63906531 }
6391 /**********************************************/
6392 features_computed_ = true;
6532 defect_displacement_mean_ /= get<Count>(defect_acc, 1);
6533 AccumulatorChain<
6534 MultiArrayIndex,
6535 Select< Mean,
6536 UnbiasedVariance,
6537 UnbiasedSkewness,
6538 UnbiasedKurtosis> > volumes_acc;
6539 extractFeatures(
6540 defect_volumes.begin(),
6541 defect_volumes.end(),
6542 volumes_acc);
6543 defect_volume_mean_ = get<Mean>(volumes_acc);
6544 if (defect_count_ > 1)
6545 {
6546 defect_volume_variance_ = get<UnbiasedVariance>(volumes_acc);
6547 }
6548 if (defect_count_ > 2)
6549 {
6550 defect_volume_skewness_ = get<UnbiasedSkewness>(volumes_acc);
6551 }
6552 if (defect_count_ > 3)
6553 {
6554 defect_volume_kurtosis_ = get<UnbiasedKurtosis>(volumes_acc);
6555 }
63936556 }
63946557 }
63956558
6396 template <class U, class NEXT>
6397 void update(CoupledHandle<U, NEXT> const & t, double weight)
6398 {
6399 update(t);
6400 }
6401
6402 void operator+=(Impl const & o)
6403 {
6404 vigra_precondition(false,
6405 "ConvexHull::operator+=(): ConvexHull features cannot be merged.");
6559 void operator+=(Impl const &)
6560 {
6561 vigra_precondition(
6562 false,
6563 "ConvexHullFeatures::operator+=(): features cannot be merged.");
64066564 }
64076565
64086566 result_type operator()() const
64096567 {
6568 vigra_precondition(
6569 finalized_,
6570 "ConvexHullFeatures::operator(): "
6571 "Finalize must be called before operator()");
64106572 return *this;
64116573 }
64126574
6413 /*
6414 * Returns the convex hull polygon.
6415 */
6416 polygon_type const & hull() const
6417 {
6418 return convex_hull_;
6419 }
6420
6421 /*
6422 * Returns the area enclosed by the input polygon.
6423 */
6424 double inputArea() const
6425 {
6426 vigra_precondition(features_computed_,
6427 "ConvexHull: features must be calculated first.");
6428 return getDependency<RegionContour>(*this).area();
6429 }
6430
6431 /*
6432 * Returns the area enclosed by the convex hull polygon.
6433 */
6434 double hullArea() const
6435 {
6436 vigra_precondition(features_computed_,
6437 "ConvexHull: features must be calculated first.");
6438 return convex_hull_.area();
6439 }
6440
6441 /*
6442 * Returns the perimeter of the input polygon.
6443 */
6444 double inputPerimeter() const
6445 {
6446 vigra_precondition(features_computed_,
6447 "ConvexHull: features must be calculated first.");
6448 return getDependency<RegionContour>(*this).length();
6449 }
6450
6451 /*
6452 * Returns the perimeter of the convex hull polygon.
6453 */
6454 double hullPerimeter() const
6455 {
6456 vigra_precondition(features_computed_,
6457 "ConvexHull: features must be calculated first.");
6458 return convex_hull_.length();
6459 }
6460
6461 /*
6462 * Center of the original region.
6463 */
6464 point_type const & inputCenter() const
6465 {
6466 return input_center_;
6467 }
6468
6469 /*
6470 * Center of the region enclosed by the convex hull.
6471 */
6472 point_type const & hullCenter() const
6473 {
6474 return convex_hull_center_;
6475 }
6476
6477 /*
6478 * Center of difference between the convex hull and the original region.
6479 */
6480 point_type const & convexityDefectCenter() const
6481 {
6575 /** \brief Center of the input region.
6576 */
6577 const point_type & inputCenter() const {
6578 return getDependency<RegionCenter>(*this);
6579 }
6580
6581 /** \brief Center of the convex hull of the input region.
6582 */
6583 const point_type & hullCenter() const {
6584 return hull_center_;
6585 }
6586
6587 /** \brief Volume of the input region.
6588 */
6589 int inputVolume() const {
6590 return getDependency<Count>(*this);
6591 }
6592
6593 /** \brief Volume of the convex hull of the input region.
6594 */
6595 int hullVolume() const {
6596 return hull_volume_;
6597 }
6598
6599 /** \brief Weighted center of mass of the convexity defects.
6600 */
6601 const point_type & defectCenter() const {
64826602 return defect_center_;
64836603 }
64846604
6485 /*
6486 * Returns the ratio between the input area and the convex hull area.
6487 * This is always <tt><= 1</tt>, and the smaller the value is,
6488 * the less convex is the input polygon.
6489 */
6490 double convexity() const
6491 {
6492 vigra_precondition(features_computed_,
6493 "ConvexHull: features must be calculated first.");
6494 return convexity_;
6495 }
6496
6497 /*
6498 * Returns the ratio between the input perimeter and the convex perimeter.
6499 * This is always <tt>>= 1</tt>, and the higher the value is, the less
6500 * convex is the input polygon.
6501 */
6502 double rugosity() const
6503 {
6504 vigra_precondition(features_computed_,
6505 "ConvexHull: features must be calculated first.");
6506 return rugosity_;
6507 }
6508
6509 /*
6510 * Returns the number of convexity defects (i.e. number of connected components
6511 * of the difference between convex hull and input region).
6512 */
6513 int convexityDefectCount() const
6514 {
6515 vigra_precondition(features_computed_,
6516 "ConvexHull: features must be calculated first.");
6517 return convexity_defect_count_;
6518 }
6519
6520 /*
6521 * Returns the mean area of the convexity defects.
6522 */
6523 double convexityDefectAreaMean() const
6524 {
6525 vigra_precondition(features_computed_,
6526 "ConvexHull: features must be calculated first.");
6527 return defect_area_mean_;
6528 }
6529
6530 /*
6531 * Returns the variance of the convexity defect areas.
6532 */
6533 double convexityDefectAreaVariance() const
6534 {
6535 vigra_precondition(features_computed_,
6536 "ConvexHull: features must be calculated first.");
6537 return defect_area_variance_;
6538 }
6539
6540 /*
6541 * Returns the skewness of the convexity defect areas.
6542 */
6543 double convexityDefectAreaSkewness() const
6544 {
6545 vigra_precondition(features_computed_,
6546 "ConvexHull: features must be calculated first.");
6547 return defect_area_skewness_;
6548 }
6549
6550 /*
6551 * Returns the kurtosis of the convexity defect areas.
6552 */
6553 double convexityDefectAreaKurtosis() const
6554 {
6555 vigra_precondition(features_computed_,
6556 "ConvexHull: features must be calculated first.");
6557 return defect_area_kurtosis_;
6558 }
6559
6560 /*
6561 * Returns the mean distance between the defect areas and the center of
6562 * the input region, weighted by the area of each defect region.
6563 */
6564 double meanDefectDisplacement() const
6565 {
6566 vigra_precondition(features_computed_,
6567 "ConvexHull: features must be calculated first.");
6568 return mean_defect_displacement_;
6569 }
6570
6571 /*
6572 * Returns the areas of the convexity defect regions (ordered descending).
6573 */
6574 ArrayVector<MultiArrayIndex> const & defectAreaList() const
6575 {
6576 vigra_precondition(features_computed_,
6577 "ConvexHull: features must be calculated first.");
6578 return convexity_defect_area_;
6605 /** \brief Average volume of the convexity defects.
6606 */
6607 double defectVolumeMean() const {
6608 return defect_volume_mean_;
6609 }
6610
6611 /** \brief Variance of the volumes of the convexity defects.
6612 */
6613 double defectVolumeVariance() const {
6614 return defect_volume_variance_;
6615 }
6616
6617 /** \brief Skewness of the volumes of the convexity defects.
6618 */
6619 double defectVolumeSkewness() const {
6620 return defect_volume_skewness_;
6621 }
6622
6623 /** \brief Kurtosis of the volumes of the convexity defects.
6624 */
6625 double defectVolumeKurtosis() const {
6626 return defect_volume_kurtosis_;
6627 }
6628
6629 /** \brief Number of convexity defects.
6630 */
6631 int defectCount() const {
6632 return defect_count_;
6633 }
6634
6635 /** \brief Average displacement of the convexity defects from the input
6636 region center weighted by their size.
6637 */
6638 double defectDisplacementMean() const {
6639 return defect_displacement_mean_;
6640 }
6641
6642 /** \brief Convexity of the input region
6643
6644 The convexity is the ratio of the input volume to the convex hull
6645 volume: \f[c = \frac{V_\mathrm{input}}{V_\mathrm{hull}}\f]
6646 */
6647 double convexity() const {
6648 return static_cast<double>(inputVolume()) / hullVolume();
65796649 }
65806650 };
65816651 };
6582
6652 #endif // WITH_LEMON
65836653
65846654 }} // namespace vigra::acc
65856655
7272 typedef typename G::index_type index_type;
7373
7474 public:
75 ItemIter(const lemon::Invalid & iv = lemon::INVALID)
75 ItemIter(const lemon::Invalid & /*iv*/ = lemon::INVALID)
7676 : graph_(NULL),
7777 id_(-1),
7878 item_(lemon::INVALID)
142142 typedef typename Graph::Arc Arc;
143143 typedef typename Graph::Edge Edge;
144144 typedef typename Graph::EdgeIt EdgeIt;
145 ArcIt(const lemon::Invalid invalid = lemon::INVALID )
145 ArcIt(const lemon::Invalid /*invalid*/ = lemon::INVALID )
146146 : graph_(NULL),
147147 pos_(),
148148 inFirstHalf_(false),
573573 }
574574
575575 template<class ITER>
576 void deserialize(ITER begin, ITER end){
576 void deserialize(ITER begin, ITER){
577577
578578
579579 nodeNum_ = *begin; ++begin;
687687
688688 inline AdjacencyListGraph::Node
689689 AdjacencyListGraph::addNode(const AdjacencyListGraph::index_type id){
690 if(id == nodes_.size()){
690 if((std::size_t)id == nodes_.size()){
691691 nodes_.push_back(NodeStorage(id));
692692 ++nodeNum_;
693693 return Node(id);
506506 double & correlation_coefficent,
507507 Diff2D border = Diff2D(0,0))
508508 {
509 ignore_argument(d_lr);
509510 typename SrcIterator::difference_type s_shape = s_lr - s_ul;
510511
511512 //determine matrix by using 5 quater-matches and a maximum likelihood decision:
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
34
34
3535 #ifndef VIGRA_AFFINEGEOMETRY_HXX
3636 #define VIGRA_AFFINEGEOMETRY_HXX
3737
4545
4646 namespace vigra {
4747
48 /** \addtogroup GeometricTransformations Geometric Transformations
48 /** \addtogroup GeometricTransformations
4949 */
5050 //@{
5151
5656 /********************************************************/
5757
5858 /** \brief Create homogeneous matrix representing a 2D translation.
59
59
6060 For use with \ref affineWarpImage().
6161 */
6262 inline
6969 }
7070
7171 /** \brief Create homogeneous matrix representing a 2D uniform scaling about the coordinate origin.
72
72
7373 For use with \ref affineWarpImage().
7474 */
7575 inline
8282 }
8383
8484 /** \brief Create homogeneous matrix representing a 2D non-uniform scaling about the coordinate origin.
85
85
8686 For use with \ref affineWarpImage().
8787 */
8888 inline
9595 }
9696
9797 /** \brief Create homogeneous matrix representing a 2D shearing.
98
98
9999 For use with \ref affineWarpImage().
100100 */
101101 inline
108108 }
109109
110110 /** \brief Create homogeneous matrix representing a 2D rotation about the coordinate origin.
111
111
112112 For use with \ref affineWarpImage(). Angle must be in radians.
113113 */
114114 inline
125125 }
126126
127127 /** \brief Create homogeneous matrix representing a 2D rotation about the coordinate origin.
128
128
129129 For use with \ref affineWarpImage(). Angle must be in degrees.
130130 */
131131 inline
135135 }
136136
137137 /** \brief Create homogeneous matrix representing a 2D rotation about the given point.
138
138
139139 For use with \ref affineWarpImage(). Angle must be in radians.
140140 */
141141 inline
145145 }
146146
147147 /** \brief Create homogeneous matrix representing a 2D rotation about the given point.
148
148
149149 For use with \ref affineWarpImage(). Angle must be in degrees.
150150 */
151151 inline
161161 /********************************************************/
162162
163163 // documentation is in basicgeometry.hxx
164 template <int ORDER, class T,
164 template <int ORDER, class T,
165165 class DestIterator, class DestAccessor>
166166 void rotateImage(SplineImageView<ORDER, T> const & src,
167 DestIterator id, DestAccessor dest,
167 DestIterator id, DestAccessor dest,
168168 double angleInDegree, TinyVector<double, 2> const & center)
169169 {
170170 int w = src.width();
171171 int h = src.height();
172
172
173173 double angle = angleInDegree/180.0;
174174 double c = cos_pi(angle); // avoid round-off errors for simple rotations
175175 double s = sin_pi(angle);
176
176
177177 for(int y = 0; y < h; ++y, ++id.y)
178178 {
179179 typename DestIterator::row_iterator rd = id.rowIterator();
187187 }
188188 }
189189
190 template <int ORDER, class T,
190 template <int ORDER, class T,
191191 class DestIterator, class DestAccessor>
192 inline void
192 inline void
193193 rotateImage(SplineImageView<ORDER, T> const & src,
194 pair<DestIterator, DestAccessor> dest,
194 pair<DestIterator, DestAccessor> dest,
195195 double angleInDegree, TinyVector<double, 2> const & center)
196196 {
197197 rotateImage(src, dest.first, dest.second, angleInDegree, center);
198198 }
199199
200 template <int ORDER, class T,
200 template <int ORDER, class T,
201201 class DestIterator, class DestAccessor>
202 inline void
202 inline void
203203 rotateImage(SplineImageView<ORDER, T> const & src,
204 DestIterator id, DestAccessor dest,
204 DestIterator id, DestAccessor dest,
205205 double angleInDegree)
206206 {
207207 TinyVector<double, 2> center((src.width()-1.0) / 2.0, (src.height()-1.0) / 2.0);
208208 rotateImage(src, id, dest, angleInDegree, center);
209209 }
210210
211 template <int ORDER, class T,
211 template <int ORDER, class T,
212212 class DestIterator, class DestAccessor>
213 inline void
213 inline void
214214 rotateImage(SplineImageView<ORDER, T> const & src,
215 pair<DestIterator, DestAccessor> dest,
215 pair<DestIterator, DestAccessor> dest,
216216 double angleInDegree)
217217 {
218218 TinyVector<double, 2> center((src.width()-1.0) / 2.0, (src.height()-1.0) / 2.0);
219219 rotateImage(src, dest.first, dest.second, angleInDegree, center);
220220 }
221221
222 template <int ORDER, class T,
222 template <int ORDER, class T,
223223 class T2, class S2>
224 inline void
224 inline void
225225 rotateImage(SplineImageView<ORDER, T> const & src,
226 MultiArrayView<2, T2, S2> dest,
226 MultiArrayView<2, T2, S2> dest,
227227 double angleInDegree, TinyVector<double, 2> const & center)
228228 {
229229 rotateImage(src, destImage(dest), angleInDegree, center);
230230 }
231231
232 template <int ORDER, class T,
232 template <int ORDER, class T,
233233 class T2, class S2>
234 inline void
234 inline void
235235 rotateImage(SplineImageView<ORDER, T> const & src,
236 MultiArrayView<2, T2, S2> dest,
236 MultiArrayView<2, T2, S2> dest,
237237 double angleInDegree)
238238 {
239239 TinyVector<double, 2> center((src.width()-1.0) / 2.0, (src.height()-1.0) / 2.0);
249249 /** \brief Warp an image according to an affine transformation.
250250
251251 <b> Declarations:</b>
252
252
253253 pass 2D array views:
254254 \code
255255 namespace vigra {
256 template <int ORDER, class T,
256 template <int ORDER, class T,
257257 class T2, class S2,
258258 class C>
259259 void
260260 affineWarpImage(SplineImageView<ORDER, T> const & src,
261 MultiArrayView<2, T2, S2> dest,
261 MultiArrayView<2, T2, S2> dest,
262262 MultiArrayView<2, double, C> const & affineMatrix);
263263 }
264264 \endcode
265
265
266266 \deprecatedAPI{affineWarpImage}
267267 pass \ref ImageIterators and \ref DataAccessors :
268268 \code
269269 namespace vigra {
270 template <int ORDER, class T,
270 template <int ORDER, class T,
271271 class DestIterator, class DestAccessor,
272272 class C>
273273 void affineWarpImage(SplineImageView<ORDER, T> const & src,
274 DestIterator dul, DestIterator dlr, DestAccessor dest,
274 DestIterator dul, DestIterator dlr, DestAccessor dest,
275275 MultiArrayView<2, double, C> const & affineMatrix);
276276 }
277277 \endcode
278278 use argument objects in conjunction with \ref ArgumentObjectFactories :
279279 \code
280280 namespace vigra {
281 template <int ORDER, class T,
281 template <int ORDER, class T,
282282 class DestIterator, class DestAccessor,
283283 class C>
284284 void affineWarpImage(SplineImageView<ORDER, T> const & src,
285 triple<DestIterator, DestIterator, DestAccessor> dest,
285 triple<DestIterator, DestIterator, DestAccessor> dest,
286286 MultiArrayView<2, double, C> const & affineMatrix);
287287 }
288288 \endcode
289289 \deprecatedEnd
290
290
291291 The algorithm applies the given \a affineMatrix to the <i>destination coordinates</i> and copies
292 the image value from the resulting source coordinates, using the given SplineImageView \a src for interpolation.
292 the image value from the resulting source coordinates, using the given SplineImageView \a src for interpolation.
293293 If the resulting coordinate is outside the source image, nothing will be written at that destination point.
294
294
295295 \code
296296 for all dest pixels:
297297 currentSrcCoordinate = affineMatrix * currentDestCoordinate;
298298 if src.isInside(currentSrcCoordinate):
299299 dest[currentDestCoordinate] = src[currentSrcCoordinate]; // copy an interpolated value
300300 \endcode
301
301
302302 The matrix represents a 2-dimensional affine transform by means of homogeneous coordinates,
303303 i.e. it must be a 3x3 matrix whose last row is (0,0,1).
304
304
305305 <b> Usage:</b>
306
306
307307 <b>\#include</b> \<vigra/affinegeometry.hxx\><br>
308308 Namespace: vigra
309309
310310 \code
311311 MultiArray<2, float> src(width, height);
312312 SplineImageView<3, float> spline(src);
313
313
314314 MultiArray<2, float> dest1(src.shape());
315
316 // equivalent (up to round-off errors) to
315
316 // equivalent (up to round-off errors) to
317317 // rotateImage(spline, dest1, 45.0);
318318 TinyVector<double, 2> center((width-1.0)/2.0, (height-1.0)/2.0);
319319 affineWarpImage(spline, dest1, rotationMatrix2DDegrees(45.0, center));
320
320
321321 MultiArray<2, float> dest2(2*width-1, 2*height-1);
322
323 // equivalent (up to round-off errors) to
322
323 // equivalent (up to round-off errors) to
324324 // resizeImageSplineInterpolation(img, dest2);
325325 // note that scaleFactor = 0.5, because we must pass the transformation from destination to source
326326 affineWarpImage(spline, dest2, scalingMatrix2D(0.5));
330330 \code
331331 FImage src(width, height);
332332 SplineImageView<3, Image::value_type> spline(srcImageRange(src));
333
333
334334 FImage dest1(width, height);
335
336 // equivalent (up to round-off errors) with
335
336 // equivalent (up to round-off errors) with
337337 // rotateImage(spline, destImage(dest1), 45.0);
338338 TinyVector<double, 2> center((width-1.0)/2.0, (height-1.0)/2.0);
339339 affineWarpImage(spline, destImageRange(dest1), rotationMatrix2DDegrees(45.0, center));
340
340
341341 FImage dest2(2*width-1, 2*height-1);
342
343 // equivalent (up to round-off errors) with
342
343 // equivalent (up to round-off errors) with
344344 // resizeImageSplineInterpolation(srcImageRange(img), destImageRange(dest2));
345345 // note that scaleFactor = 0.5, because we must pass the transformation from destination to source
346346 affineWarpImage(spline, destImageRange(dest2), scalingMatrix2D(0.5));
348348 <b> Required Interface:</b>
349349 \code
350350 DestImageIterator dest_upperleft;
351
351
352352 double x = ..., y = ...;
353
353
354354 if (spline.isInside(x,y))
355355 dest_accessor.set(spline(x, y), dest_upperleft);
356356 \endcode
357357 \deprecatedEnd
358
359 <b>See also:</b> Functions to specify affine transformation: \ref translationMatrix2D(), \ref scalingMatrix2D(),
358
359 <b>See also:</b> Functions to specify affine transformation: \ref translationMatrix2D(), \ref scalingMatrix2D(),
360360 \ref shearMatrix2D(), \ref rotationMatrix2DRadians(), \ref rotationMatrix2DDegrees()
361361 */
362362 doxygen_overloaded_function(template <...> void affineWarpImage)
363363
364 template <int ORDER, class T,
364 template <int ORDER, class T,
365365 class DestIterator, class DestAccessor,
366366 class C>
367367 void affineWarpImage(SplineImageView<ORDER, T> const & src,
368 DestIterator dul, DestIterator dlr, DestAccessor dest,
368 DestIterator dul, DestIterator dlr, DestAccessor dest,
369369 MultiArrayView<2, double, C> const & affineMatrix)
370370 {
371 vigra_precondition(rowCount(affineMatrix) == 3 && columnCount(affineMatrix) == 3 &&
371 vigra_precondition(rowCount(affineMatrix) == 3 && columnCount(affineMatrix) == 3 &&
372372 affineMatrix(2,0) == 0.0 && affineMatrix(2,1) == 0.0 && affineMatrix(2,2) == 1.0,
373373 "affineWarpImage(): matrix doesn't represent an affine transformation with homogeneous 2D coordinates.");
374
375
374
375
376376 double w = dlr.x - dul.x;
377377 double h = dlr.y - dul.y;
378
378
379379 for(double y = 0.0; y < h; ++y, ++dul.y)
380380 {
381381 typename DestIterator::row_iterator rd = dul.rowIterator();
389389 }
390390 }
391391
392 template <int ORDER, class T,
392 template <int ORDER, class T,
393393 class DestIterator, class DestAccessor,
394394 class C>
395395 inline void
396396 affineWarpImage(SplineImageView<ORDER, T> const & src,
397 triple<DestIterator, DestIterator, DestAccessor> dest,
397 triple<DestIterator, DestIterator, DestAccessor> dest,
398398 MultiArrayView<2, double, C> const & affineMatrix)
399399 {
400400 affineWarpImage(src, dest.first, dest.second, dest.third, affineMatrix);
401401 }
402402
403 template <int ORDER, class T,
403 template <int ORDER, class T,
404404 class T2, class S2,
405405 class C>
406406 inline void
407407 affineWarpImage(SplineImageView<ORDER, T> const & src,
408 MultiArrayView<2, T2, S2> dest,
408 MultiArrayView<2, T2, S2> dest,
409409 MultiArrayView<2, double, C> const & affineMatrix)
410410 {
411411 affineWarpImage(src, destImageRange(dest), affineMatrix);
498498 static bool isLittleEndian()
499499 {
500500 static const UIntBiggest testint = 0x01;
501 return ((UInt8 *)&testint)[0] == 0x01;
501 return reinterpret_cast<const UInt8 *>(&testint)[0] == 0x01;
502502 }
503503
504504 template <class INT>
734734 if(isLittleEndian() && size > 3)
735735 {
736736 // take care of alignment
737 for(; (std::size_t)i % 4 != 0; ++i)
737 for(; reinterpret_cast<std::size_t>(i) % 4 != 0; ++i)
738738 {
739739 crc = (crc >> 8) ^ table0[(crc ^ *i) & 0xFF];
740740 }
741741 for(; i < end-3; i+=4)
742742 {
743 crc ^= *((UInt32 *)i);
743 crc ^= *(reinterpret_cast<const UInt32 *>(i));
744744 crc = table3[crc & 0xFF] ^
745745 table2[(crc >> 8) & 0xFF] ^
746746 table1[(crc >> 16) & 0xFF] ^
214214 assert(a.cast<double>() == 20.0);
215215
216216 // delete the stored value
217 a.release();
217 a.destroy();
218218 assert(a.empty());
219219 assert(a == false);
220220
274274
275275 /** Delete the contained object (make this 'Any' object empty).
276276 */
277 void release()
278 {
279 handle_.release();
277 void destroy()
278 {
279 handle_.reset((detail::AnyHandle*)0);
280280 }
281281
282282 /** Exchange the value of this object with other's.
701701 template <class T, class Alloc>
702702 inline void ArrayVector<T, Alloc>::push_back( value_type const & t )
703703 {
704 size_type old_capacity = this->capacity_;
705 pointer old_data = reserveImpl(false);
704 size_type old_capacity = this->capacity_;
705 pointer old_data = reserveImpl(false);
706706 alloc_.construct(this->data_ + this->size_, t);
707707 // deallocate old data _after_ construction of new element, so that
708708 // 't' can refer to the old data as in 'push_back(front())'
955955 template <class T>
956956 ostream & operator<<(ostream & s, vigra::ArrayVectorView<T> const & a)
957957 {
958 for(std::size_t k=0; k<a.size()-1; ++k)
958 for(std::ptrdiff_t k=0; k<(std::ptrdiff_t)a.size()-1; ++k)
959959 s << a[k] << ", ";
960960 if(a.size())
961961 s << a.back();
356356
357357 AxisTags(std::string const & tags)
358358 {
359 for(int k=0; k<tags.size(); ++k)
359 for(std::string::size_type k=0; k<tags.size(); ++k)
360360 {
361361 switch(tags[k])
362362 {
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
34
34
3535 #ifndef VIGRA_BASICGEOMETRY_HXX
3636 #define VIGRA_BASICGEOMETRY_HXX
3737
4444
4545 namespace vigra {
4646
47 /** \addtogroup GeometricTransformations Geometric Transformations
47 /** \addtogroup GeometricTransformations
4848 */
4949 //@{
5050
5757 /** \brief Rotate an image by a multiple of 90 degrees or by an arbitrary angle.
5858
5959 If you specify the angle as an integer which is a multiple of 90 degrees, rotateImage()
60 just copies the pixels in the appropriate new order. It expects the destination image to
60 just copies the pixels in the appropriate new order. It expects the destination image to
6161 have the correct shape for the desired rotation. That is, when the rotation is a multiple
6262 of 180 degrees, source and destination must have the same shape, otherwise destination
6363 must have the transposed shape of the source.
64
64
6565 If you want to rotate by an arbitrary angle and around an arbitrary center point,
6666 you must specify the source image as a \ref vigra::SplineImageView, which is used for
67 interpolation at the required subpixel positions. If no center point is provided, the image
67 interpolation at the required subpixel positions. If no center point is provided, the image
6868 center is used by default. The destination image must have the same size
69 as the source SplineImageView.
70
69 as the source SplineImageView.
70
7171 Positive angles refer to counter-clockwise rotation, negative ones to clockwise rotation.
7272 All angles must be given in degrees.
73
73
7474 <b> Declarations:</b>
75
75
7676 pass 2D array views:
7777 \code
7878 namespace vigra {
7979 // rotate by a multiple of 90 degrees
8080 template <class T1, class S1,
8181 class T2, class S2>
82 void
82 void
8383 rotateImage(MultiArrayView<2, T1, S1> const & src,
8484 MultiArrayView<2, T2, S2> dest,
8585 int rotation);
86
86
8787 // rotate by an arbitrary angle around the given center point
88 template <int ORDER, class T,
88 template <int ORDER, class T,
8989 class T2, class S2>
90 void
90 void
9191 rotateImage(SplineImageView<ORDER, T> const & src,
92 MultiArrayView<2, T2, S2> dest,
93 double angleInDegree,
92 MultiArrayView<2, T2, S2> dest,
93 double angleInDegree,
9494 TinyVector<double, 2> const & center = (src.shape() - Shape2(1)) / 2.0);
9595 }
9696 \endcode
97
97
9898 \deprecatedAPI{rotateImage}
9999 pass \ref ImageIterators and \ref DataAccessors :
100100 \code
102102 // rotate by a multiple of 90 degrees
103103 template <class SrcIterator, class SrcAccessor,
104104 class DestIterator, class DestAccessor>
105 void
105 void
106106 rotateImage(SrcIterator is, SrcIterator end, SrcAccessor as,
107107 DestIterator id, DestAccessor ad, int rotation);
108108
109109 // rotate by an arbitrary angle around the given center point
110 template <int ORDER, class T,
110 template <int ORDER, class T,
111111 class DestIterator, class DestAccessor>
112112 void rotateImage(SplineImageView<ORDER, T> const & src,
113 DestIterator id, DestAccessor dest,
113 DestIterator id, DestAccessor dest,
114114 double angleInDegree, TinyVector<double, 2> const & center = (src.shape() - Shape2(1)) / 2.0);
115115 }
116116 \endcode
120120 // rotate by a multiple of 90 degrees
121121 template <class SrcImageIterator, class SrcAccessor,
122122 class DestImageIterator, class DestAccessor>
123 void
123 void
124124 rotateImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
125125 pair<DestImageIterator, DestAccessor> dest, int rotation);
126
126
127127 // rotate by an arbitrary angle around the given center point
128 template <int ORDER, class T,
128 template <int ORDER, class T,
129129 class DestIterator, class DestAccessor>
130 void
130 void
131131 rotateImage(SplineImageView<ORDER, T> const & src,
132 pair<DestImageIterator, DestAccessor> dest,
132 pair<DestImageIterator, DestAccessor> dest,
133133 double angleInDegree, TinyVector<double, 2> const & center = (src.shape() - Shape2(1)) / 2.0);
134134 }
135135 \endcode
136136 \deprecatedEnd
137
137
138138 <b> Usage:</b>
139
139
140140 <b>\#include</b> \<vigra/basicgeometry.hxx\><br>
141141 Namespace: vigra
142
142
143143 \code
144144 // rotate counter-clockwise by 90 degrees (no interpolation required)
145145 MultiArray<2, float> src(width, height),
146146 dest(height, width); // note that width and height are exchanged
147147 ... // fill src
148148 rotateImage(src, dest, 90);
149
149
150150 // rotate clockwise by 38.5 degrees, using a SplieImageView for cubic interpolation
151151 SplineImageView<3, float> spline(srcImageRange(src));
152152 MultiArray<2, float> dest2(src.shape());
153
153
154154 vigra::rotateImage(spline, dest2, -38.5);
155155 \endcode
156156
160160 BImage src(width, height),
161161 dest(height, width); // note that width and height are exchanged
162162 ... // fill src
163
163
164164 rotateImage(srcImageRange(src), destImage(dest), 90);
165
165
166166 // rotate clockwise by 38.5 degrees, using a SplieImageView for cubic interpolation
167167 SplineImageView<3, float> spline(srcImageRange(src));
168168 FImage dest2(width, height);
169
169
170170 rotateImage(spline, destImage(dest), -38.5);
171171 \endcode
172172 <b> Required Interface:</b>
173173 \code
174174 SrcImageIterator src_upperleft, src_lowerright;
175175 DestImageIterator dest_upperleft;
176
176
177177 SrcAccessor src_accessor;
178
178
179179 dest_accessor.set(src_accessor(src_upperleft), dest_upperleft);
180180 \endcode
181181 \deprecatedEnd
182
182
183183 <b> Preconditions:</b>
184184 \code
185 src.shape(0) > 1 && src.shape(1) > 1
185 src.shape(0) > 1 && src.shape(1) > 1
186186 \endcode
187187 */
188188 doxygen_overloaded_function(template <...> void rotateImage)
189189
190 template <class SrcIterator, class SrcAccessor,
190 template <class SrcIterator, class SrcAccessor,
191191 class DestIterator, class DestAccessor>
192192 void rotateImage(SrcIterator is, SrcIterator end, SrcAccessor as,
193193 DestIterator id, DestAccessor ad, int rotation)
196196 int ws = end.x - is.x;
197197 int hs = end.y - is.y;
198198
199 vigra_precondition(rotation % 90 == 0,
199 vigra_precondition(rotation % 90 == 0,
200200 "rotateImage(): "
201201 "This function rotates images only about multiples of 90 degree");
202202
203 rotation = rotation%360;
203 rotation = rotation%360;
204204 if (rotation < 0)
205205 rotation += 360;
206
206
207207 switch(rotation)
208208 {
209209 case 0:
210210 copyImage(is, end, as, id, ad);
211211 break;
212 case 90:
212 case 90:
213213 is.x += (ws-1);
214214 for(x=0; x != ws; x++, is.x--, id.y++)
215215 {
219219 {
220220 ad.set(as(cs), rd);
221221 }
222
222
223223 }
224224 break;
225225
234234 {
235235 ad.set(as(cs), cd);
236236 }
237
237
238238 }
239239 break;
240240
241 case 270:
241 case 270:
242242 is.y += (hs-1);
243243 for(x=0; x != ws; x++, is.x++, id.y++)
244244 {
248248 {
249249 ad.set(as(cs), rd);
250250 }
251
251
252252 }
253253 break;
254 default: //not needful, because of the exception handig in if-statement
255 vigra_fail("internal error");
254 default: //not needful, because of the exception handig in if-statement
255 vigra_fail("internal error");
256256 }
257257 }
258258
259259 template <class SrcImageIterator, class SrcAccessor,
260260 class DestImageIterator, class DestAccessor>
261 inline void
261 inline void
262262 rotateImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
263263 pair<DestImageIterator, DestAccessor> dest, int rotation)
264264 {
267267
268268 template <class T1, class S1,
269269 class T2, class S2>
270 inline void
270 inline void
271271 rotateImage(MultiArrayView<2, T1, S1> const & src,
272272 MultiArrayView<2, T2, S2> dest,
273273 int rotation)
289289
290290 enum Reflect {horizontal = 1, vertical = 2};
291291
292 inline
292 inline
293293 Reflect operator|(Reflect l, Reflect r)
294294 {
295295 return Reflect((unsigned int)l | (unsigned int)r);
300300 The reflection direction refers to the reflection axis, i.e.
301301 horizontal reflection turns the image upside down, vertical reflection
302302 changes left for right. The directions are selected by the enum values
303 <tt>vigra::horizontal</tt> and <tt>vigra::vertical</tt>. The two directions
304 can also be "or"ed together to perform both reflections simultaneously
305 (see example below) -- this is the same as a 180 degree rotation.
306
303 <tt>vigra::horizontal</tt> and <tt>vigra::vertical</tt>. The two directions
304 can also be "or"ed together to perform both reflections simultaneously
305 (see example below) -- this is the same as a 180 degree rotation.
306
307307 <b> Declarations:</b>
308
308
309309 pass 2D array views:
310310 \code
311311 namespace vigra {
312312 template <class T1, class S1,
313313 class T2, class S2>
314 void
314 void
315315 reflectImage(MultiArrayView<2, T1, S1> const & src,
316316 MultiArrayView<2, T2, S2> dest, Reflect reflect);
317317 }
318318 \endcode
319
319
320320 \deprecatedAPI{reflectImage}
321321 pass \ref ImageIterators and \ref DataAccessors :
322322 \code
323323 namespace vigra {
324324 template <class SrcIterator, class SrcAccessor,
325325 class DestIterator, class DestAccessor>
326 void
326 void
327327 reflectImage(SrcIterator is, SrcIterator end, SrcAccessor as,
328328 DestIterator id, DestAccessor ad, Reflect axis);
329329 }
333333 namespace vigra {
334334 template <class SrcImageIterator, class SrcAccessor,
335335 class DestImageIterator, class DestAccessor>
336 void
336 void
337337 reflectImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
338338 pair<DestImageIterator, DestAccessor> dest, Reflect axis);
339339 }
340340 \endcode
341341 \deprecatedEnd
342
342
343343 <b> Usage:</b>
344
344
345345 <b>\#include</b> \<vigra/basicgeometry.hxx\><br>
346346 Namespace: vigra
347
347
348348 \code
349349 MultiArray<2, float> src(width, height),
350350 dest(width, height);
351351 ... // fill src
352352 // reflect about both dimensions
353353 vigra::reflectImage(src, dest, vigra::horizontal | vigra::vertical);
354
354
355355 \endcode
356356
357357 \deprecatedUsage{reflectImage}
359359 BImage src(width, height),
360360 dest(width, height);
361361 ... // fill src
362
362
363363 vigra::reflectImage(srcImageRange(src), destImage(dest), vigra::horizontal | vigra::vertical);
364364 \endcode
365365 <b> Required Interface:</b>
366366 \code
367367 SrcImageIterator src_upperleft, src_lowerright;
368368 DestImageIterator dest_upperleft;
369
369
370370 SrcAccessor src_accessor;
371
371
372372 dest_accessor.set(src_accessor(src_upperleft), dest_upperleft);
373373 \endcode
374374 \deprecatedEnd
375
375
376376 <b> Preconditions:</b>
377377 \code
378 src.shape(0) > 1 && src.shape(1) > 1
378 src.shape(0) > 1 && src.shape(1) > 1
379379 \endcode
380380 */
381381 doxygen_overloaded_function(template <...> void reflectImage)
382382
383 template <class SrcIterator, class SrcAccessor,
383 template <class SrcIterator, class SrcAccessor,
384384 class DestIterator, class DestAccessor>
385385 void reflectImage(SrcIterator is, SrcIterator end, SrcAccessor as,
386386 DestIterator id, DestAccessor ad, Reflect reflect)
387387 {
388
388
389389 int ws = end.x - is.x;
390390 int hs = end.y - is.y;
391391
394394 if(reflect == horizontal)
395395 {//flipImage
396396 is.y += (hs-1);
397 for(x=0; x<ws; ++x, ++is.x, ++id.x)
397 for(x=0; x<ws; ++x, ++is.x, ++id.x)
398398 {
399399 typename SrcIterator::column_iterator cs = is.columnIterator();
400400 typename DestIterator::column_iterator cd = id.columnIterator();
407407 else if(reflect == vertical)
408408 {//flopImage
409409 is.x += (ws-1);
410 for(x=0; x < ws; ++x, --is.x, ++id.x)
410 for(x=0; x < ws; ++x, --is.x, ++id.x)
411411 {
412412
413413 typename SrcIterator::column_iterator cs = is.columnIterator();
432432 }
433433 }
434434 }
435 else
435 else
436436 vigra_fail("reflectImage(): "
437437 "This function reflects horizontal or vertical,"
438438 " 'and' is included");
440440
441441 template <class SrcImageIterator, class SrcAccessor,
442442 class DestImageIterator, class DestAccessor>
443 inline void
443 inline void
444444 reflectImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
445445 pair<DestImageIterator, DestAccessor> dest, Reflect reflect)
446446 {
449449
450450 template <class T1, class S1,
451451 class T2, class S2>
452 inline void
452 inline void
453453 reflectImage(MultiArrayView<2, T1, S1> const & src,
454454 MultiArrayView<2, T2, S2> dest, Reflect reflect)
455455 {
470470 /** \brief Transpose an image over the major or minor diagonal.
471471
472472 The transposition direction refers to the axis, i.e.
473 major transposition turns the upper right corner into the lower left one,
474 whereas minor transposition changes the upper left corner into the lower right one.
473 major transposition turns the upper right corner into the lower left one,
474 whereas minor transposition changes the upper left corner into the lower right one.
475475 The directions are selected by the enum values
476 <tt>vigra::major</tt> and <tt>vigra::minor</tt>. The two directions
477 can also be "or"ed together to perform both reflections simultaneously
476 <tt>vigra::major</tt> and <tt>vigra::minor</tt>. The two directions
477 can also be "or"ed together to perform both reflections simultaneously
478478 (see example below) -- this is the same as a 180 degree rotation.
479479 (Caution: When doing multi-platform development, you should be
480480 aware that some <sys/types.h> define major/minor, too. Do not omit
481481 the vigra namespace prefix.)
482
482
483483 Note that a similar effect can be chieved by MultiArrayView::transpose(). However,
484484 the latter can only transpose about the major diagonal, and it doesn't rearrange the data
485485 - it just creates a view with transposed axis ordering. It depends on the context
486486 which function is more appropriate.
487
487
488488 <b> Declarations:</b>
489
489
490490 pass 2D array views:
491491 \code
492492 namespace vigra {
493493 template <class T1, class S1,
494494 class T2, class S2>
495 void
495 void
496496 transposeImage(MultiArrayView<2, T1, S1> const & src,
497497 MultiArrayView<2, T2, S2> dest, Transpose axis);
498498 }
499499 \endcode
500
500
501501 \deprecatedAPI{transposeImage}
502502 pass \ref ImageIterators and \ref DataAccessors :
503503 \code
504504 namespace vigra {
505505 template <class SrcIterator, class SrcAccessor,
506506 class DestIterator, class DestAccessor>
507 void
507 void
508508 transposeImage(SrcIterator is, SrcIterator end, SrcAccessor as,
509509 DestIterator id, DestAccessor ad, Transpose axis);
510510 }
514514 namespace vigra {
515515 template <class SrcImageIterator, class SrcAccessor,
516516 class DestImageIterator, class DestAccessor>
517 void
517 void
518518 transposeImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
519519 pair<DestImageIterator, DestAccessor> dest, Transpose axis);
520520 }
521521 \endcode
522522 \deprecatedEnd
523
523
524524 <b> Usage:</b>
525
525
526526 <b>\#include</b> \<vigra/basicgeometry.hxx\><br>
527527 Namespace: vigra
528
528
529529 \code
530530 MultiArray<2, float> src(width, height),
531531 dest(height, width); // note that dimensions are transposed
532532 ... // fill src
533
533
534534 // transpose about the major diagonal
535535 vigra::transposeImage(src, dest, vigra::major);
536
536
537537 // this produces the same data as transposing the view
538538 assert(dest == src.transpose());
539
539
540540 // transposition about the minor diagonal has no correspondence in MultiArrayView
541541 vigra::transposeImage(src, dest, vigra::minor);
542542 \endcode
546546 BImage src(width, height),
547547 dest(width, height);
548548 ... // fill src
549
549
550550 // transpose about both diagonals simultaneously
551551 vigra::transposeImage(srcImageRange(src), destImage(dest), vigra::major | vigra::minor);
552552 \endcode
554554 \code
555555 SrcImageIterator src_upperleft, src_lowerright;
556556 DestImageIterator dest_upperleft;
557
557
558558 SrcAccessor src_accessor;
559
559
560560 dest_accessor.set(src_accessor(src_upperleft), dest_upperleft);
561561 \endcode
562562 \deprecatedEnd
563
563
564564 <b> Preconditions:</b>
565565 \code
566 src.shape(0) > 1 && src.shape(1) > 1
566 src.shape(0) > 1 && src.shape(1) > 1
567567 \endcode
568568 */
569569 doxygen_overloaded_function(template <...> void transposeImage)
570570
571 template <class SrcIterator, class SrcAccessor,
571 template <class SrcIterator, class SrcAccessor,
572572 class DestIterator, class DestAccessor>
573573 void transposeImage(SrcIterator is, SrcIterator end, SrcAccessor as,
574574 DestIterator id, DestAccessor ad, Transpose transpose)
619619 ad.set(as(cs), cd);
620620 }
621621 }
622
623 }
624 else
622
623 }
624 else
625625 vigra_fail("transposeImage(): "
626626 "This function transposes major or minor,"
627627 " 'and' is included");
630630
631631 template <class SrcImageIterator, class SrcAccessor,
632632 class DestImageIterator, class DestAccessor>
633 inline void
633 inline void
634634 transposeImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
635635 pair<DestImageIterator, DestAccessor> dest, Transpose transpose)
636636 {
639639
640640 template <class T1, class S1,
641641 class T2, class S2>
642 inline void
642 inline void
643643 transposeImage(MultiArrayView<2, T1, S1> const & src,
644644 MultiArrayView<2, T2, S2> dest, Transpose transpose)
645645 {
655655 /********************************************************/
656656
657657 /*
658 * Vergroessert eine Linie um einen Faktor.
658 * Vergroessert eine Linie um einen Faktor.
659659 * Ist z.B. der Faktor = 4 so werden in der
660 * neuen Linie(Destination) jedes Pixel genau 4 mal
661 * vorkommen, also es findet auch keine Glaetung
660 * neuen Linie(Destination) jedes Pixel genau 4 mal
661 * vorkommen, also es findet auch keine Glaetung
662662 * statt (NoInterpolation). Als Parameter sollen
663663 * der Anfangs-, der Enditerator und der Accessor
664664 * der Ausgangslinie (Source line), der Anfangsiterator
665665 * und Accessor der Ziellinie (destination line) und
666666 * anschliessend der Faktor um den die Linie (Zeile)
667 * vergroessert bzw. verkleinert werden soll.
667 * vergroessert bzw. verkleinert werden soll.
668668 */
669 template <class SrcIterator, class SrcAccessor,
669 template <class SrcIterator, class SrcAccessor,
670670 class DestIterator, class DestAccessor>
671671 void resampleLine(SrcIterator src_iter, SrcIterator src_iter_end, SrcAccessor src_acc,
672672 DestIterator dest_iter, DestAccessor dest_acc, double factor)
673673 {
674 // The width of the src line.
674 // The width of the src line.
675675 int src_width = src_iter_end - src_iter;
676
676
677677 vigra_precondition(src_width > 0,
678678 "resampleLine(): input image too small.");
679679 vigra_precondition(factor > 0.0,
680680 "resampleLine(): factor must be positive.");
681
681
682682 if (factor >= 1.0)
683683 {
684684 int int_factor = (int)factor;
700700 }
701701 else
702702 {
703 DestIterator dest_end = dest_iter + (int)VIGRA_CSTD::ceil(src_width*factor);
703 DestIterator dest_end = dest_iter + (int)VIGRA_CSTD::ceil(src_width*factor);
704704 factor = 1.0/factor;
705705 int int_factor = (int)factor;
706706 double dx = factor - int_factor;
707707 double saver = dx;
708708 src_iter_end -= 1;
709 for ( ; src_iter != src_iter_end && dest_iter != dest_end ;
709 for ( ; src_iter != src_iter_end && dest_iter != dest_end ;
710710 ++dest_iter, src_iter += int_factor, saver += dx)
711711 {
712712 if (saver >= 1.0)
726726 inline int sizeForResamplingFactor(int oldsize, double factor)
727727 {
728728 return (factor < 1.0)
729 ? (int)VIGRA_CSTD::ceil(oldsize * factor)
729 ? (int)VIGRA_CSTD::ceil(oldsize * factor)
730730 : (int)(oldsize * factor);
731731 }
732732
739739
740740 /** \brief Resample image by a given factor.
741741
742 This algorithm is very fast and does not require any arithmetic on the pixel types.
742 This algorithm is very fast and does not require any arithmetic on the pixel types.
743743 The input image must have a size of at
744744 least 2x2. Destination pixels are directly copied from the appropriate
745745 source pixels. The size of the result image is the product of <tt>factor</tt>
746746 and the original size, where we round up if <tt>factor < 1.0</tt> and down otherwise.
747 This size calculation is the main difference to the convention used in the similar
747 This size calculation is the main difference to the convention used in the similar
748748 function \ref resizeImageNoInterpolation():
749 there, the result size is calculated as <tt>n*(old_width-1)+1</tt> and
750 <tt>n*(old_height-1)+1</tt>. This is because \ref resizeImageNoInterpolation()
749 there, the result size is calculated as <tt>n*(old_width-1)+1</tt> and
750 <tt>n*(old_height-1)+1</tt>. This is because \ref resizeImageNoInterpolation()
751751 does not replicate the last pixel of every row/column in order to make it compatible
752752 with the other functions of the <tt>resizeImage...</tt> family.
753
753
754754 The function can be called with different resampling factors for x and y, or
755755 with a single factor to be used for both directions.
756756
757757 It should also be noted that resampleImage() is implemented so that an enlargement followed
758 by the corresponding shrinking reproduces the original image.
759
758 by the corresponding shrinking reproduces the original image.
759
760760 <b> Declarations:</b>
761
761
762762 pass 2D array views:
763763 \code
764764 namespace vigra {
765765 template <class T1, class S1,
766766 class T2, class S2>
767 void
767 void
768768 resampleImage(MultiArrayView<2, T1, S1> const & src,
769769 MultiArrayView<2, T2, S2> dest, double factor);
770770
771771 template <class T1, class S1,
772772 class T2, class S2>
773 void
773 void
774774 resampleImage(MultiArrayView<2, T1, S1> const & src,
775775 MultiArrayView<2, T2, S2> dest, double xfactor, double yfactor);
776776 }
777777 \endcode
778
778
779779 \deprecatedAPI{resampleImage}
780780 pass \ref ImageIterators and \ref DataAccessors :
781781 \code
782782 namespace vigra {
783783 template <class SrcIterator, class SrcAccessor,
784784 class DestIterator, class DestAccessor>
785 void
785 void
786786 resampleImage(SrcIterator is, SrcIterator iend, SrcAccessor sa,
787787 DestIterator id, DestAccessor ad, double factor);
788
788
789789 template <class SrcIterator, class SrcAccessor,
790790 class DestIterator, class DestAccessor>
791 void
791 void
792792 resampleImage(SrcIterator is, SrcIterator iend, SrcAccessor sa,
793793 DestIterator id, DestAccessor ad, double xfactor, double yfactor);
794794 }
798798 namespace vigra {
799799 template <class SrcImageIterator, class SrcAccessor,
800800 class DestImageIterator, class DestAccessor>
801 void
801 void
802802 resampleImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
803803 pair<DestImageIterator, DestAccessor> dest, double factor);
804
804
805805 template <class SrcImageIterator, class SrcAccessor,
806806 class DestImageIterator, class DestAccessor>
807 void
807 void
808808 resampleImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
809809 pair<DestImageIterator, DestAccessor> dest, double xfactor, double yfactor);
810810 }
811811 \endcode
812812 \deprecatedEnd
813
813
814814 <b> Usage:</b>
815
815
816816 <b>\#include</b> \<vigra/basicgeometry.hxx\><br>
817817 Namespace: vigra
818
818
819819 \code
820820 double factor = 2.0;
821821 MultiArray<2, float> src(width, height),
822822 dest((int)(factor*width), (int)(factor*height)); // enlarge image by factor
823823 ... // fill src
824
824
825825 resampleImage(src, dest, factor);
826826 \endcode
827
827
828828 \deprecatedUsage{resampleImage}
829829 \code
830830 // use old API
831 vigra::resampleImage(srcImageRange(src), destImage(dest), factor);
831 vigra::resampleImage(srcImageRange(src), destImage(dest), factor);
832832 \endcode
833833 <b> Required Interface:</b>
834834 \code
835835 SrcImageIterator src_upperleft, src_lowerright;
836836 DestImageIterator dest_upperleft;
837
837
838838 SrcAccessor src_accessor;
839
839
840840 dest_accessor.set(src_accessor(src_upperleft), dest_upperleft);
841841 \endcode
842842 \deprecatedEnd
843
843
844844 <b> Preconditions:</b>
845845 \code
846 src.shape(0) > 1 && src.shape(1) > 1
846 src.shape(0) > 1 && src.shape(1) > 1
847847 \endcode
848848 */
849849 doxygen_overloaded_function(template <...> void resampleImage)
850850
851851 template <class SrcIterator, class SrcAccessor,
852852 class DestIterator, class DestAccessor>
853 void
853 void
854854 resampleImage(SrcIterator is, SrcIterator iend, SrcAccessor sa,
855855 DestIterator id, DestAccessor ad, double xfactor, double yfactor)
856856 {
857857 int width_old = iend.x - is.x;
858858 int height_old = iend.y - is.y;
859
859
860860 //Bei Verkleinerung muss das dest-Bild ceiling(src*factor), da z.B.
861861 //aus 6x6 grossem Bild wird eins 18x18 grosses gemacht bei Vergroesserungsfaktor 3.1
862862 //umgekehrt damit wir vom 18x18 zu 6x6 (und nicht 5x5) bei Vergroesserung von 1/3.1
863863 //muss das kleinste Integer das groesser als 18/3.1 ist genommen werden.
864864 int height_new = sizeForResamplingFactor(height_old, yfactor);
865865 int width_new = sizeForResamplingFactor(width_old, xfactor);
866
866
867867 vigra_precondition((width_old > 1) && (height_old > 1),
868868 "resampleImage(): "
869869 "Source image too small.\n");
870870 vigra_precondition((width_new > 1) && (height_new > 1),
871871 "resampleImage(): "
872872 "Destination image too small.\n");
873
873
874874 typedef typename SrcAccessor::value_type SRCVT;
875875 typedef BasicImage<SRCVT> TmpImage;
876876 typedef typename TmpImage::traverser TmpImageIterator;
877877
878878 BasicImage<SRCVT> tmp(width_old, height_new);
879
879
880880 int x,y;
881
881
882882 typename BasicImage<SRCVT>::Iterator yt = tmp.upperLeft();
883883
884 for(x=0; x<width_old; ++x, ++is.x, ++yt.x)
884 for(x=0; x<width_old; ++x, ++is.x, ++yt.x)
885885 {
886886 typename SrcIterator::column_iterator c1 = is.columnIterator();
887887 typename TmpImageIterator::column_iterator ct = yt.columnIterator();
890890
891891 yt = tmp.upperLeft();
892892
893 for(y=0; y < height_new; ++y, ++yt.y, ++id.y)
893 for(y=0; y < height_new; ++y, ++yt.y, ++id.y)
894894 {
895895 typename DestIterator::row_iterator rd = id.rowIterator();
896896 typename TmpImageIterator::row_iterator rt = yt.rowIterator();
901901
902902 template <class SrcIterator, class SrcAccessor,
903903 class DestIterator, class DestAccessor>
904 void
904 void
905905 resampleImage(SrcIterator is, SrcIterator iend, SrcAccessor sa,
906906 DestIterator id, DestAccessor ad, double factor)
907907 {
910910
911911 template <class SrcImageIterator, class SrcAccessor,
912912 class DestImageIterator, class DestAccessor>
913 inline void
913 inline void
914914 resampleImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
915915 pair<DestImageIterator, DestAccessor> dest, double factor)
916916 {
919919
920920 template <class SrcImageIterator, class SrcAccessor,
921921 class DestImageIterator, class DestAccessor>
922 inline void
922 inline void
923923 resampleImage(triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
924924 pair<DestImageIterator, DestAccessor> dest, double xfactor, double yfactor)
925925 {
928928
929929 template <class T1, class S1,
930930 class T2, class S2>
931 inline void
931 inline void
932932 resampleImage(MultiArrayView<2, T1, S1> const & src,
933933 MultiArrayView<2, T2, S2> dest, double factor)
934934 {
938938 else
939939 vigra_precondition(ceil(factor*src.shape()) == dest.shape(),
940940 "resampleImage(): shape mismatch between input and output.");
941
941
942942 resampleImage(srcImageRange(src), destImage(dest), factor);
943943 }
944944
945945 template <class T1, class S1,
946946 class T2, class S2>
947 inline void
947 inline void
948948 resampleImage(MultiArrayView<2, T1, S1> const & src,
949949 MultiArrayView<2, T2, S2> dest, double xfactor, double yfactor)
950950 {
960960 else
961961 vigra_precondition(ceil(yfactor*src.shape(1)) == dest.shape(1),
962962 "resampleImage(): shape mismatch between input and output.");
963
963
964964 resampleImage(srcImageRange(src), destImage(dest), xfactor, yfactor);
965965 }
966966
462462
463463 /** \brief Fundamental class template for images.
464464
465 <b>deprecated</b>, use \ref vigra::MultiArray instead
466
465467 A customized memory allocator can be specified as a templated argument
466468 and passed in the constructor.
467469
5858
5959 /** \brief BasicImage using foreign memory.
6060
61 <b>deprecated</b>, use \ref vigra::MultiArrayView instead
62
6163 This class provides the same interface as \ref vigra::BasicImage
6264 (with the exception of <tt>resize()</tt>) but the image's
6365 memory is provided from the outside instead of allocated internally.
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_BINARY_FOREST_HXX
35 #define VIGRA_BINARY_FOREST_HXX
36
37 #include <vector>
38 #include "graphs.hxx"
39
40 namespace vigra
41 {
42
43 /** \addtogroup GraphDataStructures
44 */
45 //@{
46
47 /********************************************************/
48 /* */
49 /* BinaryForest */
50 /* */
51 /********************************************************/
52
/**
 * @brief BinaryForest stores a collection of rooted binary trees.
 *
 * Each connected component of the BinaryForest is thus a tree, and all edges are
 * directed away from the root node of the corresponding tree.
 *
 * @note If there is an arc from node <tt>u</tt> to node <tt>v</tt>, then the
 * arc ID is <tt>2*id(u)</tt> when <tt>v</tt> is the left child and <tt>2*id(u)+1</tt>
 * when <tt>v</tt> is the right child.
 */
class BinaryForest
{
public:

    typedef Int64 index_type;
    /// Node descriptor type of the present graph.
    typedef detail::NodeDescriptor<index_type> Node;
    /// Arc descriptor type of the present graph.
    typedef detail::ArcDescriptor<index_type> Arc;

    /// @brief Create an empty forest.
    BinaryForest();

    /// @brief Add a new node (its node ID will be selected automatically).
    Node addNode();
    /// @brief Add a new arc from node \a u to node \a v.
    /// The arc ID is <tt>2*id(u)</tt> if \a v is the left child of \a u, <tt>2*id(u)+1</tt> otherwise.
    Arc addArc(Node const & u, Node const & v);
    /// @brief Check if \a node exists.
    bool valid(Node const & node) const;
    /// @brief Check if \a arc exists.
    bool valid(Arc const & arc) const;
    /// @brief Find start node of \a arc.
    Node source(Arc const & arc) const;
    /// @brief Find end node of \a arc.
    Node target(Arc const & arc) const;
    /// @brief Get ID for node descriptor \a node.
    index_type id(Node const & node) const;
    /// @brief Get ID for arc descriptor \a arc.
    index_type id(Arc const & arc) const;
    /// @brief Get node descriptor for \a id.
    Node nodeFromId(index_type const & id) const;
    /// @brief Get arc descriptor for \a id.
    Arc arcFromId(index_type const & id) const;
    /// @brief Return the highest existing node ID.
    index_type maxNodeId() const;
    /// @brief Return the highest possible arc ID (equivalent to <tt>2*maxNodeId() + 1</tt>).
    index_type maxArcId() const;
    /// @brief Return the number of nodes (equivalent to <tt>maxNodeId()+1</tt>).
    size_t numNodes() const;
    /// @brief Return the number of arcs.
    /// Always less than <tt>maxArcId()</tt> because not all arcs actually exist.
    size_t numArcs() const;
    /// @brief Return the number of incoming edges of \a node.
    /// <tt>0</tt> for a root node, <tt>1</tt> otherwise.
    size_t inDegree(Node const & node) const;
    /// @brief Return the number of outgoing edges of \a node.
    /// <tt>0</tt> for a leaf node, <tt>1</tt> or <tt>2</tt> otherwise.
    size_t outDegree(Node const & node) const;
    /// @brief Return the number of parents of \a node (equivalent to <tt>inDegree()</tt>).
    size_t numParents(Node const & node) const;
    /// @brief Return the number of children of \a node (equivalent to <tt>outDegree()</tt>).
    size_t numChildren(Node const & node) const;
    /// @brief Return the number of trees in the forest.
    size_t numRoots() const;
    /// @brief Create node descriptor for ID \a i, or <tt>lemon::INVALID</tt> if
    /// \a i is not a valid ID.
    Node getNode(size_t i) const;
    /// @brief Get the parent node descriptor of \a node, or <tt>lemon::INVALID</tt>
    /// if \a node is a root or \a i is non-zero.
    Node getParent(Node const & node, size_t i = 0) const;
    /// @brief Get child number \a i of \a node.
    /// Returns the left child if <tt>i=0</tt>, the right child if <tt>i=1</tt>,
    /// and <tt>lemon::INVALID</tt> for other values of \a i or when the respective
    /// child is undefined.
    Node getChild(Node const & node, size_t i = 0) const;
    /// @brief Get the root node descriptor of tree \a i in the forest, or
    /// <tt>lemon::INVALID</tt> if \a i is invalid.
    Node getRoot(size_t i = 0) const;
    /// @brief Merge two forests and increase the IDs of \a other to avoid ID clashes.
    /// The function returns the offset that has been added to these IDs.
    size_t merge(BinaryForest const & other);

private:

    // Internal node record: array indices of the parent and the two children
    // within nodes_, where -1 means "unset".
    struct NodeT
    {
        NodeT()
        :
            parent(-1),
            left_child(-1),
            right_child(-1)
        {}
        index_type parent, left_child, right_child;
    };

    std::vector<NodeT> nodes_;

    // Sorted vector with the node ids of the roots.
    std::vector<index_type> root_nodes_;

    // Total number of arcs currently present in the forest.
    size_t num_arcs_;
};
156
157
158
159 inline BinaryForest::BinaryForest()
160 :
161 nodes_(),
162 root_nodes_(),
163 num_arcs_(0)
164 {}
165
166 inline BinaryForest::Node BinaryForest::addNode()
167 {
168 Node n = Node(nodes_.size());
169 nodes_.push_back(NodeT());
170 root_nodes_.push_back(n.id());
171 return n;
172 }
173
174 inline BinaryForest::Arc BinaryForest::addArc(
175 Node const & u,
176 Node const & v
177 ){
178 NodeT & u_node = nodes_[u.id()];
179 NodeT & v_node = nodes_[v.id()];
180 index_type arc_id = 2*u.id();
181
182 // Make sure that the arc is not inserted twice.
183 if (u_node.left_child == v.id())
184 return Arc(arc_id);
185 if (u_node.right_child == v.id())
186 return Arc(arc_id+1);
187
188 // Add v as child of u.
189 if (u_node.left_child == -1)
190 {
191 u_node.left_child = v.id();
192 }
193 else if (u_node.right_child == -1)
194 {
195 u_node.right_child = v.id();
196 ++arc_id;
197 }
198 else
199 {
200 vigra_fail("BinaryForest::addArc(): The node u already has two children.");
201 }
202
203 // Add u as parent of v.
204 v_node.parent = u.id();
205
206 // If v was a root node, remove it from the list.
207 auto it = std::lower_bound(root_nodes_.begin(), root_nodes_.end(), v.id());
208 if (it != root_nodes_.end() && !(v.id() < *it))
209 root_nodes_.erase(it);
210
211 ++num_arcs_;
212 return Arc(arc_id);
213 }
214
215 inline bool BinaryForest::valid(
216 Node const & node
217 ) const {
218 // NOTE: The conversion to size_t is valid since we first check for >= 0.
219 return node.id() >= 0 && (size_t)node.id() < nodes_.size();
220 }
221
222 inline bool BinaryForest::valid(
223 Arc const & arc
224 ) const {
225 if (arc == lemon::INVALID)
226 return false;
227
228 index_type const uid = arc.id()/2;
229 if (!valid(Node(uid)))
230 return false;
231
232 if (arc.id() % 2 == 0)
233 return nodes_[uid].left_child != -1;
234 else
235 return nodes_[uid].right_child != -1;
236 }
237
238 inline BinaryForest::Node BinaryForest::source(
239 Arc const & arc
240 ) const {
241 return Node(arc.id()/2);
242 }
243
244 inline BinaryForest::Node BinaryForest::target(
245 Arc const & arc
246 ) const {
247 NodeT const & u_node = nodes_[arc.id()/2];
248 if (arc.id() % 2 == 0)
249 return Node(u_node.left_child);
250 else
251 return Node(u_node.right_child);
252 }
253
254 inline BinaryForest::index_type BinaryForest::id(
255 Node const & node
256 ) const {
257 return node.id();
258 }
259
260 inline BinaryForest::index_type BinaryForest::id(
261 Arc const & arc
262 ) const {
263 return arc.id();
264 }
265
266 inline BinaryForest::Node BinaryForest::nodeFromId(
267 index_type const & id
268 ) const {
269 return Node(id);
270 }
271
272 inline BinaryForest::Arc BinaryForest::arcFromId(
273 index_type const & id
274 ) const {
275 return Arc(id);
276 }
277
// Node IDs are dense, so the highest ID is numNodes()-1.
// NOTE(review): for an empty forest, nodes_.size()-1 wraps around in size_t
// before being converted to the signed index_type; in practice this yields
// -1, but the conversion is implementation-defined before C++20 -- confirm
// that an empty forest is an intended use case.
inline BinaryForest::index_type BinaryForest::maxNodeId() const
{
    return nodes_.size()-1;
}

// Each node can be the source of at most two arcs, hence this upper bound.
inline BinaryForest::index_type BinaryForest::maxArcId() const
{
    return 2*maxNodeId() + 1;
}

inline size_t BinaryForest::numNodes() const
{
    return nodes_.size();
}

// Count of arcs actually inserted (maintained incrementally by addArc/merge).
inline size_t BinaryForest::numArcs() const
{
    return num_arcs_;
}
297
298 inline size_t BinaryForest::inDegree(
299 Node const & node
300 ) const {
301 if (nodes_[node.id()].parent == -1)
302 return 0;
303 else
304 return 1;
305 }
306
307 inline size_t BinaryForest::outDegree(
308 Node const & node
309 ) const {
310 NodeT const & n = nodes_[node.id()];
311 if (n.left_child == -1 && n.right_child == -1)
312 return 0;
313 else if (n.left_child == -1 || n.right_child == -1)
314 return 1;
315 else
316 return 2;
317 }
318
319 inline size_t BinaryForest::numParents(
320 Node const & node
321 ) const {
322 return inDegree(node);
323 }
324
325 inline size_t BinaryForest::numChildren(
326 Node const & node
327 ) const {
328 return outDegree(node);
329 }
330
331 inline size_t BinaryForest::numRoots() const
332 {
333 return root_nodes_.size();
334 }
335
336 inline BinaryForest::Node BinaryForest::getNode(
337 size_t i
338 ) const {
339 if (i >= numNodes())
340 return Node(lemon::INVALID);
341 else
342 return Node(i);
343 }
344
345 inline BinaryForest::Node BinaryForest::getParent(
346 Node const & node,
347 size_t i
348 ) const {
349 NodeT const & n = nodes_[node.id()];
350 if (n.parent == -1 || i != 0)
351 return Node(lemon::INVALID);
352 else
353 return Node(n.parent);
354 }
355
356 inline BinaryForest::Node BinaryForest::getChild(
357 Node const & node,
358 size_t i
359 ) const {
360 NodeT const & n = nodes_[node.id()];
361 if (i == 0)
362 return Node(n.left_child);
363 else if (i == 1)
364 return Node(n.right_child);
365 else
366 return Node(lemon::INVALID);
367 }
368
369 inline BinaryForest::Node BinaryForest::getRoot(
370 size_t i
371 ) const {
372 if (i >= root_nodes_.size())
373 return Node(lemon::INVALID);
374 else
375 return Node(root_nodes_[i]);
376 }
377
// Append all nodes and roots of 'other' to this forest. The node IDs of
// 'other' are shifted by this forest's previous node count so they cannot
// clash with existing IDs; that shift value is returned to the caller.
inline size_t BinaryForest::merge(
    BinaryForest const & other
){
    num_arcs_ += other.num_arcs_;
    // The offset must be captured before the insertion below enlarges nodes_.
    size_t const offset = nodes_.size();
    nodes_.insert(nodes_.end(), other.nodes_.begin(), other.nodes_.end());
    // Re-target the parent/child links of the copied node records.
    // -1 marks "unset" and must stay untouched.
    for (size_t i = offset; i < nodes_.size(); ++i)
    {
        NodeT & n = nodes_[i];
        if (n.parent != -1)
            n.parent += offset;
        if (n.left_child != -1)
            n.left_child += offset;
        if (n.right_child != -1)
            n.right_child += offset;
    }

    // Shift the copied root IDs as well. All shifted IDs exceed the existing
    // ones, so appending keeps root_nodes_ sorted.
    size_t const root_offset = root_nodes_.size();
    root_nodes_.insert(root_nodes_.end(), other.root_nodes_.begin(), other.root_nodes_.end());
    for (size_t i = root_offset; i < root_nodes_.size(); ++i)
    {
        root_nodes_[i] += offset;
    }
    return offset;
}
403
404 //@}
405
406 } // namespace vigra
407
408 #endif
8383 Shape current_block_begin,
8484 Shape current_block_end,
8585 Shape current_block_pos,
86 Shape block_shape)
86 Shape /*block_shape*/)
8787 {
8888 blocks[current_block_pos] = source.subarray(current_block_begin, current_block_end);
8989 }
148148 vigra_precondition(blocks_shape == label_blocks_begin.shape() &&
149149 blocks_shape == mapping.shape(),
150150 "shapes of blocks of blocks do not match");
151 vigra_precondition(std::distance(data_blocks_begin,data_blocks_end) == std::distance(label_blocks_begin,label_blocks_end),
152 "the sizes of input ranges are different");
151153
152154 static const unsigned int Dimensions = DataBlocksIterator::dimension + 1;
153155 MultiArray<Dimensions, Label> label_offsets(label_blocks_begin.shape());
171173 //std::iota(ids.begin(), ids.end(), 0 );
172174
173175 parallel_foreach(options.getNumThreads(), d,
174 [&](const int threadId, const uint64_t i){
176 [&](const int /*threadId*/, const uint64_t i){
175177 Label resVal = labelMultiArray(data_blocks_it[i], label_blocks_it[i],
176178 options, equal);
177179 if(has_background) // FIXME: reversed condition?
301303 /* */
302304 /*************************************************************/
303305
306 /** \weakgroup ParallelProcessing
307 \sa labelMultiArrayBlockwise <B>(...)</B>
308 */
309
304310 /** \brief Connected components labeling for MultiArrays and ChunkedArrays.
305311
306312 <b> Declarations:</b>
4040 #include "multi_gridgraph.hxx"
4141 #include "blockify.hxx"
4242 #include "blockwise_labeling.hxx"
43 #include "metaprogramming.hxx"
4344 #include "overlapped_blocks.hxx"
4445
4546 #include <limits>
4748 namespace vigra
4849 {
4950
50 /** \addtogroup SeededRegionGrowing
51 /** \addtogroup Superpixels
5152 */
5253 //@{
5354
6061 BlockwiseLabelOptions const & options)
6162 {
6263 static const unsigned int N = DataArray::actual_dimension;
64 ignore_argument(N);
6365 typedef typename MultiArrayShape<DataArray::actual_dimension>::type Shape;
6466 typedef typename DirectionsBlocksIterator::value_type DirectionsBlock;
6567 Shape shape = overlaps.shape();
7173
7274 parallel_foreach(options.getNumThreads(),
7375 itBegin,end,
74 [&](const int threadId, const Coordinate iterVal){
76 [&](const int /*threadId*/, const Coordinate iterVal){
7577
7678 DirectionsBlock directions_block = directions_blocks_begin[iterVal];
7779 OverlappingBlock<DataArray> data_block = overlaps[iterVal];
136138 /* */
137139 /*************************************************************/
138140
141 /** \weakgroup ParallelProcessing
142 \sa unionFindWatershedsBlockwise <B>(...)</B>
143 */
144
139145 /** \brief Blockwise union-find watersheds transform for MultiArrays and ChunkedArrays.
140146
141147 <b> Declaration:</b>
355355
356356 } // namespace detail
357357
358 /** \addtogroup CommonConvolutionFilters Common Filters
358 /** \addtogroup ConvolutionFilters
359359 */
360360 //@{
361361
678678
679679 /** \brief Boundary tensor variant.
680680
681 This function implements a variant of the boundary tensor where the
682 0th-order Riesz transform has been dropped, so that the tensor is no
683 longer sensitive to blobs. See \ref boundaryTensor() for more detailed
681 This function implements a variant of the boundary tensor where the
682 0th-order Riesz transform has been dropped, so that the tensor is no
683 longer sensitive to blobs. See \ref boundaryTensor() for more detailed
684684 documentation.
685685
686686 <b> Declarations:</b>
3838
3939 #define VIGRA_VERSION_MAJOR 1
4040 #define VIGRA_VERSION_MINOR 11
41 #define VIGRA_VERSION_PATCH 0
42 #define VIGRA_VERSION "1.11.0"
41 #define VIGRA_VERSION_PATCH 1
42 #define VIGRA_VERSION "1.11.1"
4343
4444 #endif /* VIGRA_CONFIG_VERSION_HXX */
4545 #include "multi_shape.hxx"
4646
4747
48 /** \page Convolution Functions to Convolve Images and Signals
49
50 1D and 2D filters, including separable and recursive convolution, and non-linear diffusion
51
52 <b>\#include</b> \<vigra/convolution.hxx\><br>
53 Namespace: vigra
54
55 <UL style="list-style-image:url(documents/bullet.gif)">
56 <LI> \ref CommonConvolutionFilters
57 <BR>&nbsp;&nbsp;&nbsp;<em>Short-hands for many common 2D convolution filters (including normalized convolution)</em>
58 <LI> \ref MultiArrayConvolutionFilters
59 <BR>&nbsp;&nbsp;&nbsp;<em>Convolution filters for arbitrary dimensional arrays (MultiArray etc.)</em>
60 <LI> \ref ResamplingConvolutionFilters
61 <BR>&nbsp;&nbsp;&nbsp;<em>Resampling convolution filters</em>
62 <LI> \ref vigra::Kernel2D
63 <BR>&nbsp;&nbsp;&nbsp;<em>Generic 2-dimensional discrete convolution kernel </em>
64 <LI> \ref SeparableConvolution
65 <BR>&nbsp;&nbsp;&nbsp;<em>1D convolution and separable filters in 2 dimensions </em>
66 <LI> \ref vigra::Kernel1D
67 <BR>&nbsp;&nbsp;&nbsp;<em>Generic 1-dimensional discrete convolution kernel </em>
68 <LI> \ref RecursiveConvolution
69 <BR>&nbsp;&nbsp;&nbsp;<em>Recursive filters (1st and 2nd order)</em>
70 <LI> \ref NonLinearDiffusion
71 <BR>&nbsp;&nbsp;&nbsp;<em>Edge-preserving smoothing </em>
72 <LI> \ref BorderTreatmentMode
73 <BR>&nbsp;&nbsp;&nbsp;<em>Choose between different border treatment modes </em>
74 <LI> \ref KernelArgumentObjectFactories
75 <BR>&nbsp;&nbsp;&nbsp;<em>Factory functions to create argument objects to simplify passing kernels</em>
76 </UL>
77 */
78
7948 /** \page KernelArgumentObjectFactories Kernel Argument Object Factories
8049
8150 These factory functions allow to create argument objects for 1D
173142
174143 namespace vigra {
175144
176
177
178145 /********************************************************/
179146 /* */
180147 /* Common convolution filters */
181148 /* */
182149 /********************************************************/
183150
184 /** \addtogroup CommonConvolutionFilters Common Filters
185
186 These functions calculate common filters by appropriate sequences of calls
187 to \ref separableConvolveX() and \ref separableConvolveY() or explicit 2-dimensional
188 convolution.
151 /** \addtogroup ConvolutionFilters
189152 */
190153 //@{
191154
10681031 gaussianGradient(srcImageRange(src),
10691032 destImage(dest), scale);
10701033 }
1034
1035 /** \weakgroup ParallelProcessing
1036 \sa gaussianGradientMagnitude <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
1037 */
10711038
10721039 /** \brief Calculate the gradient magnitude by means of a 1st derivatives of
10731040 Gaussian filter.
148148
149149 } // namespace detail
150150
151 /** \addtogroup MathFunctions
151 /** \addtogroup RangesAndPoints
152152 */
153153 //@{
154154
4141 #else
4242 #define VIGRA_DELEGATE_CLASS_NAME VIGRA_DELEGATE_JOIN_MACRO(delegate,VIGRA_DELEGATE_PARAM_COUNT)
4343 #define VIGRA_DELEGATE_INVOKER_CLASS_NAME VIGRA_DELEGATE_JOIN_MACRO(delegate_invoker,VIGRA_DELEGATE_PARAM_COUNT)
44 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
45 class VIGRA_DELEGATE_INVOKER_CLASS_NAME;
44 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
45 class VIGRA_DELEGATE_INVOKER_CLASS_NAME;
4646 #endif
4747
48 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
48 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
4949 #ifdef VIGRA_DELEGATE_PREFERRED_SYNTAX
50 class VIGRA_DELEGATE_CLASS_NAME<R (VIGRA_DELEGATE_TEMPLATE_ARGS)>
50 class VIGRA_DELEGATE_CLASS_NAME<R (VIGRA_DELEGATE_TEMPLATE_ARGS)>
5151 #else
52 class VIGRA_DELEGATE_CLASS_NAME
52 class VIGRA_DELEGATE_CLASS_NAME
5353 #endif
54 {
55 public:
56 typedef R return_type;
54 {
55 public:
56 typedef R return_type;
5757 #ifdef VIGRA_DELEGATE_PREFERRED_SYNTAX
58 typedef return_type (VIGRA_DELEGATE_CALLTYPE *signature_type)(VIGRA_DELEGATE_TEMPLATE_ARGS);
59 typedef VIGRA_DELEGATE_INVOKER_CLASS_NAME<signature_type> invoker_type;
58 typedef return_type (VIGRA_DELEGATE_CALLTYPE *signature_type)(VIGRA_DELEGATE_TEMPLATE_ARGS);
59 typedef VIGRA_DELEGATE_INVOKER_CLASS_NAME<signature_type> invoker_type;
6060 #else
61 typedef VIGRA_DELEGATE_INVOKER_CLASS_NAME<R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_ARGS> invoker_type;
61 typedef VIGRA_DELEGATE_INVOKER_CLASS_NAME<R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_ARGS> invoker_type;
6262 #endif
6363
64 VIGRA_DELEGATE_CLASS_NAME()
65 : object_ptr(0)
66 , stub_ptr(0)
67 {}
64 VIGRA_DELEGATE_CLASS_NAME()
65 : object_ptr(0)
66 , stub_ptr(0)
67 {}
6868
69 template <return_type (*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
70 static VIGRA_DELEGATE_CLASS_NAME from_function()
71 {
72 return from_stub(0, &function_stub<TMethod>);
73 }
69 template <return_type (*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
70 static VIGRA_DELEGATE_CLASS_NAME from_function()
71 {
72 return from_stub(0, &function_stub<TMethod>);
73 }
7474
75 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
76 static VIGRA_DELEGATE_CLASS_NAME from_method(T* object_ptr)
77 {
78 return from_stub(object_ptr, &method_stub<T, TMethod>);
79 }
75 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
76 static VIGRA_DELEGATE_CLASS_NAME from_method(T* object_ptr)
77 {
78 return from_stub(object_ptr, &method_stub<T, TMethod>);
79 }
8080
81 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS) const>
82 static VIGRA_DELEGATE_CLASS_NAME from_const_method(T const* object_ptr)
83 {
84 return from_stub(const_cast<T*>(object_ptr), &const_method_stub<T, TMethod>);
85 }
81 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS) const>
82 static VIGRA_DELEGATE_CLASS_NAME from_const_method(T const* object_ptr)
83 {
84 return from_stub(const_cast<T*>(object_ptr), &const_method_stub<T, TMethod>);
85 }
8686
87 return_type operator()(VIGRA_DELEGATE_PARAMS) const
88 {
89 return (*stub_ptr)(object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_ARGS);
90 }
87 return_type operator()(VIGRA_DELEGATE_PARAMS) const
88 {
89 return (*stub_ptr)(object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_ARGS);
90 }
9191
92 operator bool () const
93 {
94 return stub_ptr != 0;
95 }
92 operator bool () const
93 {
94 return stub_ptr != 0;
95 }
9696
97 bool operator!() const
98 {
99 return !(operator bool());
100 }
97 bool operator!() const
98 {
99 return !(operator bool());
100 }
101101
102 private:
103
104 typedef return_type (VIGRA_DELEGATE_CALLTYPE *stub_type)(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS);
102 private:
103
104 typedef return_type (VIGRA_DELEGATE_CALLTYPE *stub_type)(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS);
105105
106 void* object_ptr;
107 stub_type stub_ptr;
106 void* object_ptr;
107 stub_type stub_ptr;
108108
109 static VIGRA_DELEGATE_CLASS_NAME from_stub(void* object_ptr, stub_type stub_ptr)
110 {
111 VIGRA_DELEGATE_CLASS_NAME d;
112 d.object_ptr = object_ptr;
113 d.stub_ptr = stub_ptr;
114 return d;
115 }
109 static VIGRA_DELEGATE_CLASS_NAME from_stub(void* object_ptr, stub_type stub_ptr)
110 {
111 VIGRA_DELEGATE_CLASS_NAME d;
112 d.object_ptr = object_ptr;
113 d.stub_ptr = stub_ptr;
114 return d;
115 }
116116
117 template <return_type (*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
118 static return_type VIGRA_DELEGATE_CALLTYPE function_stub(void* VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
119 {
120 return (TMethod)(VIGRA_DELEGATE_ARGS);
121 }
117 template <return_type (*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
118 static return_type VIGRA_DELEGATE_CALLTYPE function_stub(void* VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
119 {
120 return (TMethod)(VIGRA_DELEGATE_ARGS);
121 }
122122
123 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
124 static return_type VIGRA_DELEGATE_CALLTYPE method_stub(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
125 {
126 T* p = static_cast<T*>(object_ptr);
127 return (p->*TMethod)(VIGRA_DELEGATE_ARGS);
128 }
123 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS)>
124 static return_type VIGRA_DELEGATE_CALLTYPE method_stub(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
125 {
126 T* p = static_cast<T*>(object_ptr);
127 return (p->*TMethod)(VIGRA_DELEGATE_ARGS);
128 }
129129
130 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS) const>
131 static return_type VIGRA_DELEGATE_CALLTYPE const_method_stub(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
132 {
133 T const* p = static_cast<T*>(object_ptr);
134 return (p->*TMethod)(VIGRA_DELEGATE_ARGS);
135 }
136 };
130 template <class T, return_type (T::*TMethod)(VIGRA_DELEGATE_TEMPLATE_ARGS) const>
131 static return_type VIGRA_DELEGATE_CALLTYPE const_method_stub(void* object_ptr VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_PARAMS)
132 {
133 T const* p = static_cast<T*>(object_ptr);
134 return (p->*TMethod)(VIGRA_DELEGATE_ARGS);
135 }
136 };
137137
138 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
138 template <typename R VIGRA_DELEGATE_SEPARATOR VIGRA_DELEGATE_TEMPLATE_PARAMS>
139139 #ifdef VIGRA_DELEGATE_PREFERRED_SYNTAX
140 class VIGRA_DELEGATE_INVOKER_CLASS_NAME<R (VIGRA_DELEGATE_TEMPLATE_ARGS)>
140 class VIGRA_DELEGATE_INVOKER_CLASS_NAME<R (VIGRA_DELEGATE_TEMPLATE_ARGS)>
141141 #else
142 class VIGRA_DELEGATE_INVOKER_CLASS_NAME
142 class VIGRA_DELEGATE_INVOKER_CLASS_NAME
143143 #endif
144 {
145 VIGRA_DELEGATE_INVOKER_DATA
144 {
145 VIGRA_DELEGATE_INVOKER_DATA
146146
147 public:
148 VIGRA_DELEGATE_INVOKER_CLASS_NAME(VIGRA_DELEGATE_PARAMS)
147 public:
148 VIGRA_DELEGATE_INVOKER_CLASS_NAME(VIGRA_DELEGATE_PARAMS)
149149 #if VIGRA_DELEGATE_PARAM_COUNT > 0
150 :
150 :
151151 #endif
152 VIGRA_DELEGATE_INVOKER_INITIALIZATION_LIST
153 {
154 }
152 VIGRA_DELEGATE_INVOKER_INITIALIZATION_LIST
153 {
154 }
155155
156 template <class TDelegate>
157 R operator()(TDelegate d) const
158 {
159 return d(VIGRA_DELEGATE_ARGS);
160 }
161 };
156 template <class TDelegate>
157 R operator()(TDelegate d) const
158 {
159 return d(VIGRA_DELEGATE_ARGS);
160 }
161 };
162162 }
163163
164164 #undef VIGRA_DELEGATE_CLASS_NAME
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
34
35
34
35
3636 #ifndef VIGRA_DISTANCETRANSFORM_HXX
3737 #define VIGRA_DISTANCETRANSFORM_HXX
3838
4343 namespace vigra {
4444
4545 /*
46 * functors to determine the distance norm
46 * functors to determine the distance norm
4747 * these functors assume that dx and dy are positive
4848 * (this is OK for use in internalDistanceTransform())
4949 */
50
50
5151 // chessboard metric
5252 struct InternalDistanceTransformLInifinityNormFunctor
5353 {
8080 class DestImageIterator, class DestAccessor,
8181 class ValueType, class NormFunctor>
8282 void
83 internalDistanceTransform(SrcImageIterator src_upperleft,
83 internalDistanceTransform(SrcImageIterator src_upperleft,
8484 SrcImageIterator src_lowerright, SrcAccessor sa,
8585 DestImageIterator dest_upperleft, DestAccessor da,
8686 ValueType background, NormFunctor norm)
8787 {
88 int w = src_lowerright.x - src_upperleft.x;
89 int h = src_lowerright.y - src_upperleft.y;
90
88 int w = src_lowerright.x - src_upperleft.x;
89 int h = src_lowerright.y - src_upperleft.y;
90
9191 FImage xdist(w,h), ydist(w,h);
92
92
9393 xdist = (FImage::value_type)w; // init x and
9494 ydist = (FImage::value_type)h; // y distances with 'large' values
9595
101101 DestImageIterator rx = ry;
102102 FImage::Iterator xdx = xdy;
103103 FImage::Iterator ydx = ydy;
104
104
105105 const Diff2D left(-1, 0);
106106 const Diff2D right(1, 0);
107107 const Diff2D top(0, -1);
108108 const Diff2D bottom(0, 1);
109
109
110110 int x,y;
111111 if(sa(sx) != background) // first pixel
112112 {
118118 {
119119 da.set(norm(*xdx, *ydx), rx);
120120 }
121
122
123 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
124 x<w;
121
122
123 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
124 x<w;
125125 ++x, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x) // first row left to right
126126 {
127127 if(sa(sx) != background)
137137 da.set(norm(*xdx, *ydx), rx); // calculate distance from x and y components
138138 }
139139 }
140 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
141 x>=0;
140 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
141 x>=0;
142142 --x, --xdx.x, --ydx.x, --sx.x, --rx.x) // first row right to left
143143 {
144144 float d = norm(xdx[right] + 1.0f, ydx[right]);
145
145
146146 if(da(rx) < d) continue;
147
147
148148 *xdx = xdx[right] + 1.0f;
149149 *ydx = ydx[right];
150150 da.set(d, rx);
151151 }
152 for(y=1, ++xdy.y, ++ydy.y, ++sy.y, ++ry.y;
152 for(y=1, ++xdy.y, ++ydy.y, ++sy.y, ++ry.y;
153153 y<h;
154154 ++y, ++xdy.y, ++ydy.y, ++sy.y, ++ry.y) // top to bottom
155155 {
157157 rx = ry;
158158 xdx = xdy;
159159 ydx = ydy;
160
160
161161 if(sa(sx) != background) // first pixel of current row
162162 {
163163 *xdx = 0.0f;
170170 *ydx = ydx[top] + 1.0f;
171171 da.set(norm(*xdx, *ydx), rx);
172172 }
173
174 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
175 x<w;
173
174 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
175 x<w;
176176 ++x, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x) // current row left to right
177177 {
178178 if(sa(sx) != background)
185185 {
186186 float d1 = norm(xdx[left] + 1.0f, ydx[left]);
187187 float d2 = norm(xdx[top], ydx[top] + 1.0f);
188
188
189189 if(d1 < d2)
190190 {
191191 *xdx = xdx[left] + 1.0f;
200200 }
201201 }
202202 }
203 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
204 x>=0;
203 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
204 x>=0;
205205 --x, --xdx.x, --ydx.x, --sx.x, --rx.x) // current row right to left
206206 {
207207 float d1 = norm(xdx[right] + 1.0f, ydx[right]);
208
208
209209 if(da(rx) < d1) continue;
210
210
211211 *xdx = xdx[right] + 1.0f;
212212 *ydx = ydx[right];
213213 da.set(d1, rx);
214214 }
215215 }
216 for(y=h-2, xdy.y -= 2, ydy.y -= 2, sy.y -= 2, ry.y -= 2;
216 for(y=h-2, xdy.y -= 2, ydy.y -= 2, sy.y -= 2, ry.y -= 2;
217217 y>=0;
218218 --y, --xdy.y, --ydy.y, --sy.y, --ry.y) // bottom to top
219219 {
221221 rx = ry;
222222 xdx = xdy;
223223 ydx = ydy;
224
224
225225 float d = norm(xdx[bottom], ydx[bottom] + 1.0f);
226226 if(d < da(rx)) // first pixel of current row
227 {
227 {
228228 *xdx = xdx[bottom];
229229 *ydx = ydx[bottom] + 1.0f;
230230 da.set(d, rx);
231231 }
232
233 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
232
233 for(x=1, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x;
234234 x<w;
235235 ++x, ++xdx.x, ++ydx.x, ++sx.x, ++rx.x) // current row left to right
236236 {
237237 float d1 = norm(xdx[left] + 1.0f, ydx[left]);
238238 float d2 = norm(xdx[bottom], ydx[bottom] + 1.0f);
239
239
240240 if(d1 < d2)
241241 {
242242 if(da(rx) < d1) continue;
252252 da.set(d2, rx);
253253 }
254254 }
255 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
256 x>=0;
255 for(x=w-2, xdx.x -= 2, ydx.x -= 2, sx.x -= 2, rx.x -= 2;
256 x>=0;
257257 --x, --xdx.x, --ydx.x, --sx.x, --rx.x) // current row right to left
258258 {
259259 float d1 = norm(xdx[right] + 1.0f, ydx[right]);
273273 /********************************************************/
274274
275275 /** \addtogroup DistanceTransform Distance Transform
276 Perform a distance transform using either the Euclidean, Manhattan,
277 or chessboard metrics.
278
279 See also: \ref MultiArrayDistanceTransform "multi-dimensional distance transforms"
280276 */
281277 //@{
282278
283 /** For all background pixels, calculate the distance to
284 the nearest object or contour. The label of the pixels to be considered
279 /** For all background pixels, calculate the distance to
280 the nearest object or contour. The label of the pixels to be considered
285281 background in the source image is passed in the parameter 'background'.
286 Source pixels with other labels will be considered objects. In the
287 destination image, all pixels corresponding to background will be assigned
282 Source pixels with other labels will be considered objects. In the
283 destination image, all pixels corresponding to background will be assigned
288284 the their distance value, all pixels corresponding to objects will be
289285 assigned 0.
290
286
291287 The parameter 'norm' gives the distance norm to be used:
292
288
293289 <ul>
294290
295291 <li> norm == 0: use chessboard distance (L-infinity norm)
296292 <li> norm == 1: use Manhattan distance (L1 norm)
297293 <li> norm == 2: use Euclidean distance (L2 norm)
298
294
299295 </ul>
300
296
301297 If you use the L2 norm, the destination pixels must be real valued to give
302298 correct results.
303
299
304300 <b> Declarations:</b>
305
301
306302 pass 2D array views:
307303 \code
308304 namespace vigra {
315311 ValueType background, int norm);
316312 }
317313 \endcode
318
314
319315 \deprecatedAPI{distanceTransform}
320316 pass \ref ImageIterators and \ref DataAccessors :
321317 \code
323319 template <class SrcImageIterator, class SrcAccessor,
324320 class DestImageIterator, class DestAccessor,
325321 class ValueType>
326 void distanceTransform(SrcImageIterator src_upperleft,
322 void distanceTransform(SrcImageIterator src_upperleft,
327323 SrcImageIterator src_lowerright, SrcAccessor sa,
328324 DestImageIterator dest_upperleft, DestAccessor da,
329325 ValueType background, int norm);
341337 }
342338 \endcode
343339 \deprecatedEnd
344
340
345341 <b> Usage:</b>
346
342
347343 <b>\#include</b> \<vigra/distancetransform.hxx\><br>
348344 Namespace: vigra
349345
354350
355351 // detect edges in src image (edges will be marked 1, background 0)
356352 differenceOfExponentialEdgeImage(src, edges, 0.8, 4.0);
357
353
358354 // find distance of all pixels from nearest edge
359355 distanceTransform(edges, distance, 0, 2);
360356 // ^ background label ^ norm (Euclidean)
362358
363359 \deprecatedUsage{distanceTransform}
364360 \code
365
361
366362 vigra::BImage src(w,h), edges(w,h);
367363 vigra::FImage distance(w, h);
368364
371367 ...
372368
373369 // detect edges in src image (edges will be marked 1, background 0)
374 vigra::differenceOfExponentialEdgeImage(srcImageRange(src), destImage(edges),
370 vigra::differenceOfExponentialEdgeImage(srcImageRange(src), destImage(edges),
375371 0.8, 4.0);
376
372
377373 // find distance of all pixels from nearest edge
378374 vigra::distanceTransform(srcImageRange(edges), destImage(distance),
379375 0, 2);
381377 \endcode
382378 <b> Required Interface:</b>
383379 \code
384
380
385381 SrcImageIterator src_upperleft, src_lowerright;
386382 DestImageIterator dest_upperleft;
387
383
388384 SrcAccessor sa;
389385 DestAccessor da;
390
386
391387 ValueType background;
392388 float distance;
393
389
394390 sa(src_upperleft) != background;
395391 da(dest_upperleft) < distance;
396392 da.set(distance, dest_upperleft);
397
393
398394 \endcode
399395 \deprecatedEnd
400396 */
404400 class DestImageIterator, class DestAccessor,
405401 class ValueType>
406402 inline void
407 distanceTransform(SrcImageIterator src_upperleft,
403 distanceTransform(SrcImageIterator src_upperleft,
408404 SrcImageIterator src_lowerright, SrcAccessor sa,
409405 DestImageIterator dest_upperleft, DestAccessor da,
410406 ValueType background, int norm)
5252 {
5353
5454
55 template <class Graph, class WeightType,
55 template <class Graph, class WeightType,
5656 class EdgeMap, class Shape>
57 TinyVector<MultiArrayIndex, Shape::static_size>
57 TinyVector<MultiArrayIndex, Shape::static_size>
5858 eccentricityCentersOneRegionImpl(ShortestPathDijkstra<Graph, WeightType> & pathFinder,
5959 const EdgeMap & weights, WeightType maxWeight,
6060 Shape anchor, Shape const & start, Shape const & stop)
6666 anchor = pathFinder.target();
6767 // FIXME: implement early stopping when source and target don't change anymore
6868 }
69
69
7070 Polygon<TinyVector<float, Shape::static_size> > path;
7171 path.push_back_unsafe(anchor);
7272 while(pathFinder.predecessors()[path.back()] != path.back())
7676
7777 template <unsigned int N, class T, class S, class Graph,
7878 class ACCUMULATOR, class DIJKSTRA, class Array>
79 void
79 void
8080 eccentricityCentersImpl(const MultiArrayView<N, T, S> & src,
8181 Graph const & g,
8282 ACCUMULATOR const & r,
8888 typedef typename Graph::Node Node;
8989 typedef typename Graph::EdgeIt EdgeIt;
9090 typedef float WeightType;
91
91
9292 typename Graph::template EdgeMap<WeightType> weights(g);
9393 WeightType maxWeight = 0.0,
9494 minWeight = N;
9595 {
9696 AccumulatorChainArray<CoupledArrays<N, WeightType, T>,
9797 Select< DataArg<1>, LabelArg<2>, Maximum> > a;
98
98
9999 MultiArray<N, WeightType> distances(src.shape());
100100 boundaryMultiDistance(src, distances, true);
101101 extractFeatures(distances, src, a);
109109 }
110110 else
111111 {
112 WeightType weight = norm(u - v) *
112 WeightType weight = norm(u - v) *
113113 (get<Maximum>(a, label) + minWeight - 0.5*(distances[u] + distances[v]));
114114 weights[*edge] = weight;
115115 maxWeight = std::max(weight, maxWeight);
117117 }
118118 }
119119 maxWeight *= src.size();
120
120
121121 T maxLabel = r.maxRegionLabel();
122122 centers.resize(maxLabel+1);
123123
125125 {
126126 if(get<Count>(r, i) == 0)
127127 continue;
128 centers[i] = eccentricityCentersOneRegionImpl(pathFinder, weights, maxWeight,
129 get<RegionAnchor>(r, i),
128 centers[i] = eccentricityCentersOneRegionImpl(pathFinder, weights, maxWeight,
129 get<RegionAnchor>(r, i),
130130 get<Coord<Minimum> >(r, i),
131131 get<Coord<Maximum> >(r, i) + Shape(1));
132132 }
133133 }
134134
135 /** \addtogroup MultiArrayDistanceTransform
135 /** \addtogroup DistanceTransform
136136 */
137137 //@{
138138
139139 /** \brief Find the (approximate) eccentricity center in each region of a labeled image.
140
140
141141 <b> Declarations:</b>
142142
143143 pass arbitrary-dimensional array views:
144144 \code
145145 namespace vigra {
146146 template <unsigned int N, class T, class S, class Array>
147 void
147 void
148148 eccentricityCenters(MultiArrayView<N, T, S> const & src,
149149 Array & centers);
150150 }
151151 \endcode
152
152
153153 \param[in] src : labeled array
154 \param[out] centers : list of eccentricity centers (required interface:
155 <tt>centers[k] = TinyVector<int, N>()</tt> must be supported)
156
154 \param[out] centers : list of eccentricity centers (required interface:
155 <tt>centers[k] = TinyVector<int, N>()</tt> must be supported)
156
157157 <b> Usage:</b>
158158
159159 <b>\#include</b> \<vigra/eccentricitytransform.hxx\><br/>
169169 \endcode
170170 */
171171 template <unsigned int N, class T, class S, class Array>
172 void
172 void
173173 eccentricityCenters(const MultiArrayView<N, T, S> & src,
174174 Array & centers)
175175 {
176176 using namespace acc;
177177 typedef GridGraph<N> Graph;
178178 typedef float WeightType;
179
179
180180 Graph g(src.shape(), IndirectNeighborhood);
181181 ShortestPathDijkstra<Graph, WeightType> pathFinder(g);
182182
184184 Select< DataArg<1>, LabelArg<1>,
185185 Count, BoundingBox, RegionAnchor> > a;
186186 extractFeatures(src, a);
187
187
188188 eccentricityCentersImpl(src, g, a, pathFinder, centers);
189189 }
190190
191191 /** \brief Computes the (approximate) eccentricity transform on each region of a labeled image.
192
192
193193 <b> Declarations:</b>
194194
195195 pass arbitrary-dimensional array views:
197197 namespace vigra {
199199 // compute only the eccentricity transform
199199 template <unsigned int N, class T, class S>
200 void
200 void
201201 eccentricityTransformOnLabels(MultiArrayView<N, T> const & src,
202202 MultiArrayView<N, S> dest);
203
203
204204 // also return the eccentricity center of each region
205205 template <unsigned int N, class T, class S, class Array>
206 void
206 void
207207 eccentricityTransformOnLabels(MultiArrayView<N, T> const & src,
208208 MultiArrayView<N, S> dest,
209209 Array & centers);
210210 }
211211 \endcode
212
212
213213 \param[in] src : labeled array
214214 \param[out] dest : eccentricity transform of src
215 \param[out] centers : (optional) list of eccentricity centers (required interface:
216 <tt>centers[k] = TinyVector<int, N>()</tt> must be supported)
217
215 \param[out] centers : (optional) list of eccentricity centers (required interface:
216 <tt>centers[k] = TinyVector<int, N>()</tt> must be supported)
217
218218 <b> Usage:</b>
219219
220220 <b>\#include</b> \<vigra/eccentricitytransform.hxx\><br/>
231231 \endcode
232232 */
233233 template <unsigned int N, class T, class S, class Array>
234 void
234 void
235235 eccentricityTransformOnLabels(MultiArrayView<N, T> const & src,
236236 MultiArrayView<N, S> dest,
237237 Array & centers)
242242 typedef typename Graph::Node Node;
243243 typedef typename Graph::EdgeIt EdgeIt;
244244 typedef float WeightType;
245
246 vigra_precondition(src.shape() == dest.shape(),
245
246 vigra_precondition(src.shape() == dest.shape(),
247247 "eccentricityTransformOnLabels(): Shape mismatch between src and dest.");
248
248
249249 Graph g(src.shape(), IndirectNeighborhood);
250250 ShortestPathDijkstra<Graph, WeightType> pathFinder(g);
251251
252 using namespace acc;
252 using namespace acc;
253253 AccumulatorChainArray<CoupledArrays<N, T>,
254254 Select< DataArg<1>, LabelArg<1>,
255255 Count, BoundingBox, RegionAnchor> > a;
256256 extractFeatures(src, a);
257
257
258258 eccentricityCentersImpl(src, g, a, pathFinder, centers);
259259
260260 typename Graph::template EdgeMap<WeightType> weights(g);
276276 }
277277
278278 template <unsigned int N, class T, class S>
279 inline void
279 inline void
280280 eccentricityTransformOnLabels(MultiArrayView<N, T> const & src,
281281 MultiArrayView<N, S> dest)
282282 {
203203 {}
204204 };
205205
206 //#ifndef NDEBUG
207
208 #if 1
209
210206 inline
211207 void throw_invariant_error(bool predicate, char const * message, char const * file, int line)
212208 {
267263
268264 #define vigra_precondition(PREDICATE, MESSAGE) vigra::throw_precondition_error((PREDICATE), MESSAGE, __FILE__, __LINE__)
269265
270 #define vigra_assert(PREDICATE, MESSAGE) vigra_precondition(PREDICATE, MESSAGE)
266 // Compile assertions only in debug mode
267 #ifdef NDEBUG
268 #define vigra_assert(PREDICATE, MESSAGE)
269 #else
270 #define vigra_assert(PREDICATE, MESSAGE) vigra_precondition(PREDICATE, MESSAGE)
271 #endif
271272
272273 #define vigra_postcondition(PREDICATE, MESSAGE) vigra::throw_postcondition_error((PREDICATE), MESSAGE, __FILE__, __LINE__)
273274
275276
276277 #define vigra_fail(MESSAGE) vigra::throw_runtime_error(MESSAGE, __FILE__, __LINE__)
277278
278 #else // NDEBUG
279
280 inline
281 void throw_invariant_error(bool predicate, char const * message)
282 {
283 if(!predicate)
284 throw vigra::InvariantViolation(message);
285 }
286
287 inline
288 void throw_precondition_error(bool predicate, char const * message)
289 {
290 if(!predicate)
291 throw vigra::PreconditionViolation(message);
292 }
293
294 inline
295 void throw_postcondition_error(bool predicate, char const * message)
296 {
297 if(!predicate)
298 throw vigra::PostconditionViolation(message);
299 }
300
301 inline
302 void throw_invariant_error(bool predicate, std::string message)
303 {
304 if(!predicate)
305 throw vigra::InvariantViolation(message.c_str());
306 }
307
308 inline
309 void throw_precondition_error(bool predicate, std::string message)
310 {
311 if(!predicate)
312 throw vigra::PreconditionViolation(message.c_str());
313 }
314
315 inline
316 void throw_postcondition_error(bool predicate, std::string message)
317 {
318 if(!predicate)
319 throw vigra::PostconditionViolation(message.c_str());
320 }
321
322 #define vigra_precondition(PREDICATE, MESSAGE) vigra::throw_precondition_error((PREDICATE), MESSAGE)
323
324 #define vigra_assert(PREDICATE, MESSAGE)
325
326 #define vigra_postcondition(PREDICATE, MESSAGE) vigra::throw_postcondition_error((PREDICATE), MESSAGE)
327
328 #define vigra_invariant(PREDICATE, MESSAGE) vigra::throw_invariant_error((PREDICATE), MESSAGE)
329
330 #define vigra_fail(MESSAGE) throw std::runtime_error(MESSAGE)
331
332 #endif // NDEBUG
333
334279 } // namespace vigra
335280
336281 #endif // VIGRA_ERROR_HXX
692692 {}
693693
694694 template<class Other>
695 FFTWAllocator(const FFTWAllocator<Other>& right) throw()
695 FFTWAllocator(const FFTWAllocator<Other>& /*right*/) throw()
696696 {}
697697
698698 template<class Other>
699 FFTWAllocator& operator=(const FFTWAllocator<Other>& right)
699 FFTWAllocator& operator=(const FFTWAllocator<Other>& /*right*/)
700700 {
701701 return *this;
702702 }
706706 return (pointer)fftw_malloc(count * sizeof(Ty));
707707 }
708708
709 void deallocate(pointer ptr, size_type count)
709 void deallocate(pointer ptr, size_type /*count*/)
710710 {
711711 fftw_free(ptr);
712712 }
760760 {}
761761
762762 template<class Other>
763 allocator(const allocator<Other>& right) throw()
763 allocator(const allocator<Other>& /*right*/) throw()
764764 {}
765765
766766 template<class Other>
767 allocator& operator=(const allocator<Other>& right)
767 allocator& operator=(const allocator<Other>& /*right*/)
768768 {
769769 return *this;
770770 }
23992399 void applyFourierFilterFamilyImpl(
24002400 FFTWComplexImage::const_traverser srcUpperLeft,
24012401 FFTWComplexImage::const_traverser srcLowerRight,
2402 FFTWComplexImage::ConstAccessor sa,
2402 FFTWComplexImage::ConstAccessor /*sa*/,
24032403 const ImageArray<FilterType> &filters,
24042404 ImageArray<DestImage> &results)
24052405 {
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_FILTER_ITERATOR_HXX
35 #define VIGRA_FILTER_ITERATOR_HXX
36
37 #include <type_traits>
38 #include <iterator>
39
40 #include "iteratorfacade.hxx"
41
42 namespace vigra
43 {
44
namespace detail
{
    // Primary template: an arbitrary type does not count as a
    // pointer-to-const.
    template <typename P>
    struct is_const_pointer
    {
        static bool const value = false;
    };

    // Partial specialization: matches exactly pointers whose pointee
    // is const-qualified.
    template <typename P>
    struct is_const_pointer<P const *>
    {
        static bool const value = true;
    };

    // An iterator is considered "const" iff its associated pointer
    // type (from std::iterator_traits) points to a const object.
    template <typename ITERATOR>
    struct is_const_iterator
    {
        typedef typename std::iterator_traits<ITERATOR>::pointer pointer;
        static bool const value = is_const_pointer<pointer>::value;
    };
}
66
67
68 /********************************************************/
69 /* */
70 /* FilterIterator */
71 /* */
72 /********************************************************/
73
74 /**
75 * @brief This iterator creates a view of another iterator and skips elements that
76 * do not fulfill a given predicate.
77 *
78 * The iterator is compatible to an STL forward iterator as defined in the C++ standard.
79 *
 80  * @note The equality comparison only checks whether the iterators point to the same place. The predicate is not checked.
81 */
82 template <typename PREDICATE, typename ITER>
83 class FilterIterator
84 : public ForwardIteratorFacade<FilterIterator<PREDICATE, ITER>,
85 typename std::iterator_traits<ITER>::value_type,
86 detail::is_const_iterator<ITER>::value>
87 {
88 public:
89
90 typedef PREDICATE Predicate;
91 typedef ITER Iter;
92 typedef typename std::iterator_traits<Iter>::value_type IterValueType;
93 typedef FilterIterator<Predicate, Iter> SelfType;
94 typedef ForwardIteratorFacade<SelfType,
95 IterValueType,
96 detail::is_const_iterator<ITER>::value> Parent;
97 typedef typename Parent::value_type value_type;
98 typedef typename Parent::reference reference;
99 typedef reference const const_reference;
100
101 /// Construct a filter iterator with the given predicate for
102 /// a base iterator range \a iter to \a end.
103 FilterIterator(Predicate pred, Iter iter, Iter end = Iter())
104 :
105 pred_(pred),
106 iter_(iter),
107 end_(end)
108 {
109 satisfy_predicate();
110 }
111
112 private:
113
114 void satisfy_predicate()
115 {
116 while (iter_ != end_ && !pred_(*iter_))
117 ++iter_;
118 }
119
120 void increment()
121 {
122 ++iter_;
123 satisfy_predicate();
124 }
125
126 reference dereference() const
127 {
128 return *iter_;
129 }
130
131 bool equal(FilterIterator const & other) const
132 {
133 return iter_ == other.iter_;
134 }
135
136 Predicate pred_;
137 Iter iter_;
138 Iter end_;
139
140 friend class vigra::IteratorFacadeCoreAccess;
141
142 };
143
144 template <typename PREDICATE, typename ITER>
145 FilterIterator<PREDICATE, ITER>
146 make_filter_iterator(PREDICATE pred, ITER iter, ITER end = ITER())
147 {
148 return FilterIterator<PREDICATE, ITER>(pred, iter, end);
149 }
150
151
152
153 } // namespace vigra
154
155
156
157 #endif
17061706
17071707 /************************************************************/
17081708
1709 #ifdef __GNUC__
1710 #pragma GCC diagnostic push
1711 #pragma GCC diagnostic ignored "-Wsign-compare"
1712 #endif
1713
17091714 MAKE_FUNCTOR_BINARY_OPERATOR_BOOL(equals, ==)
17101715 MAKE_FUNCTOR_BINARY_OPERATOR_BOOL(differs, !=)
17111716 MAKE_FUNCTOR_BINARY_OPERATOR_BOOL(less, <)
17151720 MAKE_FUNCTOR_BINARY_OPERATOR_BOOL(and, &&)
17161721 MAKE_FUNCTOR_BINARY_OPERATOR_BOOL(or, ||)
17171722
1723 #ifdef __GNUC__
1724 #pragma GCC diagnostic pop
1725 #endif
1726
17181727 #undef MAKE_FUNCTOR_BINARY_OPERATOR_BOOL
17191728
17201729 /************************************************************/
214214
215215 template<unsigned int DIM, class DTAG, class AFF_EDGES>
216216 size_t affiliatedEdgesSerializationSize(
217 const GridGraph<DIM,DTAG> & gridGraph,
217 const GridGraph<DIM,DTAG> &,
218218 const AdjacencyListGraph & rag,
219219 const AFF_EDGES & affEdges
220220 ){
233233
234234 template<class OUT_ITER, unsigned int DIM, class DTAG, class AFF_EDGES>
235235 void serializeAffiliatedEdges(
236 const GridGraph<DIM,DTAG> & gridGraph,
236 const GridGraph<DIM,DTAG> &,
237237 const AdjacencyListGraph & rag,
238238 const AFF_EDGES & affEdges,
239239 OUT_ITER outIter
260260
261261 template<class IN_ITER, unsigned int DIM, class DTAG, class AFF_EDGES>
262262 void deserializeAffiliatedEdges(
263 const GridGraph<DIM,DTAG> & gridGraph,
263 const GridGraph<DIM,DTAG> &,
264264 const AdjacencyListGraph & rag,
265265 AFF_EDGES & affEdges,
266266 IN_ITER begin,
267 IN_ITER end
267 IN_ITER
268268 ){
269269
270270 typedef typename AdjacencyListGraph::EdgeIt EdgeIt;
723723 for(NodeIt n(graph);n!=lemon::INVALID;++n){
724724 Node node(*n);
725725 if(seeds[node]==0){
726 int label = 0 ;
727726 Node pred=predMap[node];
728727 while(seeds[pred]==0){
729728 pred=predMap[pred];
736735 namespace detail_watersheds_segmentation{
737736
738737 struct RawPriorityFunctor{
739 template<class L, class T>
740 T operator()(const L label,const T priority)const{
738 template<class LabelType, class T>
739 T operator()(const LabelType /*label*/,const T priority)const{
741740 return priority;
742741 }
743742 };
980979 nodeSizeAcc[newRepNode] = sizeRu+sizeRv;
981980 }
982981 }
983 if(nodeNum==nodeNumStopCond){
982 if(nodeNumStopCond >= 0 && nodeNum==static_cast<size_t>(nodeNumStopCond)){
984983 break;
985984 }
986985 }
988987 break;
989988 }
990989 else{
991 if(nodeNum>nodeNumStopCond){
990 if(nodeNumStopCond >= 0 && nodeNum>static_cast<size_t>(nodeNumStopCond)){
992991 k *= 1.2f;
993992 }
994993 else{
12011200 const BASE_GRAPH_RAG_LABELS & baseGraphRagLabels,
12021201 const BASE_GRAPH_GT & baseGraphGt,
12031202 RAG_GT & ragGt,
1204 RAG_GT_QT & ragGtQt
1203 RAG_GT_QT &
12051204 ){
12061205 typedef typename BASE_GRAPH::Node BaseGraphNode;
12071206 typedef typename BASE_GRAPH::NodeIt BaseGraphNodeIt;
5050
5151 template<class MAP>
5252 struct GraphMapTypeTraits{
53 typedef typename MAP::Value Value;
54 typedef typename MAP::Reference Reference;
55 typedef typename MAP::ConstReference ConstReference;
53 typedef typename MAP::value_type Value;
54 typedef typename MAP::reference Reference;
55 typedef typename MAP::const_reference ConstReference;
5656 };
5757
5858 // generalizes the iterator begin end accessed
7979 }
8080
8181
82 static NodeIt nodesEnd(const Graph & g){ return NodeIt(lemon::INVALID);}
83 static EdgeIt edgesEnd(const Graph & g){ return EdgeIt(lemon::INVALID);}
84 static ArcIt arcsEnd( const Graph & g){ return ArcIt( lemon::INVALID);}
85 static OutArcIt outArcEnd(const Graph & g,const Node & node){
82 static NodeIt nodesEnd(const Graph &){ return NodeIt(lemon::INVALID);}
83 static EdgeIt edgesEnd(const Graph &){ return EdgeIt(lemon::INVALID);}
84 static ArcIt arcsEnd( const Graph &){ return ArcIt( lemon::INVALID);}
85 static OutArcIt outArcEnd(const Graph &,const Node &){
8686 return OutArcIt(lemon::INVALID);
8787 }
88 static IncEdgeIt incEdgeEnd(const Graph & g,const Node & node){
88 static IncEdgeIt incEdgeEnd(const Graph &,const Node &){
8989 return IncEdgeIt(lemon::INVALID);
9090 }
9191 };
119119 static OutArcIt outArcEnd(const Graph & g,const Node & node){
120120 return g.get_out_edge_end_iterator(node);
121121 }
122 static IncEdgeIt incEdgeEnd(const Graph & g,const Node & node){
122 static IncEdgeIt incEdgeEnd(const Graph &,const Node &){
123123 return IncEdgeIt(lemon::INVALID);
124124 }
125125 };
230230 typedef typename IntrinsicGraphShape<Graph>::IntrinsicArcMapShape IntrinsicArcMapShape;
231231
232232
233 static Node intrinsicNodeCoordinate(const Graph & g,const Node & node){
233 static Node intrinsicNodeCoordinate(const Graph &,const Node & node){
234234 return node;
235235 }
236 static Edge intrinsicEdgeCoordinate(const Graph & g,const Edge & edge){
236 static Edge intrinsicEdgeCoordinate(const Graph &,const Edge & edge){
237237 return edge;
238238 }
239 static Arc intrinsicArcCoordinate (const Graph & g,const Arc & arc){
239 static Arc intrinsicArcCoordinate (const Graph &,const Arc & arc){
240240 return arc;
241241 }
242242
4242 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
4343
4444 static bool valid(
45 const GRAPH & g,
46 const AdjacencyElement & adj,
47 const typename GRAPH::index_type ownNodeId
45 const GRAPH &,
46 const AdjacencyElement &,
47 const typename GRAPH::index_type /*ownNodeId*/
4848 ){
4949 return true;
5050 }
5353 static ResultType transform(
5454 const GRAPH & g,
5555 const AdjacencyElement & adj,
56 const typename GRAPH::index_type ownNodeId
56 const typename GRAPH::index_type /*ownNodeId*/
5757 ){
5858 return g.nodeFromId(adj.nodeId());
5959 }
6767 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
6868
6969 static bool valid(
70 const GRAPH & g,
71 const AdjacencyElement & adj,
72 const typename GRAPH::index_type ownNodeId
70 const GRAPH &,
71 const AdjacencyElement &,
72 const typename GRAPH::index_type /*ownNodeId*/
7373 ){
7474 return true;
7575 }
7777 static ResultType transform(
7878 const GRAPH & g,
7979 const AdjacencyElement & adj,
80 const typename GRAPH::index_type ownNodeId
80 const typename GRAPH::index_type /*ownNodeId*/
8181 ){
8282 return g.edgeFromId(adj.edgeId());
8383 }
9191 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
9292
9393 static bool valid(
94 const GRAPH & g,
94 const GRAPH &,
9595 const AdjacencyElement & adj,
9696 const typename GRAPH::index_type ownNodeId
9797 ){
101101 static ResultType transform(
102102 const GRAPH & g,
103103 const AdjacencyElement & adj,
104 const typename GRAPH::index_type ownNodeId
104 const typename GRAPH::index_type /*ownNodeId*/
105105 ){
106106 return g.edgeFromId(adj.edgeId());
107107 }
114114 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
115115
116116 static bool valid(
117 const GRAPH & g,
117 const GRAPH &,
118118 const AdjacencyElement & adj,
119119 const typename GRAPH::index_type ownNodeId
120120 ){
136136 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
137137
138138 static bool valid(
139 const GRAPH & g,
140 const AdjacencyElement & adj,
141 const typename GRAPH::index_type ownNodeId
139 const GRAPH &,
140 const AdjacencyElement &,
141 const typename GRAPH::index_type /*ownNodeId*/
142142 ){
143143 return true;
144144 }
161161 typedef typename GRAPH::NodeStorage::AdjacencyElement AdjacencyElement;
162162
163163 static bool valid(
164 const GRAPH & g,
165 const AdjacencyElement & adj,
166 const typename GRAPH::index_type ownNodeId
164 const GRAPH &,
165 const AdjacencyElement &,
166 const typename GRAPH::index_type /*ownNodeId*/
167167 ){
168168 return true;
169169 }
170170 ResultType static transform(
171171 const GRAPH & g,
172172 const AdjacencyElement & adj,
173 const typename GRAPH::index_type ownNodeId
173 const typename GRAPH::index_type /*ownNodeId*/
174174 ){
175175 return g.direct(g.edgeFromId(adj.edgeId()) ,g.nodeFromId(adj.nodeId()));
176176 }
195195 //typedef typename GraphItemHelper<GRAPH,typename FILTER::ResultType> ResultItem
196196
197197 // default constructor
198 GenericIncEdgeIt(const lemon::Invalid & invalid = lemon::INVALID)
198 GenericIncEdgeIt(const lemon::Invalid & /*invalid*/ = lemon::INVALID)
199199 : nodeImpl_(NULL),
200200 graph_(NULL),
201201 ownNodeId_(-1),
332332 typedef typename SetType::const_iterator AdjIt;
333333 public:
334334
335 GenericNodeImpl(const lemon::Invalid iv=lemon::INVALID)
335 GenericNodeImpl(const lemon::Invalid /*iv*/=lemon::INVALID)
336336 : id_(-1){
337337 }
338338
405405 public:
406406 typedef INDEX_TYPE index_type;
407407
408 GenericEdgeImpl(const lemon::Invalid iv=lemon::INVALID)
408 GenericEdgeImpl(const lemon::Invalid /*iv*/=lemon::INVALID)
409409 : vigra::TinyVector<INDEX_TYPE,3>(-1){
410410 }
411411
429429 public:
430430 typedef INDEX_TYPE index_type;
431431
432 GenericArc(const lemon::Invalid & iv = lemon::INVALID)
432 GenericArc(const lemon::Invalid & /*iv*/ = lemon::INVALID)
433433 : id_(-1),
434434 edgeId_(-1){
435435
477477 public:
478478 typedef INDEX_TYPE index_type;
479479
480 GenericEdge(const lemon::Invalid & iv = lemon::INVALID)
480 GenericEdge(const lemon::Invalid & /*iv*/ = lemon::INVALID)
481481 : id_(-1){
482482
483483 }
523523 public:
524524 typedef INDEX_TYPE index_type;
525525
526 GenericNode(const lemon::Invalid & iv = lemon::INVALID)
526 GenericNode(const lemon::Invalid & /*iv*/ = lemon::INVALID)
527527 : id_(-1){
528528
529529 }
119119 : DenseReferenceMapType(ItemHelper::itemNum(g)==0 ? 0: ItemHelper::maxItemId(g) ){
120120
121121 }
122 DenseGraphItemReferenceMap(const Graph & g,typename DenseReferenceMapType::ConstReference value)
122 DenseGraphItemReferenceMap(const Graph & g,typename DenseReferenceMapType::ConstReference)
123123 : DenseReferenceMapType(ItemHelper::itemNum(g)==0 ? 0: ItemHelper::maxItemId(g)){
124124
125125 }
251251 ZeroNodeMap()
252252 {}
253253
254 value_type operator[](const Key & key) const
254 value_type operator[](const Key &) const
255255 {
256256 return value_type();
257257 }
00 /************************************************************************/
11 /* */
2 /* Copyright 2011-2012 Stefan Schmidt and Ullrich Koethe */
2 /* Copyright 2011-2015 by Stefan Schmidt, Philip Schill and */
3 /* Ullrich Koethe */
34 /* */
45 /* This file is part of the VIGRA computer vision library. */
56 /* The VIGRA Website is */
4142 #ifndef VIGRA_GRAPH_HXX
4243 #define VIGRA_GRAPH_HXX
4344
45 #include <stdexcept>
46 #include <vector>
47 #include <map>
48
4449 #include "metaprogramming.hxx"
4550 #include "tinyvector.hxx"
51 #include "filter_iterator.hxx"
4652
4753 #ifdef WITH_BOOST_GRAPH
4854
297303
298304 }} // namespace vigra::lemon_graph
299305
306
307
308 namespace vigra {
309 namespace detail {
310
// Lightweight typed wrapper around a node id. A default-constructed (or
// lemon::INVALID-constructed) descriptor holds id -1, marking "invalid".
// Provides only the comparisons needed for map keys and iteration.
311 template <typename INDEXTYPE>
312 class NodeDescriptor
313 {
314 public:
315 typedef INDEXTYPE index_type;
316 NodeDescriptor(lemon::Invalid = lemon::INVALID)
317 : id_(-1)
318 {}
319 explicit NodeDescriptor(index_type const & id)
320 : id_(id)
321 {}
322 bool operator!=(NodeDescriptor const & other) const
323 {
324 return id_ != other.id_;
325 }
326 bool operator==(NodeDescriptor const & other) const
327 {
328 return id_ == other.id_;
329 }
330 bool operator<(NodeDescriptor const & other) const
331 {
332 return id_ < other.id_;
333 }
334 index_type id() const
335 {
336 return id_;
337 }
338 protected:
339 index_type id_;
340 };
341
// Stream a NodeDescriptor as its raw id.
342 template <typename INDEXTYPE>
343 std::ostream & operator << (std::ostream & os, NodeDescriptor<INDEXTYPE> const & item)
344 {
345 return os << item.id();
346 }
347
// Lightweight typed wrapper around an arc id; structurally identical to
// NodeDescriptor but kept as a distinct type so node and arc ids cannot
// be mixed up. id -1 (the lemon::INVALID state) marks "invalid".
348 template <typename INDEXTYPE>
349 class ArcDescriptor
350 {
351 public:
352 typedef INDEXTYPE index_type;
353 ArcDescriptor(lemon::Invalid = lemon::INVALID)
354 : id_(-1)
355 {}
356 explicit ArcDescriptor(index_type const & id)
357 : id_(id)
358 {}
359 bool operator!=(ArcDescriptor const & other) const
360 {
361 return id_ != other.id_;
362 }
363 bool operator==(ArcDescriptor const & other) const
364 {
365 return id_ == other.id_;
366 }
367 bool operator<(ArcDescriptor const & other) const
368 {
369 return id_ < other.id_;
370 }
371 index_type id() const
372 {
373 return id_;
374 }
375 protected:
376 index_type id_;
377 };
378
// Stream an ArcDescriptor as its raw id.
379 template <typename INDEXTYPE>
380 std::ostream & operator << (std::ostream & os, ArcDescriptor<INDEXTYPE> const & item)
381 {
382 return os << item.id();
383 }
384
385 } // namespace detail
386
387
388
// Tag selecting the storage backend of PropertyMap (see specializations below):
389 enum ContainerTag
390 {
391 MapTag, // std::map keyed by the descriptor (sparse storage)
392 VectorTag, // one vector slot per key id; empty slots hold the default key
393 IndexVectorTag // compact value vector plus an id -> index bookkeeping vector
394 };
395
396 /**
397 * @brief The PropertyMap is used to store Node or Arc information of graphs.
398 *
399 * @tparam <KEYTYPE> the key type
400 * @tparam <MAPPEDTYPE> the mapped type
401 * @tparam <ContainerTag = MapTag> whether to use a map or a vector as underlying storage
402 *
403 * @note
404 * In contrast to std::map, operator[] does not insert elements. Use insert() instead.
405 * If ContainerTag == MapTag: at() and operator[] behave like std::map::at().
406 * If ContainerTag == IndexVectorTag: at() behaves like std::map::at(). operator[] does not check if the key exists.
407 */
408 template <typename KEYTYPE, typename MAPPEDTYPE, ContainerTag = MapTag >
409 class PropertyMap
410 {
411 public:
412 typedef KEYTYPE key_type;
413 typedef MAPPEDTYPE mapped_type;
414 typedef std::pair<key_type const, mapped_type> value_type;
415 typedef value_type & reference;
416 typedef value_type const & const_reference;
417 typedef std::map<key_type, mapped_type> Map;
418 typedef typename Map::iterator iterator;
419 typedef typename Map::const_iterator const_iterator;
420
// Checked access; throws std::out_of_range if k is absent (std::map::at).
421 mapped_type & at(key_type const & k)
422 {
423 return map_.at(k);
424 }
425 mapped_type const & at(key_type const & k) const
426 {
427 return map_.at(k);
428 }
// NOTE: unlike std::map, operator[] does NOT insert missing keys -- it
// forwards to map_.at() and therefore throws on an absent key.
429 mapped_type & operator[](key_type const & k)
430 {
431 return map_.at(k);
432 }
433 mapped_type const & operator[](key_type const & k) const
434 {
435 return map_.at(k);
436 }
// Insert a new element or overwrite an existing one.
437 void insert(key_type const & k, mapped_type const & v)
438 {
439 map_[k] = v;
440 }
441 iterator begin()
442 {
443 return map_.begin();
444 }
445 const_iterator begin() const
446 {
447 return map_.begin();
448 }
449 const_iterator cbegin() const
450 {
451 return map_.cbegin();
452 }
453 iterator end()
454 {
455 return map_.end();
456 }
457 const_iterator end() const
458 {
459 return map_.end();
460 }
461 const_iterator cend() const
462 {
463 return map_.cend();
464 }
465 void clear()
466 {
467 map_.clear();
468 }
469 iterator find(key_type const & k)
470 {
471 return map_.find(k);
472 }
473 const_iterator find(key_type const & k) const
474 {
475 return map_.find(k);
476 }
477 size_t size() const
478 {
479 return map_.size();
480 }
// Returns the number of erased elements (0 or 1), like std::map::erase.
481 size_t erase(key_type const & k)
482 {
483 return map_.erase(k);
484 }
485
486 protected:
487 Map map_;
488 };
489
490
491
492 namespace detail
493 {
// Predicate for FilterIterator used by PropertyMap<..., VectorTag>:
// accepts a (key, value) pair iff its key differs from the designated
// default key, i.e. iff the slot is actually occupied.
494 template <typename VALUE_TYPE>
495 struct PMapValueSkipper
496 {
497 public:
498 typedef VALUE_TYPE value_type;
499 typedef typename value_type::first_type key_type;
500 PMapValueSkipper(key_type default_key)
501 :
502 default_key_(default_key)
503 {}
504 bool operator()(value_type const & v)
505 {
506 return v.first != default_key_;
507 }
508 private:
509 key_type const default_key_;
510 };
511 }
512
513 /**
514 * @brief Specialization of PropertyMap that stores the elements in a vector (size = max node id of stored elements).
515 */
516 template <typename KEYTYPE, typename MAPPEDTYPE>
517 class PropertyMap<KEYTYPE, MAPPEDTYPE, VectorTag>
518 {
519 public:
520 typedef KEYTYPE key_type;
521 typedef MAPPEDTYPE mapped_type;
522 typedef std::pair<key_type, mapped_type> value_type;
523 typedef value_type & reference;
524 typedef value_type const & const_reference;
525 typedef std::vector<value_type> Map;
526 typedef detail::PMapValueSkipper<value_type> ValueSkipper;
527 typedef FilterIterator<ValueSkipper, typename Map::iterator> iterator;
528 typedef FilterIterator<ValueSkipper, typename Map::const_iterator> const_iterator;
529
530 PropertyMap(key_type default_key = lemon::INVALID)
531 :
532 num_elements_(0),
533 default_key_(default_key)
534 {}
535
536 mapped_type & at(key_type const & k)
537 {
538 #ifdef VIGRA_CHECK_BOUNDS
539 if (k.id() < 0 || k.id() >= map_.size() || map_[k.id()].first == default_key_)
540 throw std::out_of_range("PropertyMap::at(): Key not found.");
541 #endif
542 return map_[k.id()].second;
543 }
544
545 mapped_type const & at(key_type const & k) const
546 {
547 #ifdef VIGRA_CHECK_BOUNDS
548 if (k.id() < 0 || k.id() >= map_.size() || map_[k.id()].first == default_key_)
549 throw std::out_of_range("PropertyMap::at(): Key not found.");
550 #endif
551 return map_[k.id()].second;
552 }
553
554 mapped_type & operator[](key_type const & k)
555 {
556 return map_[k.id()].second;
557 }
558
559 mapped_type const & operator[](key_type const & k) const
560 {
561 return map_[k.id()].second;
562 }
563
564 void insert(key_type const & k, mapped_type const & v)
565 {
566 if (k.id() < 0)
567 throw std::out_of_range("PropertyMap::insert(): Key must not be negative.");
568
569 if ((size_t)k.id() >= map_.size())
570 map_.resize(k.id()+1, value_type(default_key_, mapped_type()));
571
572 auto & elt = map_[k.id()];
573 if (elt.first == default_key_)
574 ++num_elements_;
575
576 elt.first = k;
577 elt.second = v;
578 }
579
580 #define MAKE_ITER(it) make_filter_iterator(ValueSkipper(default_key_), it, map_.end())
581 #define MAKE_CITER(it) make_filter_iterator(ValueSkipper(default_key_), it, map_.cend())
582
583 iterator begin()
584 {
585 return MAKE_ITER(map_.begin());
586 }
587
588 const_iterator begin() const
589 {
590 return MAKE_ITER(map_.begin());
591 }
592
593 const_iterator cbegin() const
594 {
595 return MAKE_CITER(map_.cbegin());
596 }
597
598 iterator end()
599 {
600 return MAKE_ITER(map_.end());
601 }
602
603 const_iterator end() const
604 {
605 return MAKE_ITER(map_.end());
606 }
607
608 const_iterator cend() const
609 {
610 return MAKE_CITER(map_.cend());
611 }
612
613 iterator find(key_type const & k)
614 {
615 if (k.id() < 0 || k.id() >= map_.size() || map_[k.id()].first == default_key_)
616 return end();
617 else
618 return MAKE_ITER(std::next(map_.begin(), k.id()));
619 }
620
621 const_iterator find(key_type const & k) const
622 {
623 if (k.id() < 0 || k.id() >= map_.size() || map_[k.id()].first == default_key_)
624 return end();
625 else
626 return MAKE_ITER(std::next(map_.begin(), k.id()));
627 }
628
629 #undef MAKE_ITER
630 #undef MAKE_CITER
631
632 void clear()
633 {
634 map_.clear();
635 num_elements_ = 0;
636 }
637
638 size_t size() const
639 {
640 return num_elements_;
641 }
642
643 size_t erase(key_type const & k)
644 {
645 if (k.id() < 0 || k.id() >= map_.size() || map_[k.id()].first == default_key_)
646 {
647 return 0;
648 }
649 else
650 {
651 map_[k.id()].first = default_key_;
652 --num_elements_;
653 return 1;
654 }
655 }
656
657 protected:
658 Map map_;
659 size_t num_elements_;
660 key_type default_key_;
661 };
662
663
664
665 /**
666 * @brief
667 * Specialization of PropertyMap that stores the elements in a vector (size = number of stored elements).
668 * An additional index vector is needed for bookkeeping (size = max node id of stored elements).
669 */
670 template <typename KEYTYPE, typename MAPPEDTYPE>
671 class PropertyMap<KEYTYPE, MAPPEDTYPE, IndexVectorTag>
672 {
673 public:
674 typedef KEYTYPE key_type;
675 typedef MAPPEDTYPE mapped_type;
676 typedef std::pair<key_type, mapped_type> value_type;
677 typedef value_type & reference;
678 typedef value_type const & const_reference;
679 typedef std::vector<value_type> Map;
680 typedef typename Map::iterator iterator;
681 typedef typename Map::const_iterator const_iterator;
682
683 mapped_type & at(key_type const & k)
684 {
685 #ifdef VIGRA_CHECK_BOUNDS
686 if (indices_.at(k.id()) == -1)
687 throw std::out_of_range("PropertyMap::at(): Key not found.");
688 #endif
689 return map_[indices_[k.id()]].second;
690 }
691
692 mapped_type const & at(key_type const & k) const
693 {
694 #ifdef VIGRA_CHECK_BOUNDS
695 if (indices_.at(k.id()) == -1)
696 throw std::out_of_range("PropertyMap::at(): Key not found.");
697 #endif
698 return map_[indices_[k.id()]].second;
699 }
700
701 mapped_type & operator[](key_type const & k)
702 {
703 return map_[indices_[k.id()]].second;
704 }
705
706 mapped_type const & operator[](key_type const & k) const
707 {
708 return map_[indices_[k.id()]].second;
709 }
710
711 void insert(key_type const & k, mapped_type const & v)
712 {
713 if (k.id() < 0)
714 throw std::out_of_range("PropertyMap::insert(): Key must not be negative.");
715
716 if (k.id() >= indices_.size())
717 indices_.resize(k.id()+1, -1);
718
719 if (indices_[k.id()] == -1)
720 {
721 indices_[k.id()] = map_.size();
722 map_.push_back(value_type(k, v));
723 }
724 }
725
726 iterator begin()
727 {
728 return map_.begin();
729 }
730
731 const_iterator begin() const
732 {
733 return map_.begin();
734 }
735
736 const_iterator cbegin() const
737 {
738 return map_.cend();
739 }
740
741 iterator end()
742 {
743 return map_.end();
744 }
745
746 const_iterator end() const
747 {
748 return map_.end();
749 }
750
751 const_iterator cend() const
752 {
753 return map_.cend();
754 }
755
756 void clear()
757 {
758 map_.clear();
759 indices_.clear();
760 }
761
762 iterator find(key_type const & k)
763 {
764 if (k.id() < 0 || k.id() >= indices_.size() || indices_[k.id()] == -1)
765 return map_.end();
766 else
767 return std::next(map_.begin(), indices_[k.id()]);
768 }
769
770 const_iterator find(key_type const & k) const
771 {
772 if (k.id() < 0 || k.id() >= indices_.size() || indices_[k.id()] == -1)
773 return map_.end();
774 else
775 return std::next(map_.begin(), indices_[k.id()]);
776 }
777
778 size_t size() const
779 {
780 return map_.size();
781 }
782
783 size_t erase(key_type const & k)
784 {
785 if (k.id() < 0 || k.id() >= indices_.size() || indices_[k.id()] == -1)
786 {
787 return 0;
788 }
789 else
790 {
791 // Erase the element from the index vector and the map.
792 size_t ind = indices_[k.id()];
793 indices_[k.id()] = -1;
794 map_.erase(std::next(map_.begin(), ind));
795
796 // Adjust the indices.
797 for (size_t i = 0; i < indices_.size(); ++i)
798 if (indices_[i] > ind)
799 --indices_[i];
800 return 1;
801 }
802 }
803
804 protected:
805 Map map_;
806 std::vector<int> indices_;
807 };
808
809
810
811 } // namespace vigra
812
813
814
300815 #endif // VIGRA_GRAPH_HXX
4444 #define H5Dopen_vers 2
4545 #define H5Dcreate_vers 2
4646 #define H5Acreate_vers 2
47 #define H5Eset_auto_vers 2
48 #define H5Eget_auto_vers 2
4749
4850 #include <hdf5.h>
4951
126128 */
127129 class HDF5DisableErrorOutput
128130 {
129 H5E_auto2_t old_func_;
131 H5E_auto1_t old_func1_;
132 H5E_auto2_t old_func2_;
130133 void *old_client_data_;
134 int error_handler_version_;
131135
132136 HDF5DisableErrorOutput(HDF5DisableErrorOutput const &);
133137 HDF5DisableErrorOutput & operator=(HDF5DisableErrorOutput const &);
134138
135139 public:
136140 HDF5DisableErrorOutput()
137 : old_func_(0)
141 : old_func1_(0)
142 , old_func2_(0)
138143 , old_client_data_(0)
139 {
140 H5Eget_auto2(H5E_DEFAULT, &old_func_, &old_client_data_);
141 H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
144 , error_handler_version_(-1)
145 {
146 if(H5Eget_auto2(H5E_DEFAULT, &old_func2_, &old_client_data_) >= 0)
147 {
148 // prefer new-style error handling
149 H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
150 error_handler_version_ = 2;
151 }
152 else if(H5Eget_auto1(&old_func1_, &old_client_data_) >= 0)
153 {
154 // fall back to old-style if another module (e.g. h5py)
155 // prevents us from using new-style (i.e. H5Eget_auto2()
156 // returned a negative error code)
157 H5Eset_auto1(NULL, NULL);
158 error_handler_version_ = 1;
159 }
142160 }
143161
144162 ~HDF5DisableErrorOutput()
145163 {
146 H5Eset_auto2(H5E_DEFAULT, old_func_, old_client_data_);
164 if(error_handler_version_ == 1)
165 H5Eset_auto1(old_func1_, old_client_data_);
166 else if(error_handler_version_ == 2)
167 H5Eset_auto2(H5E_DEFAULT, old_func2_, old_client_data_);
147168 }
148169 };
149170
706727 ordered as 'z', 'y', 'x', this function will return the shape in the order
707728 'x', 'y', 'z'.
708729 */
709 VIGRA_EXPORT ArrayVector<hsize_t> const & shape() const
710 {
711 return m_dims;
712 }
730 VIGRA_EXPORT ArrayVector<hsize_t> const & shape() const;
713731
714732 /** Get the shape (length) of the dataset along dimension \a dim.
715733
10411059 /** \brief Open or create an HDF5File object.
10421060
10431061 Creates or opens HDF5 file with given filename.
1044 The current group is set to "/".
1045
1046 Note that the HDF5File class is not copyable (the copy constructor is
1047 private to enforce this).
1048 */
1049 HDF5File(std::string filePath, OpenMode mode, bool track_creation_times = false)
 1062 The current group is set to "/". By default, the file is opened in read-only mode.
1063 */
1064 explicit HDF5File(std::string filePath, OpenMode mode = ReadOnly, bool track_creation_times = false)
10501065 : track_time(track_creation_times ? 1 : 0)
10511066 {
10521067 open(filePath, mode);
1068 }
1069
1070 /** \brief Open or create an HDF5File object.
1071
1072 Creates or opens HDF5 file with given filename.
 1073 The current group is set to "/". By default, the file is opened in read-only mode.
1074 */
1075 explicit HDF5File(char const * filePath, OpenMode mode = ReadOnly, bool track_creation_times = false)
1076 : track_time(track_creation_times ? 1 : 0)
1077 {
1078 open(std::string(filePath), mode);
10531079 }
10541080
10551081 /** \brief Initialize an HDF5File object from HDF5 file handle
10651091 \a read_only is 'true', you cannot create new datasets or
10661092 overwrite data.
10671093
1068 \warning In case the underlying HDF5 library used by Vigra is not
1069 exactly the same library used to open the file with the given id,
1070 this method will lead to crashes.
 1094 \warning When VIGRA is linked against a different HDF5 library than the one
1095 used to open the file with the given id, this method will lead to crashes.
10711096 */
10721097 explicit HDF5File(HDF5HandleShared const & fileHandle,
10731098 const std::string & pathname = "",
11191144
11201145 /** \brief Assign a HDF5File object.
11211146
1122 Calls close() on the present file and The new object will refer to the same file and group as \a other.
1147 Calls close() on the present file and refers to the same file and group as \a other afterwards.
11231148 */
11241149 HDF5File & operator=(HDF5File const & other)
11251150 {
11421167
11431168 bool isOpen() const
11441169 {
1145 return fileHandle_ != 0;
1170 return fileHandle_ != (hid_t)0;
11461171 }
11471172
11481173 bool isReadOnly() const
13761401
13771402 errorMessage = "HDF5File::getDatasetShape(): Unable to access dataspace.";
13781403 HDF5Handle dataspaceHandle(H5Dget_space(datasetHandle), &H5Sclose, errorMessage.c_str());
1379
13801404 //get dimension information
13811405 ArrayVector<hsize_t>::size_type dimensions = H5Sget_simple_extent_ndims(dataspaceHandle);
13821406
13831407 ArrayVector<hsize_t> shape(dimensions);
13841408 ArrayVector<hsize_t> maxdims(dimensions);
13851409 H5Sget_simple_extent_dims(dataspaceHandle, shape.data(), maxdims.data());
1410
1411 // invert the dimensions to guarantee VIGRA-compatible order.
1412 std::reverse(shape.begin(), shape.end());
1413 return shape;
1414 }
1415
1416 /** \brief Get the shape of chunks along each dimension of a certain dataset.
1417
1418 Normally, this function is called after determining the dimension of the
1419 dataset using \ref getDatasetDimensions().
1420 If the first character is a "/", the path will be interpreted as absolute path,
1421 otherwise it will be interpreted as path relative to the current group.
1422
1423 Note that the memory order between VIGRA and HDF5 files differs: VIGRA uses
1424 Fortran-order, while HDF5 uses C-order. This function therefore reverses the axis
1425 order relative to the file contents. That is, when the axes in the file are
1426 ordered as 'z', 'y', 'x', this function will return the shape in the order
1427 'x', 'y', 'z'.
1428 */
1429 ArrayVector<hsize_t> getChunkShape(std::string datasetName) const
1430 {
1431 // make datasetName clean
1432 datasetName = get_absolute_path(datasetName);
1433
1434 //Open dataset and dataspace
1435 std::string errorMessage = "HDF5File::getChunkShape(): Unable to open dataset '" + datasetName + "'.";
1436 HDF5Handle datasetHandle = HDF5Handle(getDatasetHandle_(datasetName), &H5Dclose, errorMessage.c_str());
1437
1438 errorMessage = "HDF5File::getChunkShape(): Unable to access dataspace.";
1439 HDF5Handle dataspaceHandle(H5Dget_space(datasetHandle), &H5Sclose, errorMessage.c_str());
1440 HDF5Handle properties(H5Dget_create_plist(datasetHandle),
1441 &H5Pclose, "HDF5File::read(): failed to get property list");
1442
1443
1444 //get dimension information
1445 ArrayVector<hsize_t>::size_type dimensions = H5Sget_simple_extent_ndims(dataspaceHandle);
1446
1447 ArrayVector<hsize_t> shape(dimensions);
1448 H5Pget_chunk(properties, dimensions, shape.data());
13861449
13871450 // invert the dimensions to guarantee VIGRA-compatible order.
13881451 std::reverse(shape.begin(), shape.end());
18091872 datasetName = get_absolute_path(datasetName);
18101873
18111874 typename MultiArrayShape<N>::type chunkSize;
1812 for(int i = 0; i < N; i++){
1875 for(unsigned i = 0; i < N; i++){
18131876 chunkSize[i] = iChunkSize;
18141877 }
18151878 write_(datasetName, array, detail::getH5DataType<T>(), SIZE, chunkSize, compression);
18571920 datasetName = get_absolute_path(datasetName);
18581921
18591922 typename MultiArrayShape<N>::type chunkSize;
1860 for(int i = 0; i < N; i++){
1923 for(unsigned i = 0; i < N; i++){
18611924 chunkSize[i] = iChunkSize;
18621925 }
18631926 write_(datasetName, array, detail::getH5DataType<T>(), 3, chunkSize, compression);
24122475 groupName = groupName + '/';
24132476 }
24142477
2415 // open or create subgroups one by one
2478 // We determine if the group exists by checking the return value of H5Gopen.
2479 // To do so, we must temporarily disable error reporting.
2480 // Alternatively, we could use H5LTfind_dataset(), but this is much slower.
2481 HDF5DisableErrorOutput disable_error;
2482
2483 // Open or create subgroups one by one
24162484 std::string::size_type begin = 0, end = groupName.find('/');
24172485 while (end != std::string::npos)
24182486 {
24192487 std::string group(groupName.begin()+begin, groupName.begin()+end);
2488
24202489 hid_t prevParent = parent;
2421
2422 if(H5LTfind_dataset(parent, group.c_str()) == 0)
2423 {
2424 if(create)
2425 parent = H5Gcreate(prevParent, group.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
2426 else
2427 parent = -1;
2428 }
2429 else
2430 {
2431 parent = H5Gopen(prevParent, group.c_str(), H5P_DEFAULT);
2432 }
2490 parent = H5Gopen(prevParent, group.c_str(), H5P_DEFAULT);
2491 if(parent < 0 && create) // group doesn't exist, but we are supposed to create it
2492 parent = H5Gcreate(prevParent, group.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
24332493 H5Gclose(prevParent);
24342494
24352495 if(parent < 0)
2436 {
2437 return parent;
2438 }
2496 break;
2497
24392498 begin = end + 1;
24402499 end = groupName.find('/', begin);
24412500 }
29503009 boffset.resize(N);
29513010 }
29523011
2953 for(int i = 0; i < N; ++i)
3012 for(unsigned i = 0; i < N; ++i)
29543013 {
29553014 // vigra and hdf5 use different indexing
29563015 bshape[N-1-i] = array.shape(i);
32033262 boffset.resize(N);
32043263 }
32053264
3206 for(int i = 0; i < N; ++i)
3265 for(unsigned i = 0; i < N; ++i)
32073266 {
32083267 // vigra and hdf5 use different indexing
32093268 bshape[N-1-i] = blockShape[i];
33413400 template<unsigned int N, class T, class StrideTag>
33423401 inline void readHDF5(const HDF5ImportInfo &info, MultiArrayView<N, T, StrideTag> array)
33433402 {
3344 readHDF5(info, array, 0, 0); // last two arguments are not used
3345 }
3346
3347 template<unsigned int N, class T, class StrideTag>
3348 void readHDF5(const HDF5ImportInfo &info, MultiArrayView<N, T, StrideTag> array, const hid_t datatype, const int numBandsOfType)
3349 {
33503403 HDF5File file(info.getFilePath(), HDF5File::OpenReadOnly);
33513404 file.read(info.getPathInFile(), array);
3352 file.close();
33533405 }
33543406
33553407 inline hid_t openGroup(hid_t parent, std::string group_name)
35553607 template<unsigned int N, class T, class StrideTag>
35563608 inline void writeHDF5(const char* filePath, const char* pathInFile, const MultiArrayView<N, T, StrideTag> & array)
35573609 {
3558 //last two arguments are not used
3559 writeHDF5(filePath, pathInFile, array, 0, 0);
3560 }
3561
3562 template<unsigned int N, class T, class StrideTag>
3563 void writeHDF5(const char* filePath, const char* pathInFile, const MultiArrayView<N, T, StrideTag> & array, const hid_t datatype, const int numBandsOfType)
3564 {
35653610 HDF5File file(filePath, HDF5File::Open);
35663611 file.write(pathInFile, array);
3567 file.close();
35683612 }
3569
35703613
35713614 namespace detail
35723615 {
4646 /*vigra*/
4747 #include "priority_queue.hxx"
4848 #include "metrics.hxx"
49 #include "merge_graph_adaptor.hxx"
4950
5051 namespace vigra{
52
53 /** \addtogroup GraphDataStructures
54 */
55 //@{
5156
5257 namespace cluster_operators{
5358
253258 const Node u = mergeGraph_.u(e);
254259 const Node v = mergeGraph_.v(e);
255260
256 const size_t dU = mergeGraph_.degree(u);
257 const size_t dV = mergeGraph_.degree(u);
258261 const BaseGraphEdge ee=EdgeHelper::itemToGraphItem(mergeGraph_,e);
259262 const BaseGraphNode uu=NodeHelper::itemToGraphItem(mergeGraph_,u);
260263 const BaseGraphNode vv=NodeHelper::itemToGraphItem(mergeGraph_,v);
279282 vigra::ChangeablePriorityQueue< ValueType > pq_;
280283 ValueType wardness_;;
281284 };
282
283285 /// \brief This Cluster Operator is a MONSTER.
284286 /// It can really do a lot.
285287 ///
361363 NODE_LABEL_MAP nodeLabelMap,
362364 const ValueType beta,
363365 const metrics::MetricType metricType,
364 const ValueType wardness=1.0,
365 const ValueType gamma = 10000000.0,
366 const ValueType sameLabelMultiplier = 0.8
366 const ValueType wardness=static_cast<ValueType>(1.0),
367 const ValueType gamma = static_cast<ValueType>(10000000.0),
368 const ValueType sameLabelMultiplier = static_cast<ValueType>(0.8)
367369 )
368370 : mergeGraph_(mergeGraph),
369371 edgeIndicatorMap_(edgeIndicatorMap),
377379 wardness_(wardness),
378380 gamma_(gamma),
379381 sameLabelMultiplier_(sameLabelMultiplier),
380 metric_(metricType)
382 metric_(metricType),
383 useStopWeight_(false),
384 stopWeight_()
381385 {
382386 typedef typename MergeGraph::MergeNodeCallBackType MergeNodeCallBackType;
383387 typedef typename MergeGraph::MergeEdgeCallBackType MergeEdgeCallBackType;
408412 /// \brief will be called via callbacks from mergegraph
409413 void mergeEdges(const Edge & a,const Edge & b){
410414 // update features / weigts etc
415 bool done = false;
411416 const BaseGraphEdge aa=EdgeHelper::itemToGraphItem(mergeGraph_,a);
412417 const BaseGraphEdge bb=EdgeHelper::itemToGraphItem(mergeGraph_,b);
413 EdgeIndicatorReference va=edgeIndicatorMap_[aa];
414 EdgeIndicatorReference vb=edgeIndicatorMap_[bb];
415 va*=edgeSizeMap_[aa];
416 vb*=edgeSizeMap_[bb];
417
418
419 va+=vb;
420 edgeSizeMap_[aa]+=edgeSizeMap_[bb];
421 va/=(edgeSizeMap_[aa]);
422 vb/=edgeSizeMap_[bb];
423 // delete b from pq
424 pq_.deleteItem(b.id());
418 if(!isLifted_.empty()){
419 const bool isLiftedA = isLifted_[mergeGraph_.graph().id(aa)];
420 const bool isLiftedB = isLifted_[mergeGraph_.graph().id(bb)];
421 if(isLiftedA && isLiftedB){
422 pq_.deleteItem(b.id());
423 done = true;
424 }
425 isLifted_[mergeGraph_.graph().id(aa)] = isLiftedA && isLiftedB;
426 }
427 if(!done){
428
429 EdgeIndicatorReference va=edgeIndicatorMap_[aa];
430 EdgeIndicatorReference vb=edgeIndicatorMap_[bb];
431 va*=edgeSizeMap_[aa];
432 vb*=edgeSizeMap_[bb];
433
434 va+=vb;
435 edgeSizeMap_[aa]+=edgeSizeMap_[bb];
436 va/=(edgeSizeMap_[aa]);
437 vb/=edgeSizeMap_[bb];
438 // delete b from pq
439 pq_.deleteItem(b.id());
440 }
425441 }
426442
427443 /// \brief will be called via callbacks from mergegraph
496512 pq_.deleteItem(minLabel);
497513 minLabel = pq_.top();
498514 }
515 //std::cout<<"mg e"<<mergeGraph_.edgeNum()<<" mg n"<<mergeGraph_.nodeNum()<<" cw"<< this->contractionWeight()<<"\n";
516 if(!isLifted_.empty()){
517 if(isLifted_[minLabel])
518 throw std::runtime_error("use lifted edges only if you are DerThorsten or know what you are doing\n");
519 }
499520 return Edge(minLabel);
500521 }
501522
517538 }
518539
519540 bool done(){
520
521541 index_type minLabel = pq_.top();
522542 while(mergeGraph_.hasEdgeId(minLabel)==false){
523543 pq_.deleteItem(minLabel);
524544 minLabel = pq_.top();
525545 }
526546 const ValueType p = pq_.topPriority();
527
547 if(useStopWeight_){
548 if(p >= stopWeight_){
549 return true;
550 }
551 }
528552 return p>= gamma_;
529553 }
530554
555 template<class ITER>
556 void setLiftedEdges(ITER idsBegin, ITER idsEnd){
557 if(isLifted_.size()<std::size_t(mergeGraph_.graph().maxEdgeId()+1)){
558 isLifted_.resize(mergeGraph_.graph().maxEdgeId()+1,false);
559 std::fill(isLifted_.begin(), isLifted_.end(), false);
560 }
561 while(idsBegin!=idsEnd){
562 isLifted_[*idsBegin] = true;
563
564 const ValueType currentWeight = this->getEdgeWeight(Edge(*idsBegin));
565 pq_.push(*idsBegin,currentWeight);
566 minWeightEdgeMap_[mergeGraph_.graph().edgeFromId(*idsBegin)]=currentWeight;
567 ++idsBegin;
568 }
569 }
570
571 void enableStopWeight(const ValueType stopWeight){
572 useStopWeight_ = true;
573 stopWeight_ = stopWeight;
574 }
531575 private:
532576 ValueType getEdgeWeight(const Edge & e){
533
577 const BaseGraphEdge ee=EdgeHelper::itemToGraphItem(mergeGraph_,e);
578 if(!isLifted_.empty() && isLifted_[mergeGraph_.graph().id(ee)]){
579 //std::cout<<"found lifted edge\n";
580 return 10000000.0;// std::numeric_limits<ValueType>::infinity();
581 }
534582 const Node u = mergeGraph_.u(e);
535583 const Node v = mergeGraph_.v(e);
536584
537 const size_t dU = mergeGraph_.degree(u);
538 const size_t dV = mergeGraph_.degree(u);
539 const BaseGraphEdge ee=EdgeHelper::itemToGraphItem(mergeGraph_,e);
540585 const BaseGraphNode uu=NodeHelper::itemToGraphItem(mergeGraph_,u);
541586 const BaseGraphNode vv=NodeHelper::itemToGraphItem(mergeGraph_,v);
542587
579624 ValueType gamma_;
580625 ValueType sameLabelMultiplier_;
581626 metrics::Metric<float> metric_;
627
628 std::vector<bool> isLifted_;
629 bool useStopWeight_;
630 ValueType stopWeight_;
582631 };
583632
584633
585634
586635 } // end namespace cluster_operators
587636
588
589
590 /// \brief do hierarchical clustering with a given cluster operator
591 template< class CLUSTER_OPERATOR>
592 class HierarchicalClustering{
593
594 public:
595 typedef CLUSTER_OPERATOR ClusterOperator;
596 typedef typename ClusterOperator::MergeGraph MergeGraph;
597 typedef typename MergeGraph::Graph Graph;
598 typedef typename Graph::Edge BaseGraphEdge;
599 typedef typename Graph::Node BaseGraphNode;
600 typedef typename MergeGraph::Edge Edge;
601 typedef typename MergeGraph::Node Node;
602 typedef typename CLUSTER_OPERATOR::WeightType ValueType;
603 typedef typename MergeGraph::index_type MergeGraphIndexType;
604
605 struct Parameter{
606 Parameter(
607 const size_t nodeNumStopCond = 1,
608 const bool buildMergeTree = true,
609 const bool verbose = false
610 )
611 : nodeNumStopCond_ (nodeNumStopCond),
612 buildMergeTreeEncoding_(buildMergeTree),
613 verbose_(verbose){
614 }
615 size_t nodeNumStopCond_;
616 bool buildMergeTreeEncoding_;
617 bool verbose_;
618 };
619
620 struct MergeItem{
621 MergeItem(
622 const MergeGraphIndexType a,
623 const MergeGraphIndexType b,
624 const MergeGraphIndexType r,
625 const ValueType w
626 ):
627 a_(a),b_(b),r_(r),w_(w){
628 }
629 MergeGraphIndexType a_;
630 MergeGraphIndexType b_;
631 MergeGraphIndexType r_;
632 ValueType w_;
633 };
634
635 typedef std::vector<MergeItem> MergeTreeEncoding;
636
637 /// \brief construct HierarchicalClustering from clusterOperator and an optional parameter object
638 HierarchicalClustering(
639 ClusterOperator & clusterOperator,
640 const Parameter & parameter = Parameter()
641 )
642 :
643 clusterOperator_(clusterOperator),
644 param_(parameter),
645 mergeGraph_(clusterOperator_.mergeGraph()),
646 graph_(mergeGraph_.graph()),
647 timestamp_(graph_.maxNodeId()+1),
648 toTimeStamp_(),
649 timeStampIndexToMergeIndex_(),
650 mergeTreeEndcoding_()
651 {
637 /** \brief Options object for hierarchical clustering.
638
639 <b>\#include</b> \<vigra/hierarchical_clustering.hxx\><br/>
640 Namespace: vigra
641
642 This class allows to set various parameters of \ref hierarchicalClustering().
643 See there for usage examples.
644 */
645 class ClusteringOptions
646 {
647 public:
648
649 ClusteringOptions(
650 const size_t nodeNumStopCond = 1,
651 const bool buildMergeTree = false,
652 const bool verbose = false)
653 : nodeNumStopCond_ (nodeNumStopCond)
654 , maxMergeWeight_(NumericTraits<double>::max())
655 , nodeFeatureImportance_(0.5)
656 , sizeImportance_(1.0)
657 , nodeFeatureMetric_(metrics::ManhattanMetric)
658 , buildMergeTreeEncoding_(buildMergeTree)
659 , verbose_(verbose)
660 {}
661
662 /** Stop merging when the number of clusters reaches this threshold.
663
664 Default: 1 (don't stop early)
665 */
666 ClusteringOptions & minRegionCount(size_t count)
667 {
668 nodeNumStopCond_ = count;
669 return *this;
670 }
671
672 /** Stop merging when the weight of the cheapest edge exceeds this threshold.
673
674 Default: infinity (don't stop early)
675 */
676 ClusteringOptions & maxMergeDistance(double val)
677 {
678 maxMergeWeight_ = val;
679 return *this;
680 }
681
682 /** Importance of node features relative to edge weights.
683
684 Must be between 0 and 1, with 0 meaning that node features
685 are ignored, and 1 meaning that edge weights are ignored.
686
687 Default: 0.5 (equal importance)
688 */
689 ClusteringOptions & nodeFeatureImportance(double val)
690 {
691 vigra_precondition(0.0 <= val && val <= 1.0,
692 "ClusteringOptions::nodePropertyImportance(val): 0 <= val <= 1 required.");
693 nodeFeatureImportance_ = val;
694 return *this;
695 }
696
697 /** Importance of node size.
698
699 Must be between 0 and 1, with 0 meaning that size is ignored,
700 and 1 meaning that the algorithm prefers to keep cluster sizes
701 balanced.
702
703 Default: 1.0 (prefer like-sized clusters)
704 */
705 ClusteringOptions & sizeImportance(double val)
706 {
707 vigra_precondition(0.0 <= val && val <= 1.0,
708 "ClusteringOptions::sizeImportance(val): 0 <= val <= 1 required.");
709 sizeImportance_ = val;
710 return *this;
711 }
712
713
714 /** Metric to be used when transforming node features into cluster distances.
715
716 The cluster (= node) distance is the respective norm of the difference
717 vector between the corresponding node feature vectors.
718
719 Default: metrics::ManhattanMetric (L1-norm of the feature difference)
720 */
721 ClusteringOptions & nodeFeatureMetric(metrics::MetricType metric)
722 {
723 nodeFeatureMetric_ = metric;
724 return *this;
725 }
726
727 ClusteringOptions & buildMergeTreeEncoding(bool val=true)
728 {
729 buildMergeTreeEncoding_ = val;
730 return *this;
731 }
732
733 /** Display progress information.
734
735 Default: false
736 */
737 ClusteringOptions & verbose(bool val=true)
738 {
739 verbose_ = val;
740 return *this;
741 }
742
743 size_t nodeNumStopCond_;
744 double maxMergeWeight_;
745 double nodeFeatureImportance_;
746 double sizeImportance_;
747 metrics::MetricType nodeFeatureMetric_;
748 bool buildMergeTreeEncoding_;
749 bool verbose_;
750 };
751
752 // \brief do hierarchical clustering with a given cluster operator
753 template< class CLUSTER_OPERATOR>
754 class HierarchicalClusteringImpl
755 {
756 public:
757 typedef CLUSTER_OPERATOR ClusterOperator;
758 typedef typename ClusterOperator::MergeGraph MergeGraph;
759 typedef typename MergeGraph::Graph Graph;
760 typedef typename Graph::Edge BaseGraphEdge;
761 typedef typename Graph::Node BaseGraphNode;
762 typedef typename MergeGraph::Edge Edge;
763 typedef typename MergeGraph::Node Node;
764 typedef typename CLUSTER_OPERATOR::WeightType ValueType;
765 typedef typename MergeGraph::index_type MergeGraphIndexType;
766
767 typedef ClusteringOptions Parameter;
768
769 struct MergeItem{
770 MergeItem(
771 const MergeGraphIndexType a,
772 const MergeGraphIndexType b,
773 const MergeGraphIndexType r,
774 const ValueType w
775 ):
776 a_(a),b_(b),r_(r),w_(w){
777 }
778 MergeGraphIndexType a_;
779 MergeGraphIndexType b_;
780 MergeGraphIndexType r_;
781 ValueType w_;
782 };
783
784 typedef std::vector<MergeItem> MergeTreeEncoding;
785
786 /// \brief construct HierarchicalClusteringImpl from clusterOperator and an optional parameter object
787 HierarchicalClusteringImpl(
788 ClusterOperator & clusterOperator,
789 const Parameter & parameter = Parameter()
790 )
791 :
792 clusterOperator_(clusterOperator),
793 param_(parameter),
794 mergeGraph_(clusterOperator_.mergeGraph()),
795 graph_(mergeGraph_.graph()),
796 timestamp_(graph_.maxNodeId()+1),
797 toTimeStamp_(),
798 timeStampIndexToMergeIndex_(),
799 mergeTreeEndcoding_()
800 {
801 if(param_.buildMergeTreeEncoding_){
802 // this can be be made smater since user can pass
803 // stoping condition based on nodeNum
804 mergeTreeEndcoding_.reserve(graph_.nodeNum()*2);
805 toTimeStamp_.resize(graph_.maxNodeId()+1);
806 timeStampIndexToMergeIndex_.resize(graph_.maxNodeId()+1);
807 for(MergeGraphIndexType nodeId=0;nodeId<=mergeGraph_.maxNodeId();++nodeId){
808 toTimeStamp_[nodeId]=nodeId;
809 }
810 }
811
812
813
814 }
815
816 /// \brief start the clustering
817 void cluster(){
818 if(param_.verbose_)
819 std::cout<<"\n";
820 while(mergeGraph_.nodeNum()>param_.nodeNumStopCond_ && mergeGraph_.edgeNum()>0 && !clusterOperator_.done()){
821
822 const Edge edgeToRemove = clusterOperator_.contractionEdge();
652823 if(param_.buildMergeTreeEncoding_){
653 // this can be be made smater since user can pass
654 // stoping condition based on nodeNum
655 mergeTreeEndcoding_.reserve(graph_.nodeNum()*2);
656 toTimeStamp_.resize(graph_.maxNodeId()+1);
657 timeStampIndexToMergeIndex_.resize(graph_.maxNodeId()+1);
658 for(MergeGraphIndexType nodeId=0;nodeId<=mergeGraph_.maxNodeId();++nodeId){
659 toTimeStamp_[nodeId]=nodeId;
660 }
661 }
662
663
664
665 }
666
667 /// \brief start the clustering
668 void cluster(){
669 if(param_.verbose_)
670 std::cout<<"\n";
671 while(mergeGraph_.nodeNum()>param_.nodeNumStopCond_ && mergeGraph_.edgeNum()>0 && !clusterOperator_.done()){
672
673 const Edge edgeToRemove = clusterOperator_.contractionEdge();
674 if(param_.buildMergeTreeEncoding_){
675 const MergeGraphIndexType uid = mergeGraph_.id(mergeGraph_.u(edgeToRemove));
676 const MergeGraphIndexType vid = mergeGraph_.id(mergeGraph_.v(edgeToRemove));
677 const ValueType w = clusterOperator_.contractionWeight();
678 // do the merge
679 mergeGraph_.contractEdge( edgeToRemove);
680 const MergeGraphIndexType aliveNodeId = mergeGraph_.hasNodeId(uid) ? uid : vid;
681 const MergeGraphIndexType deadNodeId = aliveNodeId==vid ? uid : vid;
682 timeStampIndexToMergeIndex_[timeStampToIndex(timestamp_)]=mergeTreeEndcoding_.size();
683 mergeTreeEndcoding_.push_back(MergeItem( toTimeStamp_[aliveNodeId],toTimeStamp_[deadNodeId],timestamp_,w));
684 toTimeStamp_[aliveNodeId]=timestamp_;
685 timestamp_+=1;
686 }
687 else{
688 //std::cout<<"constract\n";
689 // do the merge
690 mergeGraph_.contractEdge( edgeToRemove );
691 }
692 if(param_.verbose_ && mergeGraph_.nodeNum()%1==0){
693 std::cout<<"\rNodes: "<<std::setw(10)<<mergeGraph_.nodeNum()<<std::flush;
694 }
695
696 }
697 if(param_.verbose_)
698 std::cout<<"\n";
699 }
700
701 /// \brief get the encoding of the merge tree
702 const MergeTreeEncoding & mergeTreeEndcoding()const{
703 return mergeTreeEndcoding_;
704 }
705
706 template<class EDGE_MAP>
707 void ucmTransform(EDGE_MAP & edgeMap)const{
708 typedef typename Graph::EdgeIt BaseGraphEdgeIt;
709
710 for(BaseGraphEdgeIt iter(graph()); iter!=lemon::INVALID; ++iter ){
711 const BaseGraphEdge edge=*iter;
712 edgeMap[edge] = edgeMap[mergeGraph().reprGraphEdge(edge)];
713 }
714 }
715
716 /// \brief get the node id's which are the leafes of a treeNodeId
717 template<class OUT_ITER>
718 size_t leafNodeIds(const MergeGraphIndexType treeNodeId, OUT_ITER begin)const{
719 if(treeNodeId<=graph_.maxNodeId()){
720 *begin=treeNodeId;
721 ++begin;
722 return 1;
824 const MergeGraphIndexType uid = mergeGraph_.id(mergeGraph_.u(edgeToRemove));
825 const MergeGraphIndexType vid = mergeGraph_.id(mergeGraph_.v(edgeToRemove));
826 const ValueType w = clusterOperator_.contractionWeight();
827 // do the merge
828 mergeGraph_.contractEdge( edgeToRemove);
829 const MergeGraphIndexType aliveNodeId = mergeGraph_.hasNodeId(uid) ? uid : vid;
830 const MergeGraphIndexType deadNodeId = aliveNodeId==vid ? uid : vid;
831 timeStampIndexToMergeIndex_[timeStampToIndex(timestamp_)]=mergeTreeEndcoding_.size();
832 mergeTreeEndcoding_.push_back(MergeItem( toTimeStamp_[aliveNodeId],toTimeStamp_[deadNodeId],timestamp_,w));
833 toTimeStamp_[aliveNodeId]=timestamp_;
834 timestamp_+=1;
723835 }
724836 else{
725 size_t leafNum=0;
726 std::queue<MergeGraphIndexType> queue;
727 queue.push(treeNodeId);
728
729 while(!queue.empty()){
730
731 const MergeGraphIndexType id = queue.front();
732 queue.pop();
733 const MergeGraphIndexType mergeIndex = timeStampToMergeIndex(id);
734 const MergeGraphIndexType ab[]= { mergeTreeEndcoding_[mergeIndex].a_, mergeTreeEndcoding_[mergeIndex].b_};
735
736 for(size_t i=0;i<2;++i){
737 if(ab[i]<=graph_.maxNodeId()){
738 *begin=ab[i];
739 ++begin;
740 ++leafNum;
741 }
742 else{
743 queue.push(ab[i]);
744 }
837 //std::cout<<"constract\n";
838 // do the merge
839 mergeGraph_.contractEdge( edgeToRemove );
840 }
841 if(param_.verbose_ && mergeGraph_.nodeNum()%1==0){
842 std::cout<<"\rNodes: "<<std::setw(10)<<mergeGraph_.nodeNum()<<std::flush;
843 }
844
845 }
846 if(param_.verbose_)
847 std::cout<<"\n";
848 }
849
850 /// \brief get the encoding of the merge tree
851 const MergeTreeEncoding & mergeTreeEndcoding()const{
852 return mergeTreeEndcoding_;
853 }
854
855 template<class EDGE_MAP>
856 void ucmTransform(EDGE_MAP & edgeMap)const{
857 typedef typename Graph::EdgeIt BaseGraphEdgeIt;
858
859 for(BaseGraphEdgeIt iter(graph()); iter!=lemon::INVALID; ++iter ){
860 const BaseGraphEdge edge=*iter;
861 edgeMap[edge] = edgeMap[mergeGraph().reprGraphEdge(edge)];
862 }
863 }
864
865 /// \brief get the node id's which are the leafes of a treeNodeId
866 template<class OUT_ITER>
867 size_t leafNodeIds(const MergeGraphIndexType treeNodeId, OUT_ITER begin)const{
868 if(treeNodeId<=graph_.maxNodeId()){
869 *begin=treeNodeId;
870 ++begin;
871 return 1;
872 }
873 else{
874 size_t leafNum=0;
875 std::queue<MergeGraphIndexType> queue;
876 queue.push(treeNodeId);
877
878 while(!queue.empty()){
879
880 const MergeGraphIndexType id = queue.front();
881 queue.pop();
882 const MergeGraphIndexType mergeIndex = timeStampToMergeIndex(id);
883 const MergeGraphIndexType ab[]= { mergeTreeEndcoding_[mergeIndex].a_, mergeTreeEndcoding_[mergeIndex].b_};
884
885 for(size_t i=0;i<2;++i){
886 if(ab[i]<=graph_.maxNodeId()){
887 *begin=ab[i];
888 ++begin;
889 ++leafNum;
890 }
891 else{
892 queue.push(ab[i]);
745893 }
746894 }
747 return leafNum;
748 }
749 }
750
751 /// \brief get the graph the merge graph is based on
752 const Graph & graph()const{
753 return graph_;
754 }
755
756 /// \brief get the merge graph
757 const MergeGraph & mergeGraph()const{
758 return mergeGraph_;
759 }
760
761 /// \brief get the representative node id
762 const MergeGraphIndexType reprNodeId(const MergeGraphIndexType id)const{
763 return mergeGraph_.reprNodeId(id);
764 }
765 private:
766
767 MergeGraphIndexType timeStampToIndex(const MergeGraphIndexType timestamp)const{
768 return timestamp- graph_.maxNodeId();
769 }
770
771
772 MergeGraphIndexType timeStampToMergeIndex(const MergeGraphIndexType timestamp)const{
773 return timeStampIndexToMergeIndex_[timeStampToIndex(timestamp)];
774 }
775
776 ClusterOperator & clusterOperator_;
777 Parameter param_;
778 MergeGraph & mergeGraph_;
779 const Graph & graph_;
780 // parameter object
781
782
783 // timestamp
784 MergeGraphIndexType timestamp_;
785 std::vector<MergeGraphIndexType> toTimeStamp_;
786 std::vector<MergeGraphIndexType> timeStampIndexToMergeIndex_;
787 // data which can reconstruct the merge tree
788 MergeTreeEncoding mergeTreeEndcoding_;
789
790
791 };
792
793
895 }
896 return leafNum;
897 }
898 }
899
900 /// \brief get the graph the merge graph is based on
901 const Graph & graph()const{
902 return graph_;
903 }
904
905 /// \brief get the merge graph
906 const MergeGraph & mergeGraph()const{
907 return mergeGraph_;
908 }
909
910 /// \brief get the representative node id
911 const MergeGraphIndexType reprNodeId(const MergeGraphIndexType id)const{
912 return mergeGraph_.reprNodeId(id);
913 }
914 private:
915
916 MergeGraphIndexType timeStampToIndex(const MergeGraphIndexType timestamp)const{
917 return timestamp- graph_.maxNodeId();
918 }
919
920
921 MergeGraphIndexType timeStampToMergeIndex(const MergeGraphIndexType timestamp)const{
922 return timeStampIndexToMergeIndex_[timeStampToIndex(timestamp)];
923 }
924
925
926 ClusterOperator & clusterOperator_;
927 Parameter param_;
928 MergeGraph & mergeGraph_;
929 const Graph & graph_;
930 // parameter object
931
932
933 // timestamp
934 MergeGraphIndexType timestamp_;
935 std::vector<MergeGraphIndexType> toTimeStamp_;
936 std::vector<MergeGraphIndexType> timeStampIndexToMergeIndex_;
937 // data which can reconstruct the merge tree
938 MergeTreeEncoding mergeTreeEndcoding_;
939
940
941
942 };
943
944
945 /********************************************************/
946 /* */
947 /* hierarchicalClustering */
948 /* */
949 /********************************************************/
950
951 /** \brief Reduce the number of nodes in a graph by iteratively contracting
952 the cheapest edge.
953
954 <b> Declarations:</b>
955
956 \code
957 namespace vigra {
958 template <class GRAPH,
959 class EDGE_WEIGHT_MAP, class EDGE_LENGTH_MAP,
960 class NODE_FEATURE_MAP, class NOSE_SIZE_MAP,
961 class NODE_LABEL_MAP>
962 void
963 hierarchicalClustering(GRAPH const & graph,
964 EDGE_WEIGHT_MAP const & edgeWeights, EDGE_LENGTH_MAP const & edgeLengths,
965 NODE_FEATURE_MAP const & nodeFeatures, NOSE_SIZE_MAP const & nodeSizes,
966 NODE_LABEL_MAP & labelMap,
967 ClusteringOptions options = ClusteringOptions());
968 }
969 \endcode
970
971 Hierarchical clustering is a simple and versatile image segmentation
972 algorithm that typically operates either directly on the pixels (e.g. on
973 a \ref vigra::GridGraph) or on a region adjacency graph over suitable
974 superpixels (e.g. on an \ref vigra::AdjacencyListGraph). The graph is
975 passed to the function in its first argument. After clustering is completed,
976 the parameter \a labelMap contains a mapping from original node IDs to
977 the ID of the cluster each node belongs to. Cluster IDs correspond to
978 the ID of an arbitrarily chosen representative node within each cluster,
979 i.e. they form a sparse subset of the original IDs.
980
981 Properties of the graph's edges and nodes are provided in the property maps
982 \a edgeWeights, \a edgeLengths, \a nodeFeatures, and \a nodeSizes. These maps
983 are indexed by edge or node ID and return the corresponding feature. Features
984 must by arithmetic scalars or, in case of node features, scalars or vectors
985 of scalars (precisely: objects that provide <tt>begin()</tt> and <tt>end()</tt>
986 to create an STL range). Edge weights are typically derived from an edge
987 indicator such as the gradient magnitude, and node features are either the
988 responses of a filter family (when clustering on the pixel grid), or region
989 statistics as computed by \ref FeatureAccumulators (when clustering on
990 superpixels).
991
992 In each step, the algorithm merges the two nodes \f$u\f$ and \f$v\f$ whose
993 cluster distance is smallest, where the cluster distance is defined as
994
995 \f[
996 d_{uv} = \left( (1-\beta) w_{uv} + \beta || f_u - f_v ||_M \right)
997 \cdot \frac{2}{s_u^{-\omega} + s_v^{-\omega}}
998 \f]
999
1000 with \f$ w_{uv} \f$ denoting the weight of edge \f$uv\f$, \f$f_u\f$ and \f$f_v\f$
1001 being the node features (possibly vectors to be compared with metric \f$M\f$),
1002 and \f$s_u\f$ and \f$s_v\f$ the corresponding node sizes. The metric is defined
1003 in the option object by calling \ref vigra::ClusteringOptions::nodeFeatureMetric()
1004 and must be selected from the tags defined in \ref vigra::metrics::MetricType.
1005
1006 The parameters \f$0 \le \beta \le 1\f$ and \f$0 \le \omega \le 1\f$ control the
1007 relative influence of the inputs: With \f$\beta = 0\f$, the node features are
1008 ignored, whereas with \f$\beta = 1\f$ the edge weights are ignored. Similarly,
1009 with \f$\omega = 0\f$, the node size is ignored, whereas with \f$\omega = 1\f$,
1010 cluster distances are scaled by the harmonic mean of the cluster sizes, making
1011 the merging of small clusters more favorable. The parameters are defined in the
1012 option object by calling \ref vigra::ClusteringOptions::nodeFeatureImportance() and
1013 \ref vigra::ClusteringOptions::sizeImportance() respectively.
1014
1015 After each merging step, the features of the resulting cluster \f$z\f$ and the weights
1016 of its outgoing edges are updated by mean of the corresponding properties of the original
1017 clusters \f$u\f$ and \f$v\f$, weighted by the respective node sizes \f$s_z\f$ and
1018 edge lengths \f$l_{zy}\f$:
1019
1020 \f{eqnarray*}{
1021 s_z & = & s_u + s_v \\
1022 f_z & = & \frac{s_u f_u + s_v f_v}{s_z} \\
1023 l_{zy} & = & l_{uy} + l_{vy} \textrm{ for all nodes }y\textrm{ connected to }u\textrm{ or }v \\
1024 w_{zy} & = & \frac{l_{uy} w_{uy} + l_{vy} w_{vy}}{l_{zy}}
1025 \f}
1026
1027 Clustering normally stops when only one cluster remains. This default can be overridden
1028 by the option object parameters \ref vigra::ClusteringOptions::minRegionCount()
1029 and \ref vigra::ClusteringOptions::maxMergeDistance() to stop at a particular number of
1030 clusters or a particular cluster distance respectively.
1031
1032 <b> Usage:</b>
1033
1034 <b>\#include</b> \<vigra/hierarchical_clustering.hxx\><br>
1035 Namespace: vigra
1036
1037 A fully worked example can be found in <a href="graph_agglomerative_clustering_8cxx-example.html">graph_agglomerative_clustering.cxx</a>
1038 */
1039 doxygen_overloaded_function(template <...> void hierarchicalClustering)
1040
1041 template <class GRAPH,
1042 class EDGE_WEIGHT_MAP, class EDGE_LENGTH_MAP,
1043 class NODE_FEATURE_MAP, class NOSE_SIZE_MAP,
1044 class NODE_LABEL_MAP>
1045 void
1046 hierarchicalClustering(GRAPH const & graph,
1047 EDGE_WEIGHT_MAP const & edgeWeights, EDGE_LENGTH_MAP const & edgeLengths,
1048 NODE_FEATURE_MAP const & nodeFeatures, NOSE_SIZE_MAP const & nodeSizes,
1049 NODE_LABEL_MAP & labelMap,
1050 ClusteringOptions options = ClusteringOptions())
1051 {
1052 typedef typename NODE_LABEL_MAP::Value LabelType;
1053 typedef MergeGraphAdaptor<GRAPH> MergeGraph;
1054 typedef typename GRAPH::template EdgeMap<float> EdgeUltrametric;
1055 typedef typename GRAPH::template NodeMap<LabelType> NodeSeeds;
1056
1057 MergeGraph mergeGraph(graph);
1058
1059 // create property maps to store the computed ultrametric and
1060 // to provide optional cannot-link constraints;
1061 // we don't use these options here and therefore leave the maps empty
1062 EdgeUltrametric edgeUltrametric(graph);
1063 NodeSeeds nodeSeeds(graph);
1064
1065 // create an operator that stores all property maps needed for
1066 // hierarchical clustering and updates them after every merge step
1067 typedef cluster_operators::EdgeWeightNodeFeatures<
1068 MergeGraph,
1069 EDGE_WEIGHT_MAP,
1070 EDGE_LENGTH_MAP,
1071 NODE_FEATURE_MAP,
1072 NOSE_SIZE_MAP,
1073 EdgeUltrametric,
1074 NodeSeeds>
1075 MergeOperator;
1076
1077 MergeOperator mergeOperator(mergeGraph,
1078 edgeWeights, edgeLengths,
1079 nodeFeatures, nodeSizes,
1080 edgeUltrametric, nodeSeeds,
1081 options.nodeFeatureImportance_,
1082 options.nodeFeatureMetric_,
1083 options.sizeImportance_,
1084 options.maxMergeWeight_);
1085
1086 typedef HierarchicalClusteringImpl<MergeOperator> Clustering;
1087
1088 Clustering clustering(mergeOperator, options);
1089 clustering.cluster();
1090
1091 for(typename GRAPH::NodeIt node(graph); node != lemon::INVALID; ++node)
1092 {
1093 labelMap[*node] = mergeGraph.reprNodeId(graph.id(*node));
1094 }
7941095 }
7951096
1097 //@}
1098
1099 } // namespace vigra
1100
7961101 #endif // VIGRA_HIERARCHICAL_CLUSTERING_HXX
156156
157157 void getBinCenters(ArrayVector<DataType> * centers) const
158158 {
159 double invScale = 1.0 / scale_;
160159 for(int k=0; k < size_; ++k)
161160 {
162161 (*centers)[k] = mapItemInverse(k + 0.5) ;
347346
348347 public:
349348 Histogram(DataType const & min, DataType const & max, int binCount,
350 BinType * bins = 0, int stride = 1)
349 BinType * /*bins*/ = 0, int /*stride*/ = 1)
351350 : BaseType(min, max, binCount),
352351 data_(binCount)
353352 {
762762 /** swap contents of this array with the contents of other
763763 (STL-Container interface)
764764 */
765 void swap(const ImagePyramid<ImageType, Alloc> &other)
765 void swap(ImagePyramid<ImageType, Alloc> &other)
766766 {
767767 images_.swap(other.images_);
768768 std::swap(lowestLevel_, other.lowestLevel_);
7878
7979 cumulativeSum(array, intarray, 0, f);
8080
81 for(int axis=1; axis < N; ++axis)
81 for(unsigned axis=1; axis < N; ++axis)
8282 cumulativeSum(intarray, intarray, axis, functor::Identity());
8383 }
8484
706706 /* */
707707 /********************************************************/
708708
709
710 enum EdgeImageLabelPolicy { CopyRegionLabels, EdgeOverlayOnly };
711
712
709713 /** \brief Transform a labeled image into a crack edge (interpixel edge) image.
710714
711715 <b> Declarations:</b>
718722 void
719723 regionImageToCrackEdgeImage(MultiArrayView<2, T1, S1> const & src,
720724 MultiArrayView<2, T2, S2> dest,
721 DestValue edge_marker);
725 DestValue edge_marker,
726 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels);
722727 }
723728 \endcode
724729
731736 void regionImageToCrackEdgeImage(
732737 SrcIterator sul, SrcIterator slr, SrcAccessor sa,
733738 DestIterator dul, DestAccessor da,
734 DestValue edge_marker)
739 DestValue edge_marker,
740 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels)
735741 }
736742 \endcode
737743 use argument objects in conjunction with \ref ArgumentObjectFactories :
742748 void regionImageToCrackEdgeImage(
743749 triple<SrcIterator, SrcIterator, SrcAccessor> src,
744750 pair<DestIterator, DestAccessor> dest,
745 DestValue edge_marker)
751 DestValue edge_marker,
752 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels)
746753 }
747754 \endcode
748755 \deprecatedEnd
749756
750 This algorithm inserts border pixels (so called "crack edges" or "interpixel edges")
751 between regions in a labeled image like this (<TT>a</TT> and
752 <TT>c</TT> are the original labels, and <TT>0</TT> is the value of
753 <TT>edge_marker</TT> and denotes the inserted edges):
754
755 \code
756 original image insert zero- and one-cells
757 The destination image must be twice the size of the input image (precisely,
758 <TT>(2*w-1)</TT> by <TT>(2*h-1)</TT> pixels) to have space for the so called
759 "crack edges" or "interpixel edges" which are logically situated between pixels
760 (at half-integer coordinates of the input image) and correspond to the odd-valued
761 coordinates in the result image (see \ref CrackEdgeImage for more details).
762
763 When <tt>labelPolicy == CopyRegionLabels</tt> (the default), this algorithm
764 transfers the labels of a labeled image to the output image (repeating them
765 as appropriate to account for the output image size) and inserts border pixels
766 when the label changes. For example, if <TT>a</TT> and <TT>c</TT> are the
767 original labels, and <TT>0</TT> is the value of <TT>edge_marker</TT>, the
768 transformation looks like this:
769
770 \code
771 original image copy labels and insert edges
757772
758773 a 0 c c c
759774 a c c a 0 0 0 c
762777 a a a a a
763778 \endcode
764779
780 When <tt>labelPolicy == EdgeOverlayOnly</tt>, the region pixels of the output
781 image remain untouched, and only the edge marker is inserted. This is especially
782 useful for visualization, when the output is the interpolated original image:
783 \code
784 original image destination image overlay edges only
785
786 d d d d d d 0 d d d
787 a c c d d d d d d 0 0 0 d
788 a a c + d d d d d => d d d 0 d
789 a a a d d d d d d d d 0 0
790 d d d d d d d d d d
791 \endcode
792
765793 The algorithm assumes that the original labeled image contains
766794 no background. Therefore, it is suitable as a post-processing
767795 operation of \ref labelImage() or \ref seededRegionGrowing().
768796
769 The destination image must be twice the size of the original
770 (precisely, <TT>(2*w-1)</TT> by <TT>(2*h-1)</TT> pixels). The
771 source value type (<TT>SrcAccessor::value-type</TT>) must be
797 The source value type (<TT>SrcAccessor::value-type</TT>) must be
772798 equality-comparable.
773799
774800 <b> Usage:</b>
840866 void regionImageToCrackEdgeImage(
841867 SrcIterator sul, SrcIterator slr, SrcAccessor sa,
842868 DestIterator dul, DestAccessor da,
843 DestValue edge_marker)
869 DestValue edge_marker,
870 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels)
844871 {
845872 int w = slr.x - sul.x;
846873 int h = slr.y - sul.y;
862889
863890 for(x=0; x<w-1; ++x, ++ix.x, dx.x+=2)
864891 {
892 if(labelPolicy == CopyRegionLabels)
893 {
894 da.set(sa(ix), dx);
895 da.set(sa(ix), dx, bottomright);
896 }
897
898 if(sa(ix, right) != sa(ix))
899 {
900 da.set(edge_marker, dx, right);
901 }
902 else if(labelPolicy == CopyRegionLabels)
903 {
904 da.set(sa(ix), dx, right);
905 }
906 if(sa(ix, bottom) != sa(ix))
907 {
908 da.set(edge_marker, dx, bottom);
909 }
910 else if(labelPolicy == CopyRegionLabels)
911 {
912 da.set(sa(ix), dx, bottom);
913 }
914
915 }
916
917 if(labelPolicy == CopyRegionLabels)
918 {
865919 da.set(sa(ix), dx);
866 da.set(sa(ix), dx, bottomright);
867
868 if(sa(ix, right) != sa(ix))
869 {
870 da.set(edge_marker, dx, right);
871 }
872 else
873 {
874 da.set(sa(ix), dx, right);
875 }
876 if(sa(ix, bottom) != sa(ix))
877 {
878 da.set(edge_marker, dx, bottom);
879 }
880 else
881 {
882 da.set(sa(ix), dx, bottom);
883 }
884
885 }
886
887 da.set(sa(ix), dx);
920 }
888921 if(sa(ix, bottom) != sa(ix))
889922 {
890923 da.set(edge_marker, dx, bottom);
891924 }
892 else
925 else if(labelPolicy == CopyRegionLabels)
893926 {
894927 da.set(sa(ix), dx, bottom);
895928 }
900933
901934 for(x=0; x<w-1; ++x, ++ix.x, dx.x+=2)
902935 {
936 if(labelPolicy == CopyRegionLabels)
937 {
938 da.set(sa(ix), dx);
939 }
940 if(sa(ix, right) != sa(ix))
941 {
942 da.set(edge_marker, dx, right);
943 }
944 else if(labelPolicy == CopyRegionLabels)
945 {
946 da.set(sa(ix), dx, right);
947 }
948 }
949 if(labelPolicy == CopyRegionLabels)
950 {
903951 da.set(sa(ix), dx);
904 if(sa(ix, right) != sa(ix))
905 {
906 da.set(edge_marker, dx, right);
907 }
908 else
909 {
910 da.set(sa(ix), dx, right);
911 }
912 }
913 da.set(sa(ix), dx);
914
952 }
915953 dy = dul + Diff2D(1,1);
916954
917955 const Diff2D dist[] = {right, top, left, bottom };
938976 inline void
939977 regionImageToCrackEdgeImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
940978 pair<DestIterator, DestAccessor> dest,
941 DestValue edge_marker)
979 DestValue edge_marker,
980 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels)
942981 {
943982 regionImageToCrackEdgeImage(src.first, src.second, src.third,
944983 dest.first, dest.second,
945 edge_marker);
984 edge_marker, labelPolicy);
946985 }
947986
948987 template <class T1, class S1,
950989 inline void
951990 regionImageToCrackEdgeImage(MultiArrayView<2, T1, S1> const & src,
952991 MultiArrayView<2, T2, S2> dest,
953 DestValue edge_marker)
992 DestValue edge_marker,
993 EdgeImageLabelPolicy labelPolicy = CopyRegionLabels)
954994 {
955995 vigra_precondition(2*src.shape()-Shape2(1) == dest.shape(),
956996 "regionImageToCrackEdgeImage(): shape mismatch between input and output.");
957997 regionImageToCrackEdgeImage(srcImageRange(src),
958998 destImage(dest),
959 edge_marker);
999 edge_marker,
1000 labelPolicy);
9601001 }
9611002
9621003 /********************************************************/
5858
5959 MultiArrayIndex m = rowCount(a), n = columnCount(a);
6060 vigra_precondition(n == m,
61 "determinant(): square matrix required.");
62
61 "determinantByLUDecomposition(): square matrix required.");
62 vigra_precondition(NumericTraits<T>::isIntegral::value == false,
63 "determinantByLUDecomposition(): Input matrix must not be an integral type.");
64
6365 Matrix<T> LU(a);
6466 T det = 1.0;
6567
9092 break; // det is zero
9193 }
9294 return det;
95 }
96
97 template <class T, class C1>
98 typename NumericTraits<T>::Promote
99 determinantByMinors(MultiArrayView<2, T, C1> const & mat)
100 {
101 typedef typename NumericTraits<T>::Promote PromoteType;
102 MultiArrayIndex m = rowCount(mat);
103 MultiArrayIndex n = columnCount(mat);
104 vigra_precondition(
105 n == m,
106 "determinantByMinors(): square matrix required.");
107 vigra_precondition(
108 NumericTraits<PromoteType>::isSigned::value,
109 "determinantByMinors(): promote type must be signed.");
110 if (m == 1)
111 {
112 return mat(0, 0);
113 }
114 else
115 {
116 Matrix<T> minor_mat(Shape2(m-1, n-1));
117 PromoteType det = NumericTraits<PromoteType>::zero();
118 for (MultiArrayIndex i = 0; i < m; i++)
119 {
120 for (MultiArrayIndex j = 0, jj = 0; j < (m - 1); j++, jj++)
121 {
122 if (j == i)
123 {
124 jj++;
125 }
126 rowVector(minor_mat, j) = rowVector(mat, Shape2(jj, 1), m);
127 }
128 const PromoteType sign = 1 - 2 * (i % 2);
129 det += sign * mat(i, 0) * determinantByMinors(minor_mat);
130 }
131 return det;
132 }
93133 }
94134
95135 // returns the new value of 'a' (when this Givens rotation is applied to 'a' and 'b')
800840
801841 \a method must be one of the following:
802842 <DL>
843 <DT>"default"<DD> Use "minor" for integral types and "LU" for any other.
803844 <DT>"Cholesky"<DD> Compute the solution by means of Cholesky decomposition. This
804845 method is faster than "LU", but requires the matrix \a a
805846 to be symmetric positive definite. If this is
806847 not the case, a <tt>ContractViolation</tt> exception is thrown.
807
808 <DT>"LU"<DD> (default) Compute the solution by means of LU decomposition.
848 <DT>"LU"<DD> Compute the solution by means of LU decomposition.
849 <DT>"minor"<DD> Compute the solution by means of determinants of minors.
809850 </DL>
810851
811852 <b>\#include</b> \<vigra/linear_solve.hxx\> or<br>
813854 Namespaces: vigra and vigra::linalg
814855 */
815856 template <class T, class C1>
816 T determinant(MultiArrayView<2, T, C1> const & a, std::string method = "LU")
817 {
857 typename NumericTraits<T>::Promote
858 determinant(MultiArrayView<2, T, C1> const & a, std::string method = "default")
859 {
860 typedef typename NumericTraits<T>::Promote PromoteType;
818861 MultiArrayIndex n = columnCount(a);
819862 vigra_precondition(rowCount(a) == n,
820863 "determinant(): Square matrix required.");
821864
822865 method = tolower(method);
823
866
867 if(method == "default")
868 {
869 method = NumericTraits<T>::isIntegral::value ? "minor" : "lu";
870 }
824871 if(n == 1)
825872 return a(0,0);
826873 if(n == 2)
828875 if(method == "lu")
829876 {
830877 return detail::determinantByLUDecomposition(a);
878 }
879 else if(method == "minor")
880 {
881 return detail::determinantByMinors(a);
831882 }
832883 else if(method == "cholesky")
833884 {
843894 {
844895 vigra_precondition(false, "determinant(): Unknown solution method.");
845896 }
846 return T();
897 return PromoteType();
847898 }
848899
849900 /** Compute the logarithm of the determinant of a symmetric positive definite matrix.
908959 MultiArrayView<2, T, C2> &L)
909960 {
910961 MultiArrayIndex n = columnCount(A);
962 vigra_precondition(NumericTraits<T>::isIntegral::value == false,
963 "choleskyDecomposition(): Input matrix must not be an integral type.");
911964 vigra_precondition(rowCount(A) == n,
912965 "choleskyDecomposition(): Input matrix must be square.");
913966 vigra_precondition(n == columnCount(L) && n == rowCount(L),
179179 localMinMax3D(SrcIterator sul, SrcShape shp, SrcAccessor sa,
180180 DestIterator dul, DestAccessor da,
181181 DestValue marker,
182 Neighborhood neighborhood,
182 Neighborhood,
183183 typename SrcAccessor::value_type threshold,
184184 Compare compare,
185185 bool allowExtremaAtBorder = false)
348348 DestValue marker,
349349 Neighborhood neighbourhood,
350350 Compare compare,
351 Equal equal,
351 Equal,
352352 typename SrcAccessor::value_type threshold,
353353 bool allowExtremaAtBorder = false)
354354 {
162162
163163 using std::isinf;
164164 using std::isnan;
165 using std::isfinite;
165166
166167 #else
167168
template <class REAL>
inline bool isinf(REAL v)
{
    // MSVC fallback: v is infinite iff it is neither NaN nor finite
    // (_isnan() and _finite() return nonzero for NaN / finite values).
    return _isnan(v) == 0 && _finite(v) == 0;
}
173174
template <class REAL>
// MSVC fallback: the CRT's _isnan() returns a nonzero value when v is a NaN.
inline bool isnan(REAL v)
{
 return _isnan(v) != 0;
 }
180
template <class REAL>
inline bool isfinite(REAL v)
{
    // MSVC fallback: _finite() yields nonzero exactly for finite values.
    return !(_finite(v) == 0);
}
179186
180187 #endif
264271 : (long long)(t - 0.5);
265272 }
266273
274 /** \brief Determine whether x is a power of 2
275 Bit twiddle from https://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
276 */
277 inline bool isPower2(UInt32 x)
278 {
279 return x && !(x & (x - 1));
280 }
281
282
267283 /** \brief Round up to the nearest power of 2.
268284
269285 Efficient algorithm for finding the smallest power of 2 which is not smaller than \a x
286302 x = x | (x >>16);
287303 return x + 1;
288304 }
305
289306
290307 /** \brief Round down to the nearest power of 2.
291308
727744 This uses a numerically stable version of the analytical eigenvalue formula according to
728745 <p>
729746 David Eberly: <a href="http://www.geometrictools.com/Documentation/EigenSymmetric3x3.pdf">
730 <em>"Eigensystems for 3 × 3 Symmetric Matrices (Revisited)"</em></a>, Geometric Tools Documentation, 2006
747 <em>"Eigensystems for 3 x 3 Symmetric Matrices (Revisited)"</em></a>, Geometric Tools Documentation, 2006
731748
732749 <b>\#include</b> \<vigra/mathutil.hxx\><br>
733750 Namespace: vigra
17251742 }
17261743
17271744
1745 #ifdef __GNUC__
1746 #pragma GCC diagnostic push
1747 #pragma GCC diagnostic ignored "-Wtype-limits"
1748 #endif
1749
17281750 VIGRA_MATH_FUNC_HELPER(unsigned char)
17291751 VIGRA_MATH_FUNC_HELPER(unsigned short)
17301752 VIGRA_MATH_FUNC_HELPER(unsigned int)
17391761 VIGRA_MATH_FUNC_HELPER(double)
17401762 VIGRA_MATH_FUNC_HELPER(long double)
17411763
1742
1764 #ifdef __GNUC__
1765 #pragma GCC diagnostic pop
1766 #endif
17431767
17441768 #undef VIGRA_MATH_FUNC_HELPER
17451769
5454
5555 namespace matlab {
5656
/*++++++++++++++++++++++++++HELPERFUNC+++++++++++++++++++++++++++++++*
 * Cantor pairing: maps two (or, by nesting, three) non-negative ints
 * to a single unique int. Used to make the test cases more readable.
 *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
int cantorPair(int x, int y){
    const int diag = x + y;                 // index of the diagonal
    return (diag * (diag + 1)) / 2 + y;     // offset within the diagonal
}

int cantorPair(int x, int y, int z){
    // Pair the first two values, then pair the result with the third.
    return cantorPair(cantorPair(x, y), z);
}
68
// Compile-time Cantor pairing of two (cP) resp. three (cP3) integers,
// mirroring the runtime cantorPair() helpers.
template <int x, int y>
struct cP{
    enum { value = ((x + y) * (x + y + 1)) / 2 + y };
};

template <int x, int y, int z>
struct cP3{
    // Nest two pairings: pair x and y first, then pair with z.
    enum { value = cP<cP<x, y>::value, z>::value };
};
78
// Readable range tests for the test cases. Passing a std::string in place
// of min or max marks that bound as unlimited, so only the other bound is
// actually checked.
template <class T>
inline bool is_in_range(T in, T min, T max)
{
    if (in >= min)
        return in <= max;
    return false;
}
template<class T>
inline bool is_in_range(T in, std::string min, T max)
{
    // lower bound disabled; test upper bound only
    return in <= max;
}

template<class T>
inline bool is_in_range(T in, T min, std::string max)
{
    // upper bound disabled; test lower bound only
    return in >= min;
}
95
5796 template <class T>
5897 struct ValueType;
5998
10831122 using namespace vigra;
10841123
10851124
1086 /*++++++++++++++++++++++++++HELPERFUNC+++++++++++++++++++++++++++++++*
1087 * This is used for better readability of the test cases .
1088 * Nothing to be done here.
1089 *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
1090 int cantorPair(int x, int y){
1091 return (int)(((x+y)*(x+y+1))/2+y);
1092 }
1093
1094 int cantorPair(int x, int y, int z){
1095 return cantorPair(cantorPair(x,y),z);
1096 }
1097
1098 template <int x, int y>
1099 struct cP{
1100 enum { value = (int)(((x+y)*(x+y+1))/2+y)};
1101 };
1102
1103 template <int x, int y, int z>
1104 struct cP3{
1105 enum { value = cP<cP<x, y>::value, z>::value};
1106 };
1107
1108 template <class T>
1109 inline bool is_in_range(T in, T min, T max)
1110 {
1111 return (in >= min && in <= max);
1112 }
1113 template<class T>
1114 inline bool is_in_range(T in, std::string min, T max)
1115 {
1116 return(in <= max);
1117 }
1118
1119 template<class T>
1120 inline bool is_in_range(T in, T min, std::string max)
1121 {
1122 return (in >= min);
1123 }
1124
1125
1126
11271125 //Wrapper classes to STL-Map for use as a sparse array.
11281126
11291127 //This is used for the ordering of the map. Lexicographical ordering of the index pairs.
5555
5656 /** \ingroup LinearAlgebraModule
5757
58 \brief Linear algebra functions.
59
5860 Namespace <tt>vigra/linalg</tt> hold VIGRA's linear algebra functionality. But most of its contents
5961 is exported into namespace <tt>vigra</tt> via <tt>using</tt> directives.
6062 */
6264 {
6365
6466 template <class T, class C>
65 inline MultiArrayIndex
67 inline MultiArrayIndex
6668 rowCount(const MultiArrayView<2, T, C> &x);
6769
6870 template <class T, class C>
69 inline MultiArrayIndex
71 inline MultiArrayIndex
7072 columnCount(const MultiArrayView<2, T, C> &x);
7173
7274 template <class T, class C>
9496 /* */
9597 /********************************************************/
9698
97 /** Matrix class.
98
99 \ingroup LinearAlgebraModule
99 /** Matrix class.
100
101 \ingroup LinearAlgebraModule
100102
101103 This is the basic class for all linear algebra computations. Matrices are
102104 stored in a <i>column-major</i> format, i.e. the row index is varying fastest.
397399 vigra::FindSum<T>() );
398400 return result;
399401 }
400
402
401403 /** sums over dimension \a d of the matrix.
402404 */
403405 TemporaryMatrix<T> sum(difference_type_1 d) const
420422 vigra::FindAverage<T>() );
421423 return result;
422424 }
423
425
424426 /** calculates mean over dimension \a d of the matrix.
425427 */
426428 TemporaryMatrix<T> mean(difference_type_1 d) const
458460 typename NormTraits<Matrix>::NormType norm() const;
459461
460462 /** create a transposed view of this matrix.
461 No data are copied. If you want to transpose this matrix permanently,
463 No data are copied. If you want to transpose this matrix permanently,
462464 you have to assign the transposed view:
463
465
464466 \code
465467 a = a.transpose();
466468 \endcode
585587 {
586588 this->swap(const_cast<TemporaryMatrix &>(rhs));
587589 }
588
590
589591 template <class U>
590592 TemporaryMatrix & init(const U & init)
591593 {
652654 /** \defgroup LinearAlgebraFunctions Matrix Functions
653655
654656 \brief Basic matrix algebra, element-wise mathematical functions, row and columns statistics, data normalization etc.
655
657
656658 \ingroup LinearAlgebraModule
657659 */
658660 //@{
664666 Namespaces: vigra and vigra::linalg
665667 */
666668 template <class T, class C>
667 inline MultiArrayIndex
669 inline MultiArrayIndex
668670 rowCount(const MultiArrayView<2, T, C> &x)
669671 {
670672 return x.shape(0);
677679 Namespaces: vigra and vigra::linalg
678680 */
679681 template <class T, class C>
680 inline MultiArrayIndex
682 inline MultiArrayIndex
681683 columnCount(const MultiArrayView<2, T, C> &x)
682684 {
683685 return x.shape(1);
698700 }
699701
700702
701 /** Create a row vector view of the matrix \a m starting at element \a first and ranging
703 /** Create a row vector view of the matrix \a m starting at element \a first and ranging
702704 to column \a end (non-inclusive).
703705
704706 <b>\#include</b> \<vigra/matrix.hxx\> or<br>
727729 return m.subarray(Shape(0, d), Shape(rowCount(m), d+1));
728730 }
729731
730 /** Create a column vector view of the matrix \a m starting at element \a first and
732 /** Create a column vector view of the matrix \a m starting at element \a first and
731733 ranging to row \a end (non-inclusive).
732734
733735 <b>\#include</b> \<vigra/matrix.hxx\> or<br>
742744 return m.subarray(first, Shape(end, first[1]+1));
743745 }
744746
745 /** Create a sub vector view of the vector \a m starting at element \a first and
747 /** Create a sub vector view of the vector \a m starting at element \a first and
746748 ranging to row \a end (non-inclusive).
747
749
748750 Note: This function may only be called when either <tt>rowCount(m) == 1</tt> or
749 <tt>columnCount(m) == 1</tt>, i.e. when \a m really represents a vector.
751 <tt>columnCount(m) == 1</tt>, i.e. when \a m really represents a vector.
750752 Otherwise, a PreconditionViolation exception is raised.
751753
752754 <b>\#include</b> \<vigra/matrix.hxx\> or<br>
760762 typedef typename MultiArrayView <2, T, C>::difference_type Shape;
761763 if(columnCount(m) == 1)
762764 return m.subarray(Shape(first, 0), Shape(end, 1));
763 vigra_precondition(rowCount(m) == 1,
765 vigra_precondition(rowCount(m) == 1,
764766 "linalg::subVector(): Input must be a vector (1xN or Nx1).");
765767 return m.subarray(Shape(0, first), Shape(1, end));
766768 }
798800 trace(MultiArrayView<2, T, C> const & m)
799801 {
800802 typedef typename NumericTraits<T>::Promote SumType;
801
803
802804 const MultiArrayIndex size = rowCount(m);
803805 vigra_precondition(size == columnCount(m), "linalg::trace(): Matrix must be square.");
804806
971973 }
972974
973975 /** create the transpose of matrix \a v.
974 This does not copy any data, but only creates a transposed view
976 This does not copy any data, but only creates a transposed view
975977 to the original matrix. A copy is only made when the transposed view
976978 is assigned to another matrix.
977979 Usage:
988990 Namespaces: vigra and vigra::linalg
989991 */
990992 template <class T, class C>
991 inline MultiArrayView<2, T, StridedArrayTag>
993 inline MultiArrayView<2, T, StridedArrayTag>
992994 transpose(MultiArrayView<2, T, C> const & v)
993995 {
994996 return v.transpose();
10071009 joinVertically(const MultiArrayView<2, T, C1> &a, const MultiArrayView<2, T, C2> &b)
10081010 {
10091011 typedef typename TemporaryMatrix<T>::difference_type Shape;
1010
1012
10111013 MultiArrayIndex n = columnCount(a);
10121014 vigra_precondition(n == columnCount(b),
10131015 "joinVertically(): shape mismatch.");
1014
1016
10151017 MultiArrayIndex ma = rowCount(a);
10161018 MultiArrayIndex mb = rowCount(b);
10171019 TemporaryMatrix<T> t(ma + mb, n, T());
10331035 joinHorizontally(const MultiArrayView<2, T, C1> &a, const MultiArrayView<2, T, C2> &b)
10341036 {
10351037 typedef typename TemporaryMatrix<T>::difference_type Shape;
1036
1038
10371039 MultiArrayIndex m = rowCount(a);
10381040 vigra_precondition(m == rowCount(b),
10391041 "joinHorizontally(): shape mismatch.");
1040
1042
10411043 MultiArrayIndex na = columnCount(a);
10421044 MultiArrayIndex nb = columnCount(b);
10431045 TemporaryMatrix<T> t(m, na + nb, T());
10471049 }
10481050
10491051 /** Initialize a matrix with repeated copies of a given matrix.
1050
1052
10511053 Matrix \a r will consist of \a verticalCount downward repetitions of \a v,
10521054 and \a horizontalCount side-by-side repetitions. When \a v has size <tt>m</tt> by <tt>n</tt>,
10531055 \a r must have size <tt>(m*verticalCount)</tt> by <tt>(n*horizontalCount)</tt>.
10571059 Namespace: vigra::linalg
10581060 */
10591061 template <class T, class C1, class C2>
1060 void repeatMatrix(MultiArrayView<2, T, C1> const & v, MultiArrayView<2, T, C2> &r,
1062 void repeatMatrix(MultiArrayView<2, T, C1> const & v, MultiArrayView<2, T, C2> &r,
10611063 unsigned int verticalCount, unsigned int horizontalCount)
10621064 {
10631065 typedef typename Matrix<T>::difference_type Shape;
10651067 MultiArrayIndex m = rowCount(v), n = columnCount(v);
10661068 vigra_precondition(m*verticalCount == rowCount(r) && n*horizontalCount == columnCount(r),
10671069 "repeatMatrix(): Shape mismatch.");
1068
1070
10691071 for(MultiArrayIndex l=0; l<static_cast<MultiArrayIndex>(horizontalCount); ++l)
10701072 {
10711073 for(MultiArrayIndex k=0; k<static_cast<MultiArrayIndex>(verticalCount); ++k)
10761078 }
10771079
10781080 /** Create a new matrix by repeating a given matrix.
1079
1081
10801082 The resulting matrix \a r will consist of \a verticalCount downward repetitions of \a v,
1081 and \a horizontalCount side-by-side repetitions, i.e. it will be of size
1083 and \a horizontalCount side-by-side repetitions, i.e. it will be of size
10821084 <tt>(m*verticalCount)</tt> by <tt>(n*horizontalCount)</tt> when \a v has size <tt>m</tt> by <tt>n</tt>.
10831085 The result is returned as a temporary matrix.
10841086
10871089 Namespace: vigra::linalg
10881090 */
10891091 template <class T, class C>
1090 TemporaryMatrix<T>
1092 TemporaryMatrix<T>
10911093 repeatMatrix(MultiArrayView<2, T, C> const & v, unsigned int verticalCount, unsigned int horizontalCount)
10921094 {
10931095 MultiArrayIndex m = rowCount(v), n = columnCount(v);
13251327 /** calculate the inner product of two matrices representing vectors.
13261328 Typically, matrix \a x has a single row, and matrix \a y has
13271329 a single column, and the other dimensions match. In addition, this
1328 function handles the cases when either or both of the two inputs are
1329 transposed (e.g. it can compute the dot product of two column vectors).
1330 function handles the cases when either or both of the two inputs are
1331 transposed (e.g. it can compute the dot product of two column vectors).
13301332 A <tt>PreconditionViolation</tt> exception is thrown when
1331 the shape conditions are violated.
1333 the shape conditions are violated.
13321334
13331335 <b>\#include</b> \<vigra/matrix.hxx\> or<br>
13341336 <b>\#include</b> \<vigra/linear_algebra.hxx\><br>
13351337 Namespaces: vigra and vigra::linalg
13361338 */
13371339 template <class T, class C1, class C2>
1338 typename NormTraits<T>::SquaredNormType
1340 typename NormTraits<T>::SquaredNormType
13391341 dot(const MultiArrayView<2, T, C1> &x, const MultiArrayView<2, T, C2> &y)
13401342 {
1341 typename NormTraits<T>::SquaredNormType ret =
1343 typename NormTraits<T>::SquaredNormType ret =
13421344 NumericTraits<typename NormTraits<T>::SquaredNormType>::zero();
13431345 if(y.shape(1) == 1)
13441346 {
13491351 else if(x.shape(1) == 1u && x.shape(0) == size) // two column vectors
13501352 for(std::ptrdiff_t i = 0; i < size; ++i)
13511353 ret += x(i, 0) * y(i, 0);
1352 else
1354 else
13531355 vigra_precondition(false, "dot(): wrong matrix shapes.");
13541356 }
13551357 else if(y.shape(0) == 1)
13611363 else if(x.shape(1) == 1u && x.shape(0) == size) // column dot row
13621364 for(std::ptrdiff_t i = 0; i < size; ++i)
13631365 ret += x(i, 0) * y(0, i);
1364 else
1366 else
13651367 vigra_precondition(false, "dot(): wrong matrix shapes.");
13661368 }
13671369 else
13771379 Namespaces: vigra and vigra::linalg
13781380 */
13791381 template <class T, class C1, class C2>
1380 typename NormTraits<T>::SquaredNormType
1382 typename NormTraits<T>::SquaredNormType
13811383 dot(const MultiArrayView<1, T, C1> &x, const MultiArrayView<1, T, C2> &y)
13821384 {
13831385 const MultiArrayIndex n = x.elementCount();
13841386 vigra_precondition(n == y.elementCount(),
13851387 "dot(): shape mismatch.");
1386 typename NormTraits<T>::SquaredNormType ret =
1388 typename NormTraits<T>::SquaredNormType ret =
13871389 NumericTraits<typename NormTraits<T>::SquaredNormType>::zero();
13881390 for(MultiArrayIndex i = 0; i < n; ++i)
13891391 ret += x(i) * y(i);
15451547 {
15461548 public:
15471549 T const & t;
1548
1550
15491551 PointWise(T const & it)
15501552 : t(it)
15511553 {}
16091611 "mmul(): Matrix shapes must agree.");
16101612
16111613 // order of loops ensures that inner loop goes down columns
1612 for(MultiArrayIndex i = 0; i < rcols; ++i)
1613 {
1614 for(MultiArrayIndex j = 0; j < rrows; ++j)
1614 for(MultiArrayIndex i = 0; i < rcols; ++i)
1615 {
1616 for(MultiArrayIndex j = 0; j < rrows; ++j)
16151617 r(j, i) = a(j, 0) * b(0, i);
1616 for(MultiArrayIndex k = 1; k < acols; ++k)
1617 for(MultiArrayIndex j = 0; j < rrows; ++j)
1618 for(MultiArrayIndex k = 1; k < acols; ++k)
1619 for(MultiArrayIndex j = 0; j < rrows; ++j)
16181620 r(j, i) += a(j, k) * b(k, i);
16191621 }
16201622 }
16801682 /** multiply matrices \a a and \a b pointwise.
16811683 \a a and \a b must have matching shapes.
16821684 The result is returned as a temporary matrix.
1683
1685
16841686 Usage:
1685
1687
16861688 \code
16871689 Matrix<double> a(m,n), b(m,n);
1688
1690
16891691 Matrix<double> c = a * pointWise(b);
16901692 // is equivalent to
16911693 // Matrix<double> c = pmul(a, b);
18671869 /** divide matrices \a a and \a b pointwise.
18681870 \a a and \a b must have matching shapes.
18691871 The result is returned as a temporary matrix.
1870
1872
18711873 Usage:
1872
1874
18731875 \code
18741876 Matrix<double> a(m,n), b(m,n);
1875
1877
18761878 Matrix<double> c = a / pointWise(b);
18771879 // is equivalent to
18781880 // Matrix<double> c = pdiv(a, b);
19301932 using vigra::argMaxIf;
19311933
19321934 /** \brief Find the index of the minimum element in a matrix.
1933
1935
19341936 The function returns the index in column-major scan-order sense,
19351937 i.e. according to the order used by <tt>MultiArrayView::operator[]</tt>.
19361938 If the matrix is actually a vector, this is just the row or columns index.
1937 In case of a truly 2-dimensional matrix, the index can be converted to an
1939 In case of a truly 2-dimensional matrix, the index can be converted to an
19381940 index pair by calling <tt>MultiArrayView::scanOrderIndexToCoordinate()</tt>
1939
1941
19401942 <b>Required Interface:</b>
1941
1943
19421944 \code
19431945 bool f = a[0] < NumericTraits<T>::max();
19441946 \endcode
19631965 }
19641966
19651967 /** \brief Find the index of the maximum element in a matrix.
1966
1968
19671969 The function returns the index in column-major scan-order sense,
19681970 i.e. according to the order used by <tt>MultiArrayView::operator[]</tt>.
19691971 If the matrix is actually a vector, this is just the row or columns index.
1970 In case of a truly 2-dimensional matrix, the index can be converted to an
1972 In case of a truly 2-dimensional matrix, the index can be converted to an
19711973 index pair by calling <tt>MultiArrayView::scanOrderIndexToCoordinate()</tt>
1972
1974
19731975 <b>Required Interface:</b>
1974
1976
19751977 \code
19761978 bool f = NumericTraits<T>::min() < a[0];
19771979 \endcode
19961998 }
19971999
19982000 /** \brief Find the index of the minimum element in a matrix subject to a condition.
1999
2001
20002002 The function returns <tt>-1</tt> if no element conforms to \a condition.
20012003 Otherwise, the index of the maximum element is returned in column-major scan-order sense,
20022004 i.e. according to the order used by <tt>MultiArrayView::operator[]</tt>.
20032005 If the matrix is actually a vector, this is just the row or columns index.
2004 In case of a truly 2-dimensional matrix, the index can be converted to an
2006 In case of a truly 2-dimensional matrix, the index can be converted to an
20052007 index pair by calling <tt>MultiArrayView::scanOrderIndexToCoordinate()</tt>
2006
2008
20072009 <b>Required Interface:</b>
2008
2010
20092011 \code
20102012 bool c = condition(a[0]);
20112013 bool f = a[0] < NumericTraits<T>::max();
20312033 }
20322034
20332035 /** \brief Find the index of the maximum element in a matrix subject to a condition.
2034
2036
20352037 The function returns <tt>-1</tt> if no element conforms to \a condition.
20362038 Otherwise, the index of the maximum element is returned in column-major scan-order sense,
20372039 i.e. according to the order used by <tt>MultiArrayView::operator[]</tt>.
20382040 If the matrix is actually a vector, this is just the row or columns index.
2039 In case of a truly 2-dimensional matrix, the index can be converted to an
2041 In case of a truly 2-dimensional matrix, the index can be converted to an
20402042 index pair by calling <tt>MultiArrayView::scanOrderIndexToCoordinate()</tt>
2041
2043
20422044 <b>Required Interface:</b>
2043
2045
20442046 \code
20452047 bool c = condition(a[0]);
20462048 bool f = NumericTraits<T>::min() < a[0];
23472349
23482350 template <class T1, class C1, class T2, class C2, class T3, class C3>
23492351 void
2350 columnStatisticsImpl(MultiArrayView<2, T1, C1> const & A,
2352 columnStatisticsImpl(MultiArrayView<2, T1, C1> const & A,
23512353 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & sumOfSquaredDifferences)
23522354 {
23532355 MultiArrayIndex m = rowCount(A);
23592361 // West's algorithm for incremental variance computation
23602362 mean.init(NumericTraits<T2>::zero());
23612363 sumOfSquaredDifferences.init(NumericTraits<T3>::zero());
2362
2364
23632365 for(MultiArrayIndex k=0; k<m; ++k)
23642366 {
23652367 typedef typename NumericTraits<T2>::RealPromote TmpType;
23732375
23742376 template <class T1, class C1, class T2, class C2, class T3, class C3>
23752377 void
2376 columnStatistics2PassImpl(MultiArrayView<2, T1, C1> const & A,
2378 columnStatistics2PassImpl(MultiArrayView<2, T1, C1> const & A,
23772379 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & sumOfSquaredDifferences)
23782380 {
23792381 MultiArrayIndex m = rowCount(A);
23832385 "columnStatistics(): Shape mismatch between input and output.");
23842386
23852387 // two-pass algorithm for incremental variance computation
2386 mean.init(NumericTraits<T2>::zero());
2388 mean.init(NumericTraits<T2>::zero());
23872389 for(MultiArrayIndex k=0; k<m; ++k)
23882390 {
23892391 mean += rowVector(A, k);
23902392 }
23912393 mean /= static_cast<double>(m);
2392
2394
23932395 sumOfSquaredDifferences.init(NumericTraits<T3>::zero());
23942396 for(MultiArrayIndex k=0; k<m; ++k)
23952397 {
24032405 */
24042406 //@{
24052407 /** Compute statistics of every column of matrix \a A.
2406
2408
24072409 The result matrices must be row vectors with as many columns as \a A.
24082410
24092411 <b> Declarations:</b>
24132415 namespace vigra { namespace linalg {
24142416 template <class T1, class C1, class T2, class C2>
24152417 void
2416 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2418 columnStatistics(MultiArrayView<2, T1, C1> const & A,
24172419 MultiArrayView<2, T2, C2> & mean);
24182420 } }
24192421 \endcode
24232425 namespace vigra { namespace linalg {
24242426 template <class T1, class C1, class T2, class C2, class T3, class C3>
24252427 void
2426 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2427 MultiArrayView<2, T2, C2> & mean,
2428 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2429 MultiArrayView<2, T2, C2> & mean,
24282430 MultiArrayView<2, T3, C3> & stdDev);
24292431 } }
24302432 \endcode
24342436 namespace vigra { namespace linalg {
24352437 template <class T1, class C1, class T2, class C2, class T3, class C3, class T4, class C4>
24362438 void
2437 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2438 MultiArrayView<2, T2, C2> & mean,
2439 MultiArrayView<2, T3, C3> & stdDev,
2439 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2440 MultiArrayView<2, T2, C2> & mean,
2441 MultiArrayView<2, T3, C3> & stdDev,
24402442 MultiArrayView<2, T4, C4> & norm);
24412443 } }
24422444 \endcode
24602462
24612463 template <class T1, class C1, class T2, class C2>
24622464 void
2463 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2465 columnStatistics(MultiArrayView<2, T1, C1> const & A,
24642466 MultiArrayView<2, T2, C2> & mean)
24652467 {
24662468 MultiArrayIndex m = rowCount(A);
24692471 "columnStatistics(): Shape mismatch between input and output.");
24702472
24712473 mean.init(NumericTraits<T2>::zero());
2472
2474
24732475 for(MultiArrayIndex k=0; k<m; ++k)
24742476 {
24752477 mean += rowVector(A, k);
24792481
24802482 template <class T1, class C1, class T2, class C2, class T3, class C3>
24812483 void
2482 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2484 columnStatistics(MultiArrayView<2, T1, C1> const & A,
24832485 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & stdDev)
24842486 {
24852487 detail::columnStatisticsImpl(A, mean, stdDev);
2486
2488
24872489 if(rowCount(A) > 1)
24882490 stdDev = sqrt(stdDev / T3(rowCount(A) - 1.0));
24892491 }
24902492
24912493 template <class T1, class C1, class T2, class C2, class T3, class C3, class T4, class C4>
24922494 void
2493 columnStatistics(MultiArrayView<2, T1, C1> const & A,
2495 columnStatistics(MultiArrayView<2, T1, C1> const & A,
24942496 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & stdDev, MultiArrayView<2, T4, C4> & norm)
24952497 {
24962498 MultiArrayIndex m = rowCount(A);
25062508 }
25072509
25082510 /** Compute statistics of every row of matrix \a A.
2509
2511
25102512 The result matrices must be column vectors with as many rows as \a A.
25112513
25122514 <b> Declarations:</b>
25162518 namespace vigra { namespace linalg {
25172519 template <class T1, class C1, class T2, class C2>
25182520 void
2519 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2521 rowStatistics(MultiArrayView<2, T1, C1> const & A,
25202522 MultiArrayView<2, T2, C2> & mean);
25212523 } }
25222524 \endcode
25262528 namespace vigra { namespace linalg {
25272529 template <class T1, class C1, class T2, class C2, class T3, class C3>
25282530 void
2529 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2530 MultiArrayView<2, T2, C2> & mean,
2531 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2532 MultiArrayView<2, T2, C2> & mean,
25312533 MultiArrayView<2, T3, C3> & stdDev);
25322534 } }
25332535 \endcode
25372539 namespace vigra { namespace linalg {
25382540 template <class T1, class C1, class T2, class C2, class T3, class C3, class T4, class C4>
25392541 void
2540 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2541 MultiArrayView<2, T2, C2> & mean,
2542 MultiArrayView<2, T3, C3> & stdDev,
2542 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2543 MultiArrayView<2, T2, C2> & mean,
2544 MultiArrayView<2, T3, C3> & stdDev,
25432545 MultiArrayView<2, T4, C4> & norm);
25442546 } }
25452547 \endcode
25632565
25642566 template <class T1, class C1, class T2, class C2>
25652567 void
2566 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2568 rowStatistics(MultiArrayView<2, T1, C1> const & A,
25672569 MultiArrayView<2, T2, C2> & mean)
25682570 {
25692571 vigra_precondition(1 == columnCount(mean) && rowCount(A) == rowCount(mean),
25742576
25752577 template <class T1, class C1, class T2, class C2, class T3, class C3>
25762578 void
2577 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2579 rowStatistics(MultiArrayView<2, T1, C1> const & A,
25782580 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & stdDev)
25792581 {
25802582 vigra_precondition(1 == columnCount(mean) && rowCount(A) == rowCount(mean) &&
25872589
25882590 template <class T1, class C1, class T2, class C2, class T3, class C3, class T4, class C4>
25892591 void
2590 rowStatistics(MultiArrayView<2, T1, C1> const & A,
2592 rowStatistics(MultiArrayView<2, T1, C1> const & A,
25912593 MultiArrayView<2, T2, C2> & mean, MultiArrayView<2, T3, C3> & stdDev, MultiArrayView<2, T4, C4> & norm)
25922594 {
25932595 vigra_precondition(1 == columnCount(mean) && rowCount(A) == rowCount(mean) &&
25952597 1 == columnCount(norm) && rowCount(A) == rowCount(norm),
25962598 "rowStatistics(): Shape mismatch between input and output.");
25972599 MultiArrayView<2, T2, StridedArrayTag> tm = transpose(mean);
2598 MultiArrayView<2, T3, StridedArrayTag> ts = transpose(stdDev);
2600 MultiArrayView<2, T3, StridedArrayTag> ts = transpose(stdDev);
25992601 MultiArrayView<2, T4, StridedArrayTag> tn = transpose(norm);
26002602 columnStatistics(transpose(A), tm, ts, tn);
26012603 }
26132615 "updateCovarianceMatrix(): Shape mismatch between feature vector and mean vector.");
26142616 vigra_precondition(n == rowCount(covariance) && n == columnCount(covariance),
26152617 "updateCovarianceMatrix(): Shape mismatch between feature vector and covariance matrix.");
2616
2618
26172619 // West's algorithm for incremental covariance matrix computation
26182620 Matrix<T2> t = features - mean;
26192621 ++count;
26202622 T2 f = T2(1.0) / count,
26212623 f1 = T2(1.0) - f;
26222624 mean += f*t;
2623
2625
26242626 if(rowCount(features) == 1) // update column covariance from current row
26252627 {
26262628 for(MultiArrayIndex k=0; k<n; ++k)
26502652 } // namespace detail
26512653
26522654 /** \brief Compute the covariance matrix between the columns of a matrix \a features.
2653
2655
26542656 The result matrix \a covariance must be a square matrix with as many rows and
26552657 columns as the number of columns in matrix \a features.
26562658
26732675 }
26742676
26752677 /** \brief Compute the covariance matrix between the columns of a matrix \a features.
2676
2678
26772679 The result is returned as a square temporary matrix with as many rows and
26782680 columns as the number of columns in matrix \a features.
26792681
26812683 Namespace: vigra
26822684 */
26832685 template <class T, class C>
2684 TemporaryMatrix<T>
2686 TemporaryMatrix<T>
26852687 covarianceMatrixOfColumns(MultiArrayView<2, T, C> const & features)
26862688 {
26872689 TemporaryMatrix<T> res(columnCount(features), columnCount(features));
26902692 }
26912693
26922694 /** \brief Compute the covariance matrix between the rows of a matrix \a features.
2693
2695
26942696 The result matrix \a covariance must be a square matrix with as many rows and
26952697 columns as the number of rows in matrix \a features.
26962698
27132715 }
27142716
27152717 /** \brief Compute the covariance matrix between the rows of a matrix \a features.
2716
2718
27172719 The result is returned as a square temporary matrix with as many rows and
27182720 columns as the number of rows in matrix \a features.
27192721
27212723 Namespace: vigra
27222724 */
27232725 template <class T, class C>
2724 TemporaryMatrix<T>
2726 TemporaryMatrix<T>
27252727 covarianceMatrixOfRows(MultiArrayView<2, T, C> const & features)
27262728 {
27272729 TemporaryMatrix<T> res(rowCount(features), rowCount(features));
27402742
27412743 template <class T, class C1, class C2, class C3, class C4>
27422744 void
2743 prepareDataImpl(const MultiArrayView<2, T, C1> & A,
2744 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
2745 prepareDataImpl(const MultiArrayView<2, T, C1> & A,
2746 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
27452747 DataPreparationGoals goals)
27462748 {
27472749 MultiArrayIndex m = rowCount(A);
27482750 MultiArrayIndex n = columnCount(A);
2749 vigra_precondition(A.shape() == res.shape() &&
2751 vigra_precondition(A.shape() == res.shape() &&
27502752 n == columnCount(offset) && 1 == rowCount(offset) &&
27512753 n == columnCount(scaling) && 1 == rowCount(scaling),
27522754 "prepareDataImpl(): Shape mismatch between input and output.");
27582760 scaling.init(NumericTraits<T>::one());
27592761 return;
27602762 }
2761
2763
27622764 bool zeroMean = (goals & ZeroMean) != 0;
27632765 bool unitVariance = (goals & UnitVariance) != 0;
27642766 bool unitNorm = (goals & UnitNorm) != 0;
27682770 {
27692771 vigra_precondition(goals == UnitSum,
27702772 "prepareData(): Unit sum is not compatible with any other data preparation goal.");
2771
2773
27722774 transformMultiArray(srcMultiArrayRange(A), destMultiArrayRange(scaling), FindSum<T>());
2773
2775
27742776 offset.init(NumericTraits<T>::zero());
2775
2777
27762778 for(MultiArrayIndex k=0; k<n; ++k)
27772779 {
27782780 if(scaling(0, k) != NumericTraits<T>::zero())
27852787 scaling(0, k) = NumericTraits<T>::one();
27862788 }
27872789 }
2788
2789 return;
2790
2791 return;
27902792 }
27912793
27922794 vigra_precondition(!(unitVariance && unitNorm),
27942796
27952797 Matrix<T> mean(1, n), sumOfSquaredDifferences(1, n);
27962798 detail::columnStatisticsImpl(A, mean, sumOfSquaredDifferences);
2797
2799
27982800 for(MultiArrayIndex k=0; k<n; ++k)
27992801 {
28002802 T stdDev = std::sqrt(sumOfSquaredDifferences(0, k) / T(m-1));
28012803 if(closeAtTolerance(stdDev / mean(0,k), NumericTraits<T>::zero()))
28022804 stdDev = NumericTraits<T>::zero();
2803 if(zeroMean && stdDev > NumericTraits<T>::zero())
2805 if(zeroMean && stdDev > NumericTraits<T>::zero())
28042806 {
28052807 columnVector(res, k) = columnVector(A, k) - mean(0,k);
28062808 offset(0, k) = mean(0, k);
28072809 mean(0, k) = NumericTraits<T>::zero();
28082810 }
2809 else
2811 else
28102812 {
28112813 columnVector(res, k) = columnVector(A, k);
28122814 offset(0, k) = NumericTraits<T>::zero();
28132815 }
2814
2816
28152817 T norm = mean(0,k) == NumericTraits<T>::zero()
28162818 ? std::sqrt(sumOfSquaredDifferences(0, k))
28172819 : std::sqrt(sumOfSquaredDifferences(0, k) + T(m) * sq(mean(0,k)));
28352837 } // namespace detail
28362838
28372839 /** \brief Standardize the columns of a matrix according to given <tt>DataPreparationGoals</tt>.
2838
2839 For every column of the matrix \a A, this function computes mean,
2840 standard deviation, and norm. It then applies a linear transformation to the values of
2840
2841 For every column of the matrix \a A, this function computes mean,
2842 standard deviation, and norm. It then applies a linear transformation to the values of
28412843 the column according to these statistics and the given <tt>DataPreparationGoals</tt>.
28422844 The result is returned in matrix \a res which must have the same size as \a A.
28432845 Optionally, the transformation applied can also be returned in the matrices \a offset
28442846 and \a scaling (see below for an example how these matrices can be used to standardize
28452847 more data according to the same transformation).
2846
2848
28472849 The following <tt>DataPreparationGoals</tt> are supported:
2848
2850
28492851 <DL>
2850 <DT><tt>ZeroMean</tt><DD> Subtract the column mean from every column if the values in the column are not constant.
2852 <DT><tt>ZeroMean</tt><DD> Subtract the column mean from every column if the values in the column are not constant.
28512853 Do nothing in a constant column.
2852 <DT><tt>UnitSum</tt><DD> Scale the columns so that their sum is one if the sum was initially non-zero.
2854 <DT><tt>UnitSum</tt><DD> Scale the columns so that their sum is one if the sum was initially non-zero.
28532855 Do nothing in a zero-sum column.
2854 <DT><tt>UnitVariance</tt><DD> Divide by the column standard deviation if the values in the column are not constant.
2856 <DT><tt>UnitVariance</tt><DD> Divide by the column standard deviation if the values in the column are not constant.
28552857 Do nothing in a constant column.
28562858 <DT><tt>UnitNorm</tt><DD> Divide by the column norm if it is non-zero.
2857 <DT><tt>ZeroMean | UnitVariance</tt><DD> First subtract the mean and then divide by the standard deviation, unless the
2859 <DT><tt>ZeroMean | UnitVariance</tt><DD> First subtract the mean and then divide by the standard deviation, unless the
28582860 column is constant (in which case the column remains unchanged).
28592861 <DT><tt>ZeroMean | UnitNorm</tt><DD> If the column is non-constant, subtract the mean. Then divide by the norm
28602862 of the result if the norm is non-zero.
28682870 namespace vigra { namespace linalg {
28692871 template <class T, class C1, class C2, class C3, class C4>
28702872 void
2871 prepareColumns(MultiArrayView<2, T, C1> const & A,
2872 MultiArrayView<2, T, C2> & res,
2873 MultiArrayView<2, T, C3> & offset,
2874 MultiArrayView<2, T, C4> & scaling,
2873 prepareColumns(MultiArrayView<2, T, C1> const & A,
2874 MultiArrayView<2, T, C2> & res,
2875 MultiArrayView<2, T, C3> & offset,
2876 MultiArrayView<2, T, C4> & scaling,
28752877 DataPreparationGoals goals = ZeroMean | UnitVariance);
28762878 } }
28772879 \endcode
28812883 namespace vigra { namespace linalg {
28822884 template <class T, class C1, class C2>
28832885 void
2884 prepareColumns(MultiArrayView<2, T, C1> const & A,
2885 MultiArrayView<2, T, C2> & res,
2886 prepareColumns(MultiArrayView<2, T, C1> const & A,
2887 MultiArrayView<2, T, C2> & res,
28862888 DataPreparationGoals goals = ZeroMean | UnitVariance);
28872889 } }
28882890 \endcode
28992901 Matrix standardizedA(rows, columns), offset(1, columns), scaling(1, columns);
29002902
29012903 prepareColumns(A, standardizedA, offset, scaling, ZeroMean | UnitNorm);
2902
2904
29032905 // use offset and scaling to prepare additional data according to the same transformation
29042906 Matrix newData(nrows, columns);
2905
2907
29062908 Matrix standardizedNewData = (newData - repeatMatrix(offset, nrows, 1)) * pointWise(repeatMatrix(scaling, nrows, 1));
29072909
29082910 \endcode
29112913
29122914 template <class T, class C1, class C2, class C3, class C4>
29132915 inline void
2914 prepareColumns(MultiArrayView<2, T, C1> const & A,
2915 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
2916 prepareColumns(MultiArrayView<2, T, C1> const & A,
2917 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
29162918 DataPreparationGoals goals = ZeroMean | UnitVariance)
29172919 {
29182920 detail::prepareDataImpl(A, res, offset, scaling, goals);
29202922
29212923 template <class T, class C1, class C2>
29222924 inline void
2923 prepareColumns(MultiArrayView<2, T, C1> const & A, MultiArrayView<2, T, C2> & res,
2925 prepareColumns(MultiArrayView<2, T, C1> const & A, MultiArrayView<2, T, C2> & res,
29242926 DataPreparationGoals goals = ZeroMean | UnitVariance)
29252927 {
29262928 Matrix<T> offset(1, columnCount(A)), scaling(1, columnCount(A));
29282930 }
29292931
29302932 /** \brief Standardize the rows of a matrix according to given <tt>DataPreparationGoals</tt>.
2931
2933
29322934 This algorithm works in the same way as \ref prepareColumns() (see there for detailed
29332935 documentation), but is applied to the rows of the matrix \a A instead. Accordingly, the
29342936 matrices holding the parameters of the linear transformation must be column vectors
29392941 Standardize the matrix and return the parameters of the linear transformation.
29402942 The matrices \a offset and \a scaling must be column vectors
29412943 with as many rows as \a A.
2942
2944
29432945 \code
29442946 namespace vigra { namespace linalg {
29452947 template <class T, class C1, class C2, class C3, class C4>
29462948 void
2947 prepareRows(MultiArrayView<2, T, C1> const & A,
2948 MultiArrayView<2, T, C2> & res,
2949 MultiArrayView<2, T, C3> & offset,
2950 MultiArrayView<2, T, C4> & scaling,
2949 prepareRows(MultiArrayView<2, T, C1> const & A,
2950 MultiArrayView<2, T, C2> & res,
2951 MultiArrayView<2, T, C3> & offset,
2952 MultiArrayView<2, T, C4> & scaling,
29512953 DataPreparationGoals goals = ZeroMean | UnitVariance);
29522954 } }
29532955 \endcode
29572959 namespace vigra { namespace linalg {
29582960 template <class T, class C1, class C2>
29592961 void
2960 prepareRows(MultiArrayView<2, T, C1> const & A,
2961 MultiArrayView<2, T, C2> & res,
2962 prepareRows(MultiArrayView<2, T, C1> const & A,
2963 MultiArrayView<2, T, C2> & res,
29622964 DataPreparationGoals goals = ZeroMean | UnitVariance);
29632965 } }
29642966 \endcode
29752977 Matrix standardizedA(rows, columns), offset(rows, 1), scaling(rows, 1);
29762978
29772979 prepareRows(A, standardizedA, offset, scaling, ZeroMean | UnitNorm);
2978
2980
29792981 // use offset and scaling to prepare additional data according to the same transformation
29802982 Matrix newData(rows, ncolumns);
2981
2983
29822984 Matrix standardizedNewData = (newData - repeatMatrix(offset, 1, ncolumns)) * pointWise(repeatMatrix(scaling, 1, ncolumns));
29832985
29842986 \endcode
29872989
29882990 template <class T, class C1, class C2, class C3, class C4>
29892991 inline void
2990 prepareRows(MultiArrayView<2, T, C1> const & A,
2991 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
2992 prepareRows(MultiArrayView<2, T, C1> const & A,
2993 MultiArrayView<2, T, C2> & res, MultiArrayView<2, T, C3> & offset, MultiArrayView<2, T, C4> & scaling,
29922994 DataPreparationGoals goals = ZeroMean | UnitVariance)
29932995 {
29942996 MultiArrayView<2, T, StridedArrayTag> tr = transpose(res), to = transpose(offset), ts = transpose(scaling);
29972999
29983000 template <class T, class C1, class C2>
29993001 inline void
3000 prepareRows(MultiArrayView<2, T, C1> const & A, MultiArrayView<2, T, C2> & res,
3002 prepareRows(MultiArrayView<2, T, C1> const & A, MultiArrayView<2, T, C2> & res,
30013003 DataPreparationGoals goals = ZeroMean | UnitVariance)
30023004 {
30033005 MultiArrayView<2, T, StridedArrayTag> tr = transpose(res);
296296 typedef MERGE_GRAPH Graph;
297297 typedef typename Graph::Node Node;
298298 // Invalid constructor & conversion.
299 MergeGraphNodeIt(const lemon::Invalid & invalid = lemon::INVALID)
299 MergeGraphNodeIt(const lemon::Invalid & /*invalid*/ = lemon::INVALID)
300300 : graph_(NULL),
301301 nodeIdIt_(),
302302 node_(){
345345 typedef MERGE_GRAPH Graph;
346346 typedef typename Graph::Edge Edge;
347347 // Invalid constructor & conversion.
348 MergeGraphEdgeIt(const lemon::Invalid & invalid = lemon::INVALID)
348 MergeGraphEdgeIt(const lemon::Invalid & /*invalid*/ = lemon::INVALID)
349349 : graph_(NULL),
350350 edgeIdIt_(),
351351 edge_(){
399399 typedef typename Graph::Arc Arc;
400400 typedef typename Graph::Edge Edge;
401401 typedef typename Graph::EdgeIt EdgeIt;
402 MergeGraphArcIt(const lemon::Invalid invalid = lemon::INVALID )
402 MergeGraphArcIt(const lemon::Invalid /*invalid*/ = lemon::INVALID )
403403 : graph_(NULL),
404404 pos_(),
405405 inFirstHalf_(false),
11381138 inline bool MergeGraphAdaptor<GRAPH>::stateOfInitalEdge(
11391139 const typename MergeGraphAdaptor<GRAPH>::IdType initalEdge
11401140 )const{
1141 const index_type rep = reprEdgeId(initalEdge);
11421141
11431142 const index_type rnid0= reprNodeId( graphUId(initalEdge) );
11441143 const index_type rnid1= reprNodeId( graphVId(initalEdge) );
11791178 nDoubleEdges_=0;
11801179 for(;iter!=end;++iter){
11811180 const size_t adjToDeadNodeId = iter->nodeId();
1182 if(adjToDeadNodeId!=newNodeRep){
1181 if(newNodeRep < 0 || adjToDeadNodeId!=static_cast<unsigned long long>(newNodeRep)){
11831182
11841183 // REFACTOR ME, we can make that faster if
11851184 // we do that in set intersect style
14251424 }
14261425
14271426 template<class T>
1428 inline bool operator == (const ConstRepIter<T> & iter,const lemon::Invalid & iv){
1427 inline bool operator == (const ConstRepIter<T> & iter,const lemon::Invalid & /*iv*/){
14291428 return iter.isEnd();
14301429 }
14311430 template<class T>
1432 inline bool operator == (const lemon::Invalid & iv , const ConstRepIter<T> & iter){
1431 inline bool operator == (const lemon::Invalid & /*iv*/ , const ConstRepIter<T> & iter){
14331432 return iter.isEnd();
14341433 }
14351434
14361435 template<class T>
1437 inline bool operator != (const ConstRepIter<T> & iter,const lemon::Invalid & iv){
1436 inline bool operator != (const ConstRepIter<T> & iter,const lemon::Invalid & /*iv*/){
14381437 return !iter.isEnd();
14391438 }
14401439 template<class T>
1441 inline bool operator != (const lemon::Invalid & iv , const ConstRepIter<T> & iter){
1440 inline bool operator != (const lemon::Invalid & /*iv*/ , const ConstRepIter<T> & iter){
14421441 return !iter.isEnd();
14431442 }
14441443
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
3434
8585 };
8686
8787 /** \addtogroup MultiArrayTags Multi-dimensional Array Tags
88 Meta-programming tags to mark arrays as strided or unstrided.
88
89 Meta-programming tags to mark arrays as strided, unstrided, or chunked.
90
91 An array is unstrided if the array elements occupy consecutive
92 memory locations, strided if adjacent elements have a constant
93 offset (e.g. when a view skips every other array element),
94 and chunked if the array is stored in rectangular blocks with
95 arbitrary offsets in between.
96
97 These tags are used to specialize algorithms for different memory
98 layouts. Older compilers can generate faster code for unstrided arrays.
99 Normally, users don't have to worry about these tags.
89100 */
90101
91102 //@{
142153 typedef VigraTrueType isConst;
143154 };
144155
145 template<class T>
156 template<class T>
146157 class TypeTraits<T *>
147158 {
148159 public:
151162 typedef VigraTrueType isBuiltinType;
152163 };
153164
154 template<class T>
165 template<class T>
155166 class TypeTraits<T const *>
156167 {
157168 public:
167178 template <int size>
168179 struct SizeToType;
169180
170 } // namespace detail
181 } // namespace detail
171182
172183 #define VIGRA_TYPE_TRAITS(type, size) \
173184 template<> \
188199 { \
189200 typedef type result; \
190201 }; \
191 }
202 }
192203
193204 VIGRA_TYPE_TRAITS(char, 1)
194205 VIGRA_TYPE_TRAITS(signed char, 2)
381392 {
382393 typedef char falseResult[1];
383394 typedef char trueResult[2];
384
395
385396 static From const & check();
386
397
387398 static falseResult * testIsConvertible(...);
388399 static trueResult * testIsConvertible(To const &);
389
400
390401 enum { resultSize = sizeof(*testIsConvertible(check())) };
391
402
392403 static const bool value = (resultSize == 2);
393 typedef typename
404 typedef typename
394405 IfBool<value, VigraTrueType, VigraFalseType>::type
395406 type;
396407 };
400411 {
401412 typedef char falseResult[1];
402413 typedef char trueResult[2];
403
414
404415 static falseResult * testIsDerivedFrom(...);
405416 static trueResult * testIsDerivedFrom(BASE const *);
406
417
407418 enum { resultSize = sizeof(*testIsDerivedFrom(static_cast<DERIVED const *>(0))) };
408
419
409420 static const bool value = (resultSize == 2);
410 typedef typename
421 typedef typename
411422 IfBool<value, VigraTrueType, VigraFalseType>::type
412423 type;
413424
473484 {
474485 typedef char falseResult[1];
475486 typedef char trueResult[2];
476
487
477488 static falseResult * test(...);
478489 static trueResult * test(USER<sfinae_void>);
479
490
480491 enum { resultSize = sizeof(*test(static_cast<T*>(0))) };
481
492
482493 static const bool value = (resultSize == 2);
483494 typedef typename
484495 IfBool<value, VigraTrueType, VigraFalseType>::type
546557 {
547558 typedef char falseResult[1];
548559 typedef char trueResult[2];
549
560
550561 static falseResult * test(...);
551562 template <class U, unsigned n>
552563 static trueResult * test(U (*)[n]);
553
564
554565 enum { resultSize = sizeof(*test(static_cast<T*>(0))) };
555
566
556567 static const bool value = (resultSize == 2);
557568 typedef typename
558569 IfBool<value, VigraTrueType, VigraFalseType>::type
863874 typedef L01 type;
864875 };
865876
877 template <typename T0>
878 inline void ignore_argument(const T0 &)
879 {}
880
881 template <typename T0, typename T1>
882 inline void ignore_argument(const T0 &, const T1 &)
883 {}
884
885 template <typename T0, typename T1, typename T2>
886 inline void ignore_argument(const T0 &, const T1 &, const T2 &)
887 {}
888
889 template <typename T0, typename T1, typename T2, typename T3>
890 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &)
891 {}
892
893 template <typename T0, typename T1, typename T2, typename T3, typename T4>
894 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &)
895 {}
896
897 template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
898 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &, const T5 &)
899 {}
900
901 template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
902 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &, const T5 &, const T6 &)
903 {}
904
905 template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
906 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &, const T5 &, const T6 &, const T7 &)
907 {}
908
909 template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
910 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &, const T5 &, const T6 &, const T7 &, const T8 &)
911 {}
912
913 template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
914 inline void ignore_argument(const T0 &, const T1 &, const T2 &, const T3 &, const T4 &, const T5 &, const T6 &, const T7 &, const T8 &, const T9 &)
915 {}
916
866917 // mask cl.exe shortcomings [end]
867918 #if defined(_MSC_VER)
868919 #pragma warning( pop )
3939
4040 #include <cmath>
4141
42 /** \addtogroup MathFunctions
43 */
44 //@{
45
46
4247 namespace vigra{
48
49 /// \brief Define functors for various common metrics.
4350 namespace metrics{
4451
45
4652 template<class T>
4753 class ChiSquared{
4854 public:
5359 template<class A, class B>
5460 T operator()(const A & a,const B & b)const{
5561 return opImpl(a.begin(),a.end(),b.begin());
56 }
57 private:
58 template<class ITER_A,class ITER_B>
59 T opImpl(
60 ITER_A iterA ,ITER_A endA ,ITER_B iterB
61 )const{
62 }
63 private:
64 template<class ITER_A,class ITER_B>
65 T opImpl(
66 ITER_A iterA ,ITER_A endA ,ITER_B iterB
67 )const{
6268 T res = 0.0;
6369 while(iterA!=endA){
6470 const T aa=static_cast<T>(*iterA);
6571 const T bb=static_cast<T>(*iterB);
6672 const T sum = aa + bb;
67 const T diff = aa - bb;
73 const T diff = aa - bb;
6874 if(sum> static_cast<T>(0.0000001))
6975 res+=(diff*diff)/sum;
7076 ++iterA;
8490 template<class A, class B>
8591 T operator()(const A & a,const B & b)const{
8692 return opImpl(a.begin(),a.end(),b.begin());
87 }
88 private:
89 template<class ITER_A,class ITER_B>
90 T opImpl(
91 ITER_A iterA ,ITER_A endA ,ITER_B iterB
92 )const{
93 }
94 private:
95 template<class ITER_A,class ITER_B>
96 T opImpl(
97 ITER_A iterA ,ITER_A endA ,ITER_B iterB
98 )const{
9399 T res = 0.0;
94100 while(iterA!=endA){
95101 const T aa=std::sqrt(static_cast<T>(*iterA));
96102 const T bb=std::sqrt(static_cast<T>(*iterB));
97 const T diff = aa - bb;
103 const T diff = aa - bb;
98104 res+=diff*diff;
99105 ++iterA;
100106 ++iterB;
102108 return std::sqrt(res)/std::sqrt(2.0);
103109 }
104110 };
105
111
106112 template<class T,unsigned int NORM,bool TAKE_ROOT=true>
107113 class PNorm{
108114 public:
113119 template<class A, class B>
114120 T operator()(const A & a,const B & b)const{
115121 return opImpl(a.begin(),a.end(),b.begin());
116 }
117 private:
118 template<class ITER_A,class ITER_B>
119 T opImpl(
120 ITER_A iterA ,ITER_A endA ,ITER_B iterB
122 }
123 private:
124 template<class ITER_A,class ITER_B>
125 T opImpl(
126 ITER_A iterA ,ITER_A endA ,ITER_B iterB
121127 )const{
122128 T res = static_cast<T>(0.0);
123129 while(iterA!=endA){
169175 template<class A, class B>
170176 T operator()(const A & a,const B & b)const{
171177 return opImpl(a.begin(),a.end(),b.begin());
172 }
173 private:
174 template<class ITER_A,class ITER_B>
175 T opImpl(
176 ITER_A iterA ,ITER_A endA ,ITER_B iterB
178 }
179 private:
180 template<class ITER_A,class ITER_B>
181 T opImpl(
182 ITER_A iterA ,ITER_A endA ,ITER_B iterB
177183 )const{
178184 T res = static_cast<T>(0.0);
179185 while(iterA!=endA){
199205 template<class A, class B>
200206 T operator()(const A & a,const B & b)const{
201207 return opImpl(a.begin(),a.end(),b.begin());
202 }
203 private:
204 template<class ITER_A,class ITER_B>
205 T opImpl(
206 ITER_A iterA ,ITER_A endA ,ITER_B iterB
208 }
209 private:
210 template<class ITER_A,class ITER_B>
211 T opImpl(
212 ITER_A iterA ,ITER_A endA ,ITER_B iterB
207213 )const{
208214 T res = static_cast<T>(0.0);
209215 while(iterA!=endA){
217223 }
218224 };
219225
220 enum MetricType{
221 ChiSquaredMetric=0,
222 HellingerMetric=1,
223 SquaredNormMetric=2,
224 NormMetric=3,
225 ManhattanMetric=4,
226 SymetricKlMetric=5,
227 BhattacharyaMetric=6
228 };
229
230
226 /** \brief Tags to select a metric for vector distance computation.
227 */
228 enum MetricType
229 {
230 ChiSquaredMetric=0, //!< chi-squared distance for histograms (sum of squared differences normalized by means)
231 HellingerMetric=1, //!< Hellinger distance (Euclidean distance between the square-root vectors)
232 SquaredNormMetric=2, //!< squared Euclidean distance
233 NormMetric=3, //!< Euclidean distance (L2 norm)
234 L2Norm=NormMetric, //!< Euclidean distance (L2 norm)
235 ManhattanMetric=4, //!< Manhattan distance (L1 norm)
236 L1Norm=ManhattanMetric, //!< Manhattan distance (L1 norm)
237 SymetricKlMetric=5, //!< symmetric Kullback-Leibler divergence
238 BhattacharyaMetric=6 //!< Bhattacharya distance (sum of elementwise geometric means)
239 };
240
241
242 /** \brief Functor to compute a metric between two vectors.
243
244 The value type of the metric is given by template parameter <tt>T</tt>. Supported
245 metrics are defined in \ref vigra::metrics::MetricType. The functor's argument
246 must support <tt>begin()</tt> and <tt>end()</tt> to create an STL range.
247 */
231248 template<class T>
232249 class Metric{
233250 public:
234251
252 /** \brief Construct functor for the given metric.
253
254 The value type of the metric is given by template parameter <tt>T</tt>. Supported
255 metrics are defined in \ref vigra::metrics::MetricType.
256 */
235257 Metric(const MetricType metricType = ManhattanMetric)
236258 : metricType_(metricType){
237259
251273 case 4:
252274 return manhattan_(a,b);
253275 case 5:
254 return symetricKlDivergenz_(a,b);
276 return symetricKlDivergenz_(a,b);
255277 case 6 :
256 return bhattacharyaDistance_(a,b);
278 return bhattacharyaDistance_(a,b);
257279 default :
258280 return 0;
259281 }
260 }
282 }
261283 private:
262284 MetricType metricType_;
263285 ChiSquared<T> chiSquared_;
272294 } // end namespace metrics
273295 } // end namespace vigra
274296
297 //@}
275298
276299 #endif //VIGRA_METRIC_HXX
123123 {
124124 typedef StridedMultiIterator <N, T, REFERENCE, POINTER> type;
125125 };
126
126
127127 template <unsigned int N, class T, class REFERENCE, class POINTER>
128128 struct Iterator
129129 {
130130 typedef StridedScanOrderIterator <N, T, REFERENCE, POINTER> type;
131131 };
132
132
133133 template <class Iter, class View>
134134 static Iter constructIterator(View * v)
135135 {
156156 {
157157 typedef MultiIterator <N, T, REFERENCE, POINTER> type;
158158 };
159
159
160160 template <unsigned int N, class T, class REFERENCE, class POINTER>
161161 struct Iterator
162162 {
163163 typedef POINTER type;
164164 };
165
165
166166 template <class Iter, class View>
167167 static Iter constructIterator(View * v)
168168 {
470470
471471 /********************************************************/
472472 /* */
473 /* MultiArrayView */
473 /* namespace multi_math forward declarations */
474474 /* */
475475 /********************************************************/
476476
477 // forward declarations
478
477 /** \brief Arithmetic and algebraic functions for multi-dimensional arrays.
478
479 \defgroup MultiMathModule vigra::multi_math
480
481 Namespace <tt>vigra::multi_math</tt> holds VIGRA's support for efficient arithmetic and algebraic functions on multi-dimensional arrays (that is, \ref MultiArrayView and its subclasses). All <tt>multi_math</tt> functions operate element-wise. If you need matrix multiplication, use \ref LinearAlgebraModule instead.
482
483 In order to avoid overload ambiguities, multi-array arithmetic must be explicitly activated by
484 \code
485 using namespace vigra::multi_math;
486 \endcode
487 (this should not be done globally, but only in the scope where the functionality is actually used).
488
489 You can then use the standard operators in the expected way:
490 \code
491 MultiArray<2, float> i(Shape2(100, 100)), j(Shape2(100, 100));
492
493 MultiArray<2, float> h = i + 4.0 * j;
494 h += (i.transpose() - j) / 2.0;
495 \endcode
496 etc. (supported operators are <tt>+ - * / ! ~ % && || == != &lt; &lt;= &gt; &gt;= &lt;&lt; &gt;&gt; & | ^ = += -= *= /=</tt>, with both scalar and array arguments).
497
498 Algebraic functions are available as well:
499 \code
500 h = exp(-(sq(i) + sq(j)));
501 h *= atan2(-i, j);
502 \endcode
503 The following functions are implemented: <tt>abs, erf, even, odd, sign, signi, round, roundi, sqrt, sqrti, sq,
504 norm, squaredNorm, gamma, loggamma, exp, log, log10, sin, sin_pi, cos, cos_pi, asin, acos, tan, atan,
505 floor, ceil, conj, real, imag, arg, atan2, pow, fmod, min, max</tt>,
506 provided the array's element type supports the respective function.
507
508 Supported element types currently include the built-in numeric types, \ref TinyVector, \ref RGBValue,
509 <tt>std::complex</tt>, and \ref FFTWComplex.
510
511 In addition, <tt>multi_math</tt> supports a number of functions that reduce arrays to scalars:
512 \code
513 double s = sum<double>(i); // compute the sum of the elements, using 'double' as accumulator type
514 double p = product<double>(abs(i)); // compute the product of the elements' absolute values
515
516 bool a = any(i < 0.0); // check if any element of i is negative
517 bool b = all(i > 0.0); // check if all elements of i are positive
518 \endcode
519
520 Expressions are expanded so that no temporary arrays have to be created. To optimize cache locality,
521 loops are executed in the stride ordering of the left-hand-side array.
522
523 <b>\#include</b> \<vigra/multi_math.hxx\>
524
525 Namespace: vigra::multi_math
526 */
479527 namespace multi_math {
480528
481529 template <class T>
609657 typedef typename BaseType::NormType NormType;
610658 };
611659
660 /********************************************************/
661 /* */
662 /* MultiArrayView */
663 /* */
664 /********************************************************/
665
612666 /** \brief Base class for, and view to, \ref vigra::MultiArray.
613667
614668 This class implements the interface of both MultiArray and
615669 MultiArrayView. By default, MultiArrayViews are tagged as
616 strided (using <tt>StridedArrayTag</tt> as third template parameter).
670 strided (using <tt>StridedArrayTag</tt> as third template parameter).
617671 This means that the array elements need not be consecutive in memory,
618672 making the view flexible to represent all kinds of subarrays and slices.
619 In certain cases (which have become rare due to improvements of
620 optimizer and processor technology), an array may be tagged with
673 In certain cases (which have become rare due to improvements of
674 optimizer and processor technology), an array may be tagged with
621675 <tt>UnstridedArrayTag</tt> which indicates that the first array dimension
622676 is guaranteed to be unstrided, i.e. has consecutive elements in memory.
623677
624678 In addition to the member functions described here, <tt>MultiArrayView</tt>
625 and its subclasses support arithmetic and algebraic functions via the
679 and its subclasses support arithmetic and algebraic functions via the
626680 module \ref MultiMathModule.
627681
628682 If you want to apply an algorithm requiring an image to a
636690
637691 T: the type of the array elements
638692
639 C: a tag determining whether the array's inner dimension is strided
640 or not. An array is unstrided if the array elements occupy consecutive
641 memory location, strided if there is an offset in between (e.g.
642 when a view is created that skips every other array element).
643 The compiler can generate faster code for unstrided arrays.
644 Possible values: StridedArrayTag (default), UnstridedArrayTag
693 C: a tag determining if the array's inner dimension is strided
694 (the tag can be used to specialize algorithms for different memory
695 layouts, see \ref MultiArrayTags for details). Users normally need
696 not care about this parameter.
645697 \endcode
646698
647699 <b>\#include</b> \<vigra/multi_array.hxx\> <br/>
727779 {
728780 return m_stride[0] <= 1;
729781 }
730
782
731783 bool checkInnerStride(StridedArrayTag) const
732784 {
733785 return true;
778830 {}
779831
780832 /** construct from another array view.
781 Throws a precondition error if this array has UnstridedArrayTag, but the
833 Throws a precondition error if this array has UnstridedArrayTag, but the
782834 innermost dimension of \a other is strided.
783835 */
784836 template <class Stride>
814866 vigra_precondition(checkInnerStride(StrideTag()),
815867 "MultiArrayView<..., UnstridedArrayTag>::MultiArrayView(): First dimension of given array is not unstrided.");
816868 }
817
869
818870 /** Construct from an old-style BasicImage.
819871 */
820872 template <class ALLOC>
823875 m_stride (detail::defaultStride<actual_dimension>(m_shape)),
824876 m_ptr (const_cast<pointer>(image.data()))
825877 {}
826
878
827879 /** Conversion to a strided view.
828880 */
829881 operator MultiArrayView<N, T, StridedArrayTag>() const
831883 return MultiArrayView<N, T, StridedArrayTag>(m_shape, m_stride, m_ptr);
832884 }
833885
834 /** Reset this <tt>MultiArrayView</tt> to an invalid state (as after default construction).
835 Can e.g. be used prior to assignment to make a view object point to new data.
886 /** Reset this <tt>MultiArrayView</tt> to an invalid state (as after default construction).
887 Can e.g. be used prior to assignment to make a view object point to new data.
836888 */
837889 void reset() {
838 m_shape = diff_zero_t(0);
839 m_stride = diff_zero_t(0);
840 m_ptr = 0;
890 m_shape = diff_zero_t(0);
891 m_stride = diff_zero_t(0);
892 m_ptr = 0;
841893 }
842894
843895
846898 <ul>
847899 <li> When this <tt>MultiArrayView</tt> does not point to valid data
848900 (e.g. after default construction), it becomes a new view of \a rhs.
849 <li> Otherwise, when the shapes of the two arrays match, the contents
901 <li> Otherwise, when the shapes of the two arrays match, the contents
850902 (i.e. the elements) of \a rhs are copied.
851903 <li> Otherwise, a <tt>PreconditionViolation</tt> exception is thrown.
852904 </ul>
866918 }
867919
868920 /** Assignment of a differently typed MultiArrayView. It copies the elements
869 of\a rhs or fails with <tt>PreconditionViolation</tt> exception when
 921 of \a rhs or fails with <tt>PreconditionViolation</tt> exception when
870922 the shapes do not match.
871923 */
872924 template<class U, class C1>
10191071 Mostly useful to support standard indexing for 1-dimensional multi-arrays,
10201072 but works for any N. Use scanOrderIndexToCoordinate() and
10211073 coordinateToScanOrderIndex() for conversion between indices and coordinates.
1022
1074
10231075 <b>Note:</b> This function should not be used in the inner loop, because the
10241076 conversion of the scan order index into a memory address is expensive
10251077 (it must take into account that memory may not be consecutive for subarrays
1026 and/or strided arrays). Always prefer operator() if possible.
1078 and/or strided arrays). Always prefer operator() if possible.
10271079 */
10281080 reference operator[](difference_type_1 d)
10291081 {
10351087 Mostly useful to support standard indexing for 1-dimensional multi-arrays,
10361088 but works for any N. Use scanOrderIndexToCoordinate() and
10371089 coordinateToScanOrderIndex() for conversion between indices and coordinates.
1038
1090
10391091 <b>Note:</b> This function should not be used in the inner loop, because the
10401092 conversion of the scan order index into a memory address is expensive
10411093 (it must take into account that memory may not be consecutive for subarrays
1042 and/or strided arrays). Always prefer operator() if possible.
1094 and/or strided arrays). Always prefer operator() if possible.
10431095 */
10441096 const_reference operator[](difference_type_1 d) const
10451097 {
11761228 }
11771229
 11781230 /** Swap the pointers, shapes and strides between two array views.
1179
1231
11801232 This function must be used with care. Never swap a MultiArray
11811233 (which owns data) with a MultiArrayView:
11821234 \code
11831235 MultiArray<2, int> a(3,2), b(3,2);
11841236 MultiArrayView<2, int> va(a);
1185
1237
11861238 va.swap(b); // danger!
11871239 \endcode
11881240 Now, <tt>a</tt> and <tt>b</tt> refer to the same memory. This may lead
1189 to a crash in their destructor, and in any case leaks <tt>b</tt>'s original
1241 to a crash in their destructor, and in any case leaks <tt>b</tt>'s original
11901242 memory. Only use swap() on copied MultiArrayViews:
11911243 \code
11921244 MultiArray<2, int> a(3,2), b(3,2);
11931245 MultiArrayView<2, int> va(a), vb(b);
1194
1246
11951247 va.swap(vb); // OK
11961248 \endcode
11971249 */
12231275 {
12241276 swapDataImpl(rhs);
12251277 }
1226
1227 /** check whether the array is unstrided (i.e. has consecutive memory) up
1278
1279 /** check whether the array is unstrided (i.e. has consecutive memory) up
12281280 to the given dimension.
12291281
1230 \a dimension can range from 0 ... N-1. If a certain dimension is unstrided,
1282 \a dimension can range from 0 ... N-1. If a certain dimension is unstrided,
12311283 all lower dimensions are also unstrided.
12321284 */
12331285 bool isUnstrided(unsigned int dimension = N-1) const
13421394 */
13431395 MultiArrayView <N-1, T, StridedArrayTag>
13441396 bindAt (difference_type_1 m, difference_type_1 d) const;
1345
1397
13461398 /** Create a view to channel 'i' of a vector-like value type. Possible value types
1347 (of the original array) are: \ref TinyVector, \ref RGBValue, \ref FFTWComplex,
1399 (of the original array) are: \ref TinyVector, \ref RGBValue, \ref FFTWComplex,
13481400 and <tt>std::complex</tt>. The list can be extended to any type whose memory
1349 layout is equivalent to a fixed-size C array, by specializing
1401 layout is equivalent to a fixed-size C array, by specializing
13501402 <tt>ExpandElementResult</tt>.
13511403
13521404 <b>Usage:</b>
13531405 \code
13541406 MultiArray<2, RGBValue<float> > rgb_image(Shape2(w, h));
1355
1407
13561408 MultiArrayView<2, float, StridedArrayTag> red = rgb_image.bindElementChannel(0);
13571409 MultiArrayView<2, float, StridedArrayTag> green = rgb_image.bindElementChannel(1);
13581410 MultiArrayView<2, float, StridedArrayTag> blue = rgb_image.bindElementChannel(2);
13591411 \endcode
13601412 */
1361 MultiArrayView <N, typename ExpandElementResult<T>::type, StridedArrayTag>
1413 MultiArrayView <N, typename ExpandElementResult<T>::type, StridedArrayTag>
13621414 bindElementChannel(difference_type_1 i) const
13631415 {
13641416 vigra_precondition(0 <= i && i < ExpandElementResult<T>::size,
13661418 return expandElements(0).bindInner(i);
13671419 }
13681420
1369 /** Create a view where a vector-like element type is expanded into a new
1421 /** Create a view where a vector-like element type is expanded into a new
13701422 array dimension. The new dimension is inserted at index position 'd',
13711423 which must be between 0 and N inclusive.
1372
1373 Possible value types of the original array are: \ref TinyVector, \ref RGBValue,
1374 \ref FFTWComplex, <tt>std::complex</tt>, and the built-in number types (in this
1375 case, <tt>expandElements</tt> is equivalent to <tt>insertSingletonDimension</tt>).
1424
1425 Possible value types of the original array are: \ref TinyVector, \ref RGBValue,
1426 \ref FFTWComplex, <tt>std::complex</tt>, and the built-in number types (in this
1427 case, <tt>expandElements</tt> is equivalent to <tt>insertSingletonDimension</tt>).
13761428 The list of supported types can be extended to any type whose memory
1377 layout is equivalent to a fixed-size C array, by specializing
1429 layout is equivalent to a fixed-size C array, by specializing
13781430 <tt>ExpandElementResult</tt>.
13791431
13801432 <b>Usage:</b>
13811433 \code
13821434 MultiArray<2, RGBValue<float> > rgb_image(Shape2(w, h));
1383
1435
13841436 MultiArrayView<3, float, StridedArrayTag> multiband_image = rgb_image.expandElements(2);
13851437 \endcode
13861438 */
1387 MultiArrayView <N+1, typename ExpandElementResult<T>::type, StridedArrayTag>
1439 MultiArrayView <N+1, typename ExpandElementResult<T>::type, StridedArrayTag>
13881440 expandElements(difference_type_1 d) const;
1389
1441
13901442 /** Add a singleton dimension (dimension of length 1).
13911443
13921444 Singleton dimensions don't change the size of the data, but introduce
14181470 */
14191471 MultiArrayView <N+1, T, StrideTag>
14201472 insertSingletonDimension (difference_type_1 i) const;
1421
1473
14221474 /** create a multiband view for this array.
14231475
14241476 The type <tt>MultiArrayView<N, Multiband<T> ></tt> tells VIGRA
14251477 algorithms which recognize the <tt>Multiband</tt> modifier to
1426 interpret the outermost (last) dimension as a channel dimension.
1427 In effect, these algorithms will treat the data as a set of
1478 interpret the outermost (last) dimension as a channel dimension.
1479 In effect, these algorithms will treat the data as a set of
14281480 (N-1)-dimensional arrays instead of a single N-dimensional array.
14291481 */
14301482 MultiArrayView<N, Multiband<value_type>, StrideTag> multiband() const
14331485 }
14341486
14351487 /** Create a view to the diagonal elements of the array.
1436
1488
14371489 This produces a 1D array view whose size equals the size
14381490 of the shortest dimension of the original array.
14391491
14501502 */
14511503 MultiArrayView<1, T, StridedArrayTag> diagonal() const
14521504 {
1453 return MultiArrayView<1, T, StridedArrayTag>(Shape1(vigra::min(m_shape)),
1505 return MultiArrayView<1, T, StridedArrayTag>(Shape1(vigra::min(m_shape)),
14541506 Shape1(vigra::sum(m_stride)), m_ptr);
14551507 }
14561508
14571509 /** create a rectangular subarray that spans between the
1458 points p and q, where p is in the subarray, q not.
1510 points p and q, where p is in the subarray, q not.
14591511 If an element of p or q is negative, it is subtracted
 14601512 from the corresponding shape.
14611513
14671519
 14681520 // get a subarray that is smaller by one element at all sides
14691521 MultiArrayView <3, double> subarray = array3.subarray(Shape(1,1,1), Shape(39, 29, 19));
1470
1522
14711523 // specifying the end point with a vector of '-1' is equivalent
14721524 MultiArrayView <3, double> subarray2 = array3.subarray(Shape(1,1,1), Shape(-1, -1, -1));
14731525 \endcode
15201572
15211573 /** Permute the dimensions of the array.
 15211573 The function exchanges the order of the array's axes without copying the data.
1523 Argument\a permutation specifies the desired order such that
 1575 Argument \a permutation specifies the desired order such that
15241576 <tt>permutation[k] = j</tt> means that axis <tt>j</tt> in the original array
1525 becomes axis <tt>k</tt> in the transposed array.
1577 becomes axis <tt>k</tt> in the transposed array.
15261578
15271579 <b>Usage:</b><br>
15281580 \code
15501602 */
15511603 MultiArrayView <N, T, StridedArrayTag>
15521604 permuteStridesAscending() const;
1553
1605
15541606 /** Permute the dimensions of the array so that the strides are in descending order.
15551607 Determines the appropriate permutation and then calls permuteDimensions().
15561608 */
15571609 MultiArrayView <N, T, StridedArrayTag>
15581610 permuteStridesDescending() const;
1559
1611
15601612 /** Compute the ordering of the strides in this array.
1561 The result is describes the current permutation of the axes relative
 1613 The result describes the current permutation of the axes relative
15621614 to the standard ascending stride order.
15631615 */
15641616 difference_type strideOrdering() const
15651617 {
15661618 return strideOrdering(m_stride);
15671619 }
1568
1620
15691621 /** Compute the ordering of the given strides.
1570 The result is describes the current permutation of the axes relative
 1622 The result describes the current permutation of the axes relative
15711623 to the standard ascending stride order.
15721624 */
15731625 static difference_type strideOrdering(difference_type strides);
16851737 {
16861738 bool res = true;
16871739 detail::reduceOverMultiArray(traverser_begin(), shape(),
1688 res,
1740 res,
16891741 detail::AllTrueReduceFunctor(),
16901742 MetaInt<actual_dimension-1>());
16911743 return res;
16981750 {
16991751 bool res = false;
17001752 detail::reduceOverMultiArray(traverser_begin(), shape(),
1701 res,
1753 res,
17021754 detail::AnyTrueReduceFunctor(),
17031755 MetaInt<actual_dimension-1>());
17041756 return res;
17051757 }
17061758
1707 /** Find the minimum and maximum element in this array.
1708 See \ref FeatureAccumulators for a general feature
1759 /** Find the minimum and maximum element in this array.
1760 See \ref FeatureAccumulators for a general feature
17091761 extraction framework.
17101762 */
17111763 void minmax(T * minimum, T * maximum) const
17121764 {
17131765 std::pair<T, T> res(NumericTraits<T>::max(), NumericTraits<T>::min());
17141766 detail::reduceOverMultiArray(traverser_begin(), shape(),
1715 res,
1767 res,
17161768 detail::MinmaxReduceFunctor(),
17171769 MetaInt<actual_dimension-1>());
17181770 *minimum = res.first;
17191771 *maximum = res.second;
17201772 }
17211773
1722 /** Compute the mean and variance of the values in this array.
1723 See \ref FeatureAccumulators for a general feature
1774 /** Compute the mean and variance of the values in this array.
1775 See \ref FeatureAccumulators for a general feature
17241776 extraction framework.
17251777 */
17261778 template <class U>
17301782 R zero = R();
17311783 triple<double, R, R> res(0.0, zero, zero);
17321784 detail::reduceOverMultiArray(traverser_begin(), shape(),
1733 res,
1785 res,
17341786 detail::MeanVarianceReduceFunctor(),
17351787 MetaInt<actual_dimension-1>());
17361788 *mean = res.second;
17421794 You must provide the type of the result by an explicit template parameter:
17431795 \code
17441796 MultiArray<2, UInt8> A(width, height);
1745
1797
17461798 double sum = A.sum<double>();
17471799 \endcode
17481800 */
17511803 {
17521804 U res = NumericTraits<U>::zero();
17531805 detail::reduceOverMultiArray(traverser_begin(), shape(),
1754 res,
1806 res,
17551807 detail::SumReduceFunctor(),
17561808 MetaInt<actual_dimension-1>());
17571809 return res;
17581810 }
17591811
17601812 /** Compute the sum of the array elements over selected axes.
1761
1813
17621814 \arg sums must have the same shape as this array, except for the
1763 axes along which the sum is to be accumulated. These axes must be
1815 axes along which the sum is to be accumulated. These axes must be
17641816 singletons. Note that you must include <tt>multi_pointoperators.hxx</tt>
17651817 for this function to work.
17661818
17681820 \code
17691821 #include <vigra/multi_array.hxx>
17701822 #include <vigra/multi_pointoperators.hxx>
1771
1823
17721824 MultiArray<2, double> A(Shape2(rows, cols));
17731825 ... // fill A
1774
1826
17751827 // make the first axis a singleton to sum over the first index
17761828 MultiArray<2, double> rowSums(Shape2(1, cols));
17771829 A.sum(rowSums);
1778
1830
17791831 // this is equivalent to
17801832 transformMultiArray(srcMultiArrayRange(A),
17811833 destMultiArrayRange(rowSums),
17951847 You must provide the type of the result by an explicit template parameter:
17961848 \code
17971849 MultiArray<2, UInt8> A(width, height);
1798
1850
17991851 double prod = A.product<double>();
18001852 \endcode
18011853 */
18041856 {
18051857 U res = NumericTraits<U>::one();
18061858 detail::reduceOverMultiArray(traverser_begin(), shape(),
1807 res,
1859 res,
18081860 detail::ProdReduceFunctor(),
18091861 MetaInt<actual_dimension-1>());
18101862 return res;
18121864
18131865 /** Compute the squared Euclidean norm of the array (sum of squares of the array elements).
18141866 */
1815 typename NormTraits<MultiArrayView>::SquaredNormType
1867 typename NormTraits<MultiArrayView>::SquaredNormType
18161868 squaredNorm() const
18171869 {
18181870 typedef typename NormTraits<MultiArrayView>::SquaredNormType SquaredNormType;
18191871 SquaredNormType res = NumericTraits<SquaredNormType>::zero();
18201872 detail::reduceOverMultiArray(traverser_begin(), shape(),
1821 res,
1873 res,
18221874 detail::SquaredL2NormReduceFunctor(),
18231875 MetaInt<actual_dimension-1>());
18241876 return res;
18371889 Parameter \a useSquaredNorm has no effect when \a type != 2. Defaults: compute L2 norm as square root of
18381890 <tt>squaredNorm()</tt>.
18391891 */
1840 typename NormTraits<MultiArrayView>::NormType
1892 typename NormTraits<MultiArrayView>::NormType
18411893 norm(int type = 2, bool useSquaredNorm = true) const;
18421894
18431895 /** return the pointer to the image data
18461898 {
18471899 return m_ptr;
18481900 }
1849
1901
18501902 pointer & unsafePtr()
18511903 {
18521904 return m_ptr;
19602012 {
19612013 vigra_precondition(rhs.checkInnerStride(Stride1()),
19622014 "MultiArrayView<..., UnstridedArrayTag>::operator=(MultiArrayView const &): cannot create unstrided view from strided array.");
1963
2015
19642016 m_shape = rhs.shape();
19652017 m_stride = rhs.stride();
19662018 m_ptr = rhs.data();
20792131 }
20802132
20812133 template <unsigned int N, class T, class StrideTag>
2082 typename MultiArrayView <N, T, StrideTag>::difference_type
2134 typename MultiArrayView <N, T, StrideTag>::difference_type
20832135 MultiArrayView <N, T, StrideTag>::strideOrdering(difference_type stride)
20842136 {
20852137 difference_type permutation;
22732325 {
22742326 vigra_precondition(0 <= d && d <= static_cast <difference_type_1> (N),
22752327 "MultiArrayView<N, ...>::expandElements(d): 0 <= 'd' <= N required.");
2276
2328
22772329 int elementSize = ExpandElementResult<T>::size;
22782330 typename MultiArrayShape<N+1>::type newShape, newStrides;
22792331 for(int k=0; k<d; ++k)
22802332 {
22812333 newShape[k] = m_shape[k];
22822334 newStrides[k] = m_stride[k]*elementSize;
2283 }
2284
2335 }
2336
22852337 newShape[d] = elementSize;
22862338 newStrides[d] = 1;
2287
2288 for(int k=d; k<N; ++k)
2339
2340 for(unsigned k=d; k<N; ++k)
22892341 {
22902342 newShape[k+1] = m_shape[k];
22912343 newStrides[k+1] = m_stride[k]*elementSize;
2292 }
2293
2294 typedef typename ExpandElementResult<T>::type U;
2344 }
2345
2346 typedef typename ExpandElementResult<T>::type U;
22952347 return MultiArrayView<N+1, U, StridedArrayTag>(
22962348 newShape, newStrides, reinterpret_cast<U*>(m_ptr));
22972349 }
23252377 case 0:
23262378 {
23272379 NormType res = NumericTraits<NormType>::zero();
2328 detail::reduceOverMultiArray(traverser_begin(), shape(),
2329 res,
2380 detail::reduceOverMultiArray(traverser_begin(), shape(),
2381 res,
23302382 detail::MaxNormReduceFunctor(),
23312383 MetaInt<actual_dimension-1>());
23322384 return res;
23342386 case 1:
23352387 {
23362388 NormType res = NumericTraits<NormType>::zero();
2337 detail::reduceOverMultiArray(traverser_begin(), shape(),
2338 res,
2389 detail::reduceOverMultiArray(traverser_begin(), shape(),
2390 res,
23392391 detail::L1NormReduceFunctor(),
23402392 MetaInt<actual_dimension-1>());
23412393 return res;
23492401 else
23502402 {
23512403 NormType normMax = NumericTraits<NormType>::zero();
2352 detail::reduceOverMultiArray(traverser_begin(), shape(),
2353 normMax,
2404 detail::reduceOverMultiArray(traverser_begin(), shape(),
2405 normMax,
23542406 detail::MaxNormReduceFunctor(),
23552407 MetaInt<actual_dimension-1>());
23562408 if(normMax == NumericTraits<NormType>::zero())
23572409 return normMax;
23582410 NormType res = NumericTraits<NormType>::zero();
2359 detail::reduceOverMultiArray(traverser_begin(), shape(),
2360 res,
2411 detail::reduceOverMultiArray(traverser_begin(), shape(),
2412 res,
23612413 detail::WeightedL2NormReduceFunctor<NormType>(1.0/normMax),
23622414 MetaInt<actual_dimension-1>());
23632415 return sqrt(res)*normMax;
24182470 Namespace: vigra
24192471 */
24202472 template <unsigned int N, class T, class A /* default already declared above */>
2421 class MultiArray
2422 : public MultiArrayView <N, typename vigra::detail::ResolveMultiband<T>::type,
2473 class MultiArray
2474 : public MultiArrayView <N, typename vigra::detail::ResolveMultiband<T>::type,
24232475 typename vigra::detail::ResolveMultiband<T>::Stride>
24242476 {
24252477 public:
24272479
24282480 /** the view type associated with this array.
24292481 */
2430 typedef MultiArrayView <N, typename vigra::detail::ResolveMultiband<T>::type,
2482 typedef MultiArrayView <N, typename vigra::detail::ResolveMultiband<T>::type,
24312483 typename vigra::detail::ResolveMultiband<T>::Stride> view_type;
2432
2484
24332485 using view_type::actual_dimension;
24342486
24352487 /** the allocator type used to allocate the memory
25462598 {}
25472599
25482600 /** construct with given length
2549
2601
25502602 Use only for 1-dimensional arrays (<tt>N==1</tt>).
25512603 */
25522604 explicit MultiArray (difference_type_1 length,
25542606
25552607
25562608 /** construct with given width and height
2557
2609
25582610 Use only for 2-dimensional arrays (<tt>N==2</tt>).
25592611 */
25602612 MultiArray (difference_type_1 width, difference_type_1 height,
25702622 MultiArray (const difference_type &shape, const_reference init,
25712623 allocator_type const & alloc = allocator_type());
25722624
2573 /** construct from shape and initialize with a linear sequence in scan order
2625 /** construct from shape and initialize with a linear sequence in scan order
25742626 (i.e. first pixel gets value 0, second on gets value 1 and so on).
25752627 */
25762628 MultiArray (const difference_type &shape, MultiArrayInitializationTag init,
26422694
26432695 /** Add-assignment from arbitrary MultiArrayView. Fails with
26442696 <tt>PreconditionViolation</tt> exception when the shapes do not match.
2645 If the left array has no data (hasData() is false), this function is
2697 If the left array has no data (hasData() is false), this function is
26462698 equivalent to a normal assignment (i.e. an empty
26472699 array is interpreted as a zero-array of appropriate size).
26482700 */
26582710
26592711 /** Subtract-assignment from arbitrary MultiArrayView. Fails with
26602712 <tt>PreconditionViolation</tt> exception when the shapes do not match.
2661 If the left array has no data (hasData() is false), this function is
2713 If the left array has no data (hasData() is false), this function is
26622714 equivalent to an assignment of the negated rhs (i.e. an empty
26632715 array is interpreted as a zero-array of appropriate size).
26642716 */
26732725
26742726 /** Multiply-assignment from arbitrary MultiArrayView. Fails with
26752727 <tt>PreconditionViolation</tt> exception when the shapes do not match.
2676 If the left array has no data (hasData() is false), this function is
2728 If the left array has no data (hasData() is false), this function is
26772729 equivalent to reshape(rhs.shape()) with zero initialisation (i.e. an empty
26782730 array is interpreted as a zero-array of appropriate size).
26792731 */
26892741
26902742 /** Divide-assignment from arbitrary MultiArrayView. Fails with
26912743 <tt>PreconditionViolation</tt> exception when the shapes do not match.
2692 If the left array has no data (hasData() is false), this function is
2744 If the left array has no data (hasData() is false), this function is
26932745 equivalent to reshape(rhs.shape()) with zero initialisation (i.e. an empty
26942746 array is interpreted as a zero-array of appropriate size).
26952747 */
28582910 {
28592911 return m_alloc;
28602912 }
2861
2913
28622914 static difference_type defaultStride(difference_type const & shape)
28632915 {
28642916 return vigra::detail::ResolveMultiband<T>::defaultStride(shape);
34073459 /********************************************************/
34083460
34093461 /** \addtogroup MultiArrayToImage Create BasicImageView from MultiArrayViews
3410
3462
34113463 Some convenience functions for wrapping a \ref vigra::MultiArrayView's
3412 data in a \ref vigra::BasicImageView.
3464 data in a \ref vigra::BasicImageView.
34133465 */
34143466 //@{
34153467 /** Create a \ref vigra::BasicImageView from an unstrided 2-dimensional
34223474 BasicImageView <T>
34233475 makeBasicImageView (MultiArrayView <2, T, Stride> const &array)
34243476 {
3425 vigra_precondition(array.isUnstrided(),
3426 "makeBasicImageView(array): array must be unstrided (i.e. array.isUnstrided() == true).");
3477 vigra_precondition(array.isUnstrided(0),
3478 "makeBasicImageView(array): array must be unstrided along x (i.e. array.isUnstrided(0) == true).");
34273479 return BasicImageView <T> (array.data (), array.shape (0),
34283480 array.shape (1), array.stride(1));
34293481 }
34583510 BasicImageView <RGBValue<T> >
34593511 makeRGBImageView (MultiArrayView<3, T, Stride> const &array)
34603512 {
3461 vigra_precondition(array.shape (0) == 3,
3513 vigra_precondition(array.shape (0) == 3,
34623514 "makeRGBImageView(): array.shape(0) must be 3.");
34633515 vigra_precondition(array.isUnstrided(),
34643516 "makeRGBImageView(array): array must be unstrided (i.e. array.isUnstrided() == true).");
139139 #include "threading.hxx"
140140 #include "compression.hxx"
141141
142 // // FIXME: why is this needed when compiling the Python bindng,
143 // // but not when compiling test_multiarray_chunked?
144 // #if defined(__GNUC__)
145 // # define memory_order_release memory_order_seq_cst
146 // # define memory_order_acquire memory_order_seq_cst
147 // #endif
148
149142 #ifdef _WIN32
150143 # include "windows.h"
151144 #else
456449
457450 bool isInside(shape_type const & p) const
458451 {
459 for(int d=0; d<N; ++d)
452 for(unsigned d=0; d<N; ++d)
460453 if(p[d] < 0 || p[d] >= shape_[d])
461454 return false;
462455 return true;
13301323 int cache_max;
13311324 CompressionMethod compression_method;
13321325 };
1326
1327 /** \weakgroup ParallelProcessing
1328 \sa ChunkedArray
1329 */
13331330
13341331 /** \brief Interface and base class for chunked arrays.
13351332
18371834 if(chunk)
18381835 {
18391836 long rc = chunk->chunk_state_.fetch_sub(1);
1837 ignore_argument(rc);
18401838 #ifdef VIGRA_CHECK_BOUNDS
18411839 vigra_invariant(rc >= 0,
18421840 "ChunkedArray::unrefChunk(): chunk refcount got negative!");
25122510 P0(m.shape())));
25132511 }
25142512
2513 /** \weakgroup ParallelProcessing
2514 \sa ChunkedArrayFull
2515 */
2516
25152517 /** Implement ChunkedArray as an ordinary MultiArray with a single chunk.
25162518
25172519 <b>\#include</b> \<vigra/multi_array_chunked.hxx\> <br/>
25432545
25442546 static shape_type computeChunkShape(shape_type s)
25452547 {
2546 for(int k=0; k<N; ++k)
2548 for(unsigned k=0; k<N; ++k)
25472549 s[k] = ceilPower2(s[k]);
25482550 return s;
25492551 }
26202622 return false; // never destroys the data
26212623 }
26222624
2623 virtual std::size_t dataBytes(Chunk * c) const
2625 virtual std::size_t dataBytes(Chunk *) const
26242626 {
26252627 return prod(this->shape());
26262628 }
26722674 shape_type upper_bound_;
26732675 Chunk chunk_; // a dummy chunk to fulfill the API
26742676 };
2677
2678 /** \weakgroup ParallelProcessing
2679 \sa ChunkedArrayLazy
2680 */
26752681
26762682 /** Implement ChunkedArray as a collection of in-memory chunks.
26772683
27932799
27942800 Alloc alloc_;
27952801 };
2802
2803 /** \weakgroup ParallelProcessing
2804 \sa ChunkedArrayCompressed
2805 */
27962806
27972807 /** Implement ChunkedArray as a collection of potentially compressed
27982808 in-memory chunks.
29842994
29852995 CompressionMethod compression_method_;
29862996 };
2997
2998 /** \weakgroup ParallelProcessing
2999 \sa ChunkedArrayTmpFile
3000 */
29873001
29883002 /** Implement ChunkedArray as a collection of chunks that can be
29893003 swapped out into a temporary file when asleep.
31213135 , file_size_()
31223136 , file_capacity_()
31233137 {
3138 ignore_argument(path);
31243139 #ifdef VIGRA_NO_SPARSE_FILE
31253140 file_capacity_ = 4*prod(this->chunk_shape_)*sizeof(T);
31263141 #else
5353 /** \addtogroup ChunkedArrayClasses
5454 */
5555 //@{
56
57 /** \weakgroup ParallelProcessing
58 \sa ChunkedArrayHDF5
59 */
5660
5761 /** Implement ChunkedArray as a chunked dataset in an HDF5 file.
5862
221225 HDF5File::OpenMode mode = HDF5File::ReadOnly,
222226 ChunkedArrayOptions const & options = ChunkedArrayOptions(),
223227 Alloc const & alloc = Alloc())
224 : ChunkedArray<N, T>(shape_type(), shape_type(), options),
228 : ChunkedArray<N, T>(shape_type(),
229 ceilPower2<N>(shape_type(file.getChunkShape(dataset).begin())),
230 options),
225231 file_(file),
226232 dataset_name_(dataset),
227233 dataset_(),
231237 init(mode);
232238 }
233239
240
241 // copy constructor
242 ChunkedArrayHDF5(const ChunkedArrayHDF5 & src)
243 : ChunkedArray<N, T>(src),
244 file_(src.file_),
245 dataset_name_(src.dataset_name_),
246 compression_(src.compression_),
247 alloc_(src.alloc_)
248 {
249 if( file_.isReadOnly() )
250 init(HDF5File::ReadOnly);
251 else
252 init(HDF5File::ReadWrite);
253 }
254
234255 void init(HDF5File::OpenMode mode)
235256 {
236257 bool exists = file_.existsDataset(dataset_name_);
263284 // H5Pset_chunk_cache (dapl, rdcc_nslots, rdcc_nbytes, rdcc_w0);
264285 // Chunk cache size (rdcc_nbytes) should be large
265286 // enough to hold all the chunks in a selection
266 // • If this is not possible, it may be best to disable chunk
287 // * If this is not possible, it may be best to disable chunk
267288 // caching altogether (set rdcc_nbytes to 0)
268 // • rdcc_slots should be a prime number that is at
289 // * rdcc_slots should be a prime number that is at
269290 // least 10 to 100 times the number of chunks that can fit
270291 // into rdcc_nbytes
271 // • rdcc_w0 should be set to 1 if chunks that have been
292 // * rdcc_w0 should be set to 1 if chunks that have been
272293 // fully read/written will never be read/written again
273294 //
274295 // the above may be WRONG in general - it may only apply if the
300321 {
301322 vigra_precondition(fileShape.size() == N+1,
302323 "ChunkedArrayHDF5(file, dataset): dataset has wrong dimension.");
303 vigra_precondition(fileShape[0] == TypeTraits::numberOfBands(),
324 vigra_precondition(fileShape[0] == static_cast<unsigned>(TypeTraits::numberOfBands()),
304325 "ChunkedArrayHDF5(file, dataset): dataset has wrong number of bands.");
305326 shape_type shape(fileShape.begin()+1);
306327 if(this->size() > 0)
197197
198198 parallel_foreach(options.getNumThreads(),
199199 beginIter, endIter,
200 [&](const int threadId, const BlockWithBorder bwb)
200 [&](const int /*threadId*/, const BlockWithBorder bwb)
201201 {
202202 // get the input of the block as a view
203203 vigra::MultiArrayView<DIM, T_IN, ST_IN> sourceSub = source.subarray(bwb.border().begin(),
249249
250250 parallel_foreach(options.getNumThreads(),
251251 beginIter, endIter,
252 [&](const int threadId, const BlockWithBorder bwb)
252 [&](const int /*threadId*/, const BlockWithBorder bwb)
253253 {
254254 // get the input of the block as a view
255255 vigra::MultiArrayView<DIM, T_IN, ST_IN> sourceSub = source.subarray(bwb.border().begin(),
273273 public: \
274274 typedef ConvolutionOptions<DIM> ConvOpt; \
275275 FUNCTOR_NAME(const ConvOpt & convOpt) \
276 : convOpt_(convOpt){} \
276 : sharedOpt_(convOpt){} \
277277 template<class S, class D> \
278278 void operator()(const S & s, D & d)const{ \
279 FUNCTION_NAME(s, d, convOpt_); \
279 FUNCTION_NAME(s, d, sharedOpt_); \
280280 } \
281281 template<class S, class D,class SHAPE> \
282282 void operator()(const S & s, D & d, const SHAPE & roiBegin, const SHAPE & roiEnd){ \
283 ConvOpt convOpt(convOpt_); \
284 convOpt.subarray(roiBegin, roiEnd); \
285 FUNCTION_NAME(s, d, convOpt); \
283 ConvOpt localOpt(sharedOpt_); \
284 localOpt.subarray(roiBegin, roiEnd); \
285 FUNCTION_NAME(s, d, localOpt); \
286286 } \
287287 private: \
288 ConvOpt convOpt_; \
288 ConvOpt sharedOpt_; \
289289 };
290290
291291
305305 public:
306306 typedef ConvolutionOptions<DIM> ConvOpt;
307307 HessianOfGaussianEigenvaluesFunctor(const ConvOpt & convOpt)
308 : convOpt_(convOpt){}
308 : sharedOpt_(convOpt){}
309309 template<class S, class D>
310310 void operator()(const S & s, D & d)const{
311311 typedef typename vigra::NumericTraits<typename S::value_type>::RealPromote RealType;
312312 vigra::MultiArray<DIM, TinyVector<RealType, int(DIM*(DIM+1)/2)> > hessianOfGaussianRes(d.shape());
313 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, convOpt_);
313 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, sharedOpt_);
314314 vigra::tensorEigenvaluesMultiArray(hessianOfGaussianRes, d);
315315 }
316316 template<class S, class D,class SHAPE>
317317 void operator()(const S & s, D & d, const SHAPE & roiBegin, const SHAPE & roiEnd){
318318 typedef typename vigra::NumericTraits<typename S::value_type>::RealPromote RealType;
319319 vigra::MultiArray<DIM, TinyVector<RealType, int(DIM*(DIM+1)/2)> > hessianOfGaussianRes(roiEnd-roiBegin);
320 convOpt_.subarray(roiBegin, roiEnd);
321 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, convOpt_);
320 ConvOpt localOpt(sharedOpt_);
321 localOpt.subarray(roiBegin, roiEnd);
322 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, localOpt);
322323 vigra::tensorEigenvaluesMultiArray(hessianOfGaussianRes, d);
323324 }
324325 private:
325 ConvOpt convOpt_;
326 ConvOpt sharedOpt_;
326327 };
327328
328329 template<unsigned int DIM, unsigned int EV>
330331 public:
331332 typedef ConvolutionOptions<DIM> ConvOpt;
332333 HessianOfGaussianSelectedEigenvalueFunctor(const ConvOpt & convOpt)
333 : convOpt_(convOpt){}
334 : sharedOpt_(convOpt){}
334335 template<class S, class D>
335336 void operator()(const S & s, D & d)const{
336337 typedef typename vigra::NumericTraits<typename S::value_type>::RealPromote RealType;
337338
338339 // compute the hessian of gaussian and extract eigenvalue
339340 vigra::MultiArray<DIM, TinyVector<RealType, int(DIM*(DIM+1)/2)> > hessianOfGaussianRes(s.shape());
340 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, convOpt_);
341 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, sharedOpt_);
341342
342343 vigra::MultiArray<DIM, TinyVector<RealType, DIM > > allEigenvalues(s.shape());
343344 vigra::tensorEigenvaluesMultiArray(hessianOfGaussianRes, allEigenvalues);
351352
352353 // compute the hessian of gaussian and extract eigenvalue
353354 vigra::MultiArray<DIM, TinyVector<RealType, int(DIM*(DIM+1)/2)> > hessianOfGaussianRes(roiEnd-roiBegin);
354 convOpt_.subarray(roiBegin, roiEnd);
355 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, convOpt_);
355 ConvOpt localOpt(sharedOpt_);
356 localOpt.subarray(roiBegin, roiEnd);
357 vigra::hessianOfGaussianMultiArray(s, hessianOfGaussianRes, localOpt);
356358
357359 vigra::MultiArray<DIM, TinyVector<RealType, DIM > > allEigenvalues(roiEnd-roiBegin);
358360 vigra::tensorEigenvaluesMultiArray(hessianOfGaussianRes, allEigenvalues);
360362 d = allEigenvalues.bindElementChannel(EV);
361363 }
362364 private:
363 ConvOpt convOpt_;
365 ConvOpt sharedOpt_;
364366 };
365367
366368
681681
682682 } // namespace detail
683683
684 /** \addtogroup MultiArrayConvolutionFilters Convolution filters for multi-dimensional arrays.
685
686 These functions realize a separable convolution on an arbitrary dimensional
687 array that is specified by iterators (compatible to \ref MultiIteratorPage)
688 and shape objects. It can therefore be applied to a wide range of data structures
689 (\ref vigra::MultiArrayView, \ref vigra::MultiArray etc.).
684 /** \addtogroup ConvolutionFilters
690685 */
691686 //@{
692687
11681163 /* */
11691164 /********************************************************/
11701165
1166 /** \weakgroup ParallelProcessing
1167 \sa gaussianSmoothMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
1168 */
1169
11711170 /** \brief Isotropic Gaussian smoothing of a multi-dimensional array.
11721171
11731172 This function computes an isotropic convolution of the given N-dimensional
12111210 MultiArrayView<N, T2, S2> dest,
12121211 ConvolutionOptions<N> opt);
12131212
1214 // as above, but execute algorirhm in parallel
1213 // as above, but execute algorithm in parallel
12151214 template <unsigned int N, class T1, class S1,
12161215 class T2, class S2>
12171216 void
13911390 /* */
13921391 /********************************************************/
13931392
1393 /** \weakgroup ParallelProcessing
1394 \sa gaussianGradientMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
1395 */
1396
13941397 /** \brief Calculate Gaussian gradient of a multi-dimensional array.
13951398
13961399 This function computes the Gaussian gradient of the given N-dimensional
17331736 /* */
17341737 /********************************************************/
17351738
1739 /** \weakgroup ParallelProcessing
1740 \sa symmetricGradientMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
1741 */
1742
17361743 /** \brief Calculate gradient of a multi-dimensional array using symmetric difference filters.
17371744
17381745 This function computes the gradient of the given N-dimensional
19121919 /* laplacianOfGaussianMultiArray */
19131920 /* */
19141921 /********************************************************/
1922
1923 /** \weakgroup ParallelProcessing
1924 \sa laplacianOfGaussianMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
1925 */
19151926
19161927 /** \brief Calculate Laplacian of an N-dimensional array using Gaussian derivative filters.
19171928
21522163 /* */
21532164 /********************************************************/
21542165
2166 /** \weakgroup ParallelProcessing
2167 \sa gaussianDivergenceMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
2168 */
2169
21552170 /** \brief Calculate the divergence of a vector field using Gaussian derivative filters.
21562171
21572172 This function computes the divergence of the given N-dimensional vector field
23352350 /* hessianOfGaussianMultiArray */
23362351 /* */
23372352 /********************************************************/
2353
2354 /** \weakgroup ParallelProcessing
2355 \sa hessianOfGaussianMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
2356 */
23382357
23392358 /** \brief Calculate Hessian matrix of an N-dimensional array using Gaussian derivative filters.
23402359
26062625 /* */
26072626 /********************************************************/
26082627
2609 /** \brief Calculate th structure tensor of a multi-dimensional arrays.
2628 /** \weakgroup ParallelProcessing
2629 \sa structureTensorMultiArray <B>(...,</B> BlockwiseConvolutionOptions<B>)</B>
2630 */
2631
2632 /** \brief Calculate the structure tensor of a multi-dimensional array.
26102633
26112634 This function computes the gradient (outer product) tensor for each element
26122635 of the given N-dimensional array with first-derivative-of-Gaussian filters at
6161 {
6262 double left, center, right;
6363 Value apex_height;
64
64
6565 DistParabolaStackEntry(Value const & p, double l, double c, double r)
6666 : left(l), center(c), right(r), apex_height(p)
6767 {}
8484 double w = iend - is;
8585 if(w <= 0)
8686 return;
87
87
8888 double sigma2 = sigma * sigma;
8989 double sigma22 = 2.0 * sigma2;
90
90
9191 typedef typename SrcAccessor::value_type SrcType;
9292 typedef DistParabolaStackEntry<SrcType> Influence;
9393 std::vector<Influence> _stack;
9494 _stack.push_back(Influence(sa(is), 0.0, 0.0, w));
95
95
9696 ++is;
9797 double current = 1.0;
9898 for(;current < w; ++is, ++current)
9999 {
100100 double intersection;
101
101
102102 while(true)
103103 {
104104 Influence & s = _stack.back();
105105 double diff = current - s.center;
106106 intersection = current + (sa(is) - s.apex_height - sigma2*sq(diff)) / (sigma22 * diff);
107
107
108108 if( intersection < s.left) // previous point has no influence
109109 {
110110 _stack.pop_back();
128128 typename std::vector<Influence>::iterator it = _stack.begin();
129129 for(current = 0.0; current < w; ++current, ++id)
130130 {
131 while( current >= it->right)
132 ++it;
131 while( current >= it->right)
132 ++it;
133133 da.set(sigma2 * sq(current - it->center) + it->apex_height, id);
134134 }
135135 }
162162
163163 // we need the Promote type here if we want to invert the image (dilation)
164164 typedef typename NumericTraits<typename DestAccessor::value_type>::RealPromote TmpType;
165
165
166166 // temporary array to hold the current line to enable in-place operation
167167 ArrayVector<TmpType> tmp( shape[0] );
168168
169169 typedef MultiArrayNavigator<SrcIterator, N> SNavigator;
170170 typedef MultiArrayNavigator<DestIterator, N> DNavigator;
171
172
171
172
173173 // only operate on first dimension here
174174 SNavigator snav( si, shape, 0 );
175175 DNavigator dnav( di, shape, 0 );
182182 // Invert the values if necessary. Only needed for grayscale morphology
183183 if(invert)
184184 transformLine( snav.begin(), snav.end(), src, tmp.begin(),
185 typename AccessorTraits<TmpType>::default_accessor(),
185 typename AccessorTraits<TmpType>::default_accessor(),
186186 Param(NumericTraits<TmpType>::zero())-Arg1());
187187 else
188188 copyLine( snav.begin(), snav.end(), src, tmp.begin(),
192192 typename AccessorTraits<TmpType>::default_const_accessor()),
193193 destIter( dnav.begin(), dest ), sigmas[0] );
194194 }
195
195
196196 // operate on further dimensions
197197 for( int d = 1; d < N; ++d )
198198 {
199199 DNavigator dnav( di, shape, d );
200200
201201 tmp.resize( shape[d] );
202
202
203203
204204 for( ; dnav.hasMore(); dnav++ )
205205 {
234234
235235 } // namespace detail
236236
237 /** \addtogroup MultiArrayDistanceTransform Euclidean distance transform for multi-dimensional arrays.
238
239 These functions perform variants of the Euclidean distance transform on
240 arbitrary dimensional arrays.
237 /** \addtogroup DistanceTransform
241238 */
242239 //@{
243240
259256 namespace vigra {
260257 // explicitly specify pixel pitch for each coordinate
261258 template <unsigned int N, class T1, class S1,
262 class T2, class S2,
259 class T2, class S2,
263260 class Array>
264261 void
265262 separableMultiDistSquared(MultiArrayView<N, T1, S1> const & source,
272269 class T2, class S2>
273270 void
274271 separableMultiDistSquared(MultiArrayView<N, T1, S1> const & source,
275 MultiArrayView<N, T2, S2> dest,
272 MultiArrayView<N, T2, S2> dest,
276273 bool background);
277274 }
278275 \endcode
284281 // explicitly specify pixel pitch for each coordinate
285282 template <class SrcIterator, class SrcShape, class SrcAccessor,
286283 class DestIterator, class DestAccessor, class Array>
287 void
284 void
288285 separableMultiDistSquared( SrcIterator s, SrcShape const & shape, SrcAccessor src,
289 DestIterator d, DestAccessor dest,
286 DestIterator d, DestAccessor dest,
290287 bool background,
291288 Array const & pixelPitch);
292
289
293290 // use default pixel pitch = 1.0 for each coordinate
294291 template <class SrcIterator, class SrcShape, class SrcAccessor,
295292 class DestIterator, class DestAccessor>
296293 void
297294 separableMultiDistSquared(SrcIterator siter, SrcShape const & shape, SrcAccessor src,
298 DestIterator diter, DestAccessor dest,
295 DestIterator diter, DestAccessor dest,
299296 bool background);
300297
301298 }
306303 // explicitly specify pixel pitch for each coordinate
307304 template <class SrcIterator, class SrcShape, class SrcAccessor,
308305 class DestIterator, class DestAccessor, class Array>
309 void
306 void
310307 separableMultiDistSquared( triple<SrcIterator, SrcShape, SrcAccessor> const & source,
311 pair<DestIterator, DestAccessor> const & dest,
308 pair<DestIterator, DestAccessor> const & dest,
312309 bool background,
313310 Array const & pixelPitch);
314
311
315312 // use default pixel pitch = 1.0 for each coordinate
316313 template <class SrcIterator, class SrcShape, class SrcAccessor,
317314 class DestIterator, class DestAccessor>
329326 arrays are represented by iterators, shape objects and accessors.
330327 The destination array is required to already have the correct size.
331328
332 This function expects a mask as its source, where background pixels are
333 marked as zero, and non-background pixels as non-zero. If the parameter
329 This function expects a mask as its source, where background pixels are
330 marked as zero, and non-background pixels as non-zero. If the parameter
334331 <i>background</i> is true, then the squared distance of all background
335332 pixels to the nearest object is calculated. Otherwise, the distance of all
336333 object pixels to the nearest background pixel is calculated.
337
338 Optionally, one can pass an array that specifies the pixel pitch in each direction.
334
335 Optionally, one can pass an array that specifies the pixel pitch in each direction.
339336 This is necessary when the data have non-uniform resolution (as is common in confocal
340 microscopy, for example).
337 microscopy, for example).
341338
342339 This function may work in-place, which means that <tt>siter == diter</tt> is allowed.
343340 A full-sized internal array is only allocated if working on the destination
356353 MultiArray<3, unsigned int> dest(shape);
357354 ...
358355
359 // Calculate Euclidean distance squared for all background pixels
356 // Calculate Euclidean distance squared for all background pixels
360357 separableMultiDistSquared(source, dest, true);
361358 \endcode
362359
386383 pixelPitchIsReal = true;
387384 dmax += sq(pixelPitch[k]*shape[k]);
388385 }
389
386
390387 using namespace vigra::functor;
391
392 if(dmax > NumericTraits<DestType>::toRealPromote(NumericTraits<DestType>::max())
388
389 if(dmax > NumericTraits<DestType>::toRealPromote(NumericTraits<DestType>::max())
393390 || pixelPitchIsReal) // need a temporary array to avoid overflows
394391 {
395392 // Threshold the values so all objects have infinity value in the beginning
396393 Real maxDist = (Real)dmax, rzero = (Real)0.0;
397394 MultiArray<SrcShape::static_size, Real> tmpArray(shape);
398395 if(background == true)
399 transformMultiArray( s, shape, src,
396 transformMultiArray( s, shape, src,
400397 tmpArray.traverser_begin(), typename AccessorTraits<Real>::default_accessor(),
401398 ifThenElse( Arg1() == Param(zero), Param(maxDist), Param(rzero) ));
402399 else
403 transformMultiArray( s, shape, src,
400 transformMultiArray( s, shape, src,
404401 tmpArray.traverser_begin(), typename AccessorTraits<Real>::default_accessor(),
405402 ifThenElse( Arg1() != Param(zero), Param(maxDist), Param(rzero) ));
406
407 detail::internalSeparableMultiArrayDistTmp( tmpArray.traverser_begin(),
403
404 detail::internalSeparableMultiArrayDistTmp( tmpArray.traverser_begin(),
408405 shape, typename AccessorTraits<Real>::default_accessor(),
409 tmpArray.traverser_begin(),
406 tmpArray.traverser_begin(),
410407 typename AccessorTraits<Real>::default_accessor(), pixelPitch);
411
408
412409 copyMultiArray(srcMultiArrayRange(tmpArray), destIter(d, dest));
413410 }
414 else // work directly on the destination array
411 else // work directly on the destination array
415412 {
416413 // Threshold the values so all objects have infinity value in the beginning
417414 DestType maxDist = DestType(std::ceil(dmax)), rzero = (DestType)0;
419416 transformMultiArray( s, shape, src, d, dest,
420417 ifThenElse( Arg1() == Param(zero), Param(maxDist), Param(rzero) ));
421418 else
422 transformMultiArray( s, shape, src, d, dest,
419 transformMultiArray( s, shape, src, d, dest,
423420 ifThenElse( Arg1() != Param(zero), Param(maxDist), Param(rzero) ));
424
421
425422 detail::internalSeparableMultiArrayDistTmp( d, shape, dest, d, dest, pixelPitch);
426423 }
427424 }
428425
429426 template <class SrcIterator, class SrcShape, class SrcAccessor,
430427 class DestIterator, class DestAccessor>
431 inline
428 inline
432429 void separableMultiDistSquared( SrcIterator s, SrcShape const & shape, SrcAccessor src,
433430 DestIterator d, DestAccessor dest, bool background)
434431 {
456453 }
457454
458455 template <unsigned int N, class T1, class S1,
459 class T2, class S2,
456 class T2, class S2,
460457 class Array>
461458 inline void
462459 separableMultiDistSquared(MultiArrayView<N, T1, S1> const & source,
497494 // explicitly specify pixel pitch for each coordinate
498495 template <unsigned int N, class T1, class S1,
499496 class T2, class S2, class Array>
500 void
497 void
501498 separableMultiDistance(MultiArrayView<N, T1, S1> const & source,
502 MultiArrayView<N, T2, S2> dest,
499 MultiArrayView<N, T2, S2> dest,
503500 bool background,
504501 Array const & pixelPitch);
505502
506503 // use default pixel pitch = 1.0 for each coordinate
507504 template <unsigned int N, class T1, class S1,
508505 class T2, class S2>
509 void
506 void
510507 separableMultiDistance(MultiArrayView<N, T1, S1> const & source,
511 MultiArrayView<N, T2, S2> dest,
508 MultiArrayView<N, T2, S2> dest,
512509 bool background);
513510 }
514511 \endcode
520517 // explicitly specify pixel pitch for each coordinate
521518 template <class SrcIterator, class SrcShape, class SrcAccessor,
522519 class DestIterator, class DestAccessor, class Array>
523 void
520 void
524521 separableMultiDistance( SrcIterator s, SrcShape const & shape, SrcAccessor src,
525 DestIterator d, DestAccessor dest,
522 DestIterator d, DestAccessor dest,
526523 bool background,
527524 Array const & pixelPitch);
528
525
529526 // use default pixel pitch = 1.0 for each coordinate
530527 template <class SrcIterator, class SrcShape, class SrcAccessor,
531528 class DestIterator, class DestAccessor>
532529 void
533530 separableMultiDistance(SrcIterator siter, SrcShape const & shape, SrcAccessor src,
534 DestIterator diter, DestAccessor dest,
531 DestIterator diter, DestAccessor dest,
535532 bool background);
536533
537534 }
542539 // explicitly specify pixel pitch for each coordinate
543540 template <class SrcIterator, class SrcShape, class SrcAccessor,
544541 class DestIterator, class DestAccessor, class Array>
545 void
542 void
546543 separableMultiDistance( triple<SrcIterator, SrcShape, SrcAccessor> const & source,
547 pair<DestIterator, DestAccessor> const & dest,
544 pair<DestIterator, DestAccessor> const & dest,
548545 bool background,
549546 Array const & pixelPitch);
550
547
551548 // use default pixel pitch = 1.0 for each coordinate
552549 template <class SrcIterator, class SrcShape, class SrcAccessor,
553550 class DestIterator, class DestAccessor>
564561 multi-dimensional array. It simply calls \ref separableMultiDistSquared()
565562 and takes the pixel-wise square root of the result. See \ref separableMultiDistSquared()
566563 for more documentation.
567
564
568565 <b> Usage:</b>
569566
570567 <b>\#include</b> \<vigra/multi_distance.hxx\><br/>
576573 MultiArray<3, float> dest(shape);
577574 ...
578575
579 // Calculate Euclidean distance for all background pixels
576 // Calculate Euclidean distance for all background pixels
580577 separableMultiDistance(source, dest, true);
581578 \endcode
582579
591588 Array const & pixelPitch)
592589 {
593590 separableMultiDistSquared( s, shape, src, d, dest, background, pixelPitch);
594
591
595592 // Finally, calculate the square root of the distances
596593 using namespace vigra::functor;
597
594
598595 transformMultiArray( d, shape, dest, d, dest, sqrt(Arg1()) );
599596 }
600597
604601 DestIterator d, DestAccessor dest, bool background)
605602 {
606603 separableMultiDistSquared( s, shape, src, d, dest, background);
607
604
608605 // Finally, calculate the square root of the distances
609606 using namespace vigra::functor;
610
607
611608 transformMultiArray( d, shape, dest, d, dest, sqrt(Arg1()) );
612609 }
613610
632629
633630 template <unsigned int N, class T1, class S1,
634631 class T2, class S2, class Array>
635 inline void
632 inline void
636633 separableMultiDistance(MultiArrayView<N, T1, S1> const & source,
637 MultiArrayView<N, T2, S2> dest,
634 MultiArrayView<N, T2, S2> dest,
638635 bool background,
639636 Array const & pixelPitch)
640637 {
646643
647644 template <unsigned int N, class T1, class S1,
648645 class T2, class S2>
649 inline void
646 inline void
650647 separableMultiDistance(MultiArrayView<N, T1, S1> const & source,
651 MultiArrayView<N, T2, S2> dest,
648 MultiArrayView<N, T2, S2> dest,
652649 bool background)
653650 {
654651 vigra_precondition(source.shape() == dest.shape(),
660657 //%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% BoundaryDistanceTransform %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
661658
662659 //rewrite labeled data and work with separableMultiDist
663 namespace lemon_graph {
660 namespace lemon_graph {
664661
665662 template <class Graph, class T1Map, class T2Map>
666 void
663 void
667664 markRegionBoundaries(Graph const & g,
668665 T1Map const & labels,
669666 T2Map & out)
672669 typedef typename Graph::OutBackArcIt neighbor_iterator;
673670
674671 //find faces
675 for (graph_scanner node(g); node != INVALID; ++node)
672 for (graph_scanner node(g); node != INVALID; ++node)
676673 {
677674 typename T1Map::value_type center = labels[*node];
678
675
679676 for (neighbor_iterator arc(g, node); arc != INVALID; ++arc)
680677 {
681678 // set adjacent nodes with different labels to 1
719716 /********************************************************/
720717
721718 template <class DestIterator, class LabelIterator>
722 void
723 boundaryDistParabola(DestIterator is, DestIterator iend,
724 LabelIterator ilabels,
719 void
720 boundaryDistParabola(DestIterator is, DestIterator iend,
721 LabelIterator ilabels,
725722 double dmax,
726723 bool array_border_is_active=false)
727724 {
755752 Influence & s = _stack.back();
756753 double diff = current - s.center;
757754 double intersection = current + (apex_height - s.apex_height - sq(diff)) / (2.0 * diff);
758
755
759756 if(intersection < s.left) // previous parabola has no influence
760757 {
761758 _stack.pop_back();
772769 _stack.push_back(Influence(apex_height, intersection, current, w));
773770 if(current < w && current_label == *ilabels)
774771 break; // finished present pixel, advance to next one
775
772
776773 // label changed => finalize the current segment
777774 typename Stack::iterator it = _stack.begin();
778775 for(double c = begin; c < current; ++c, ++id)
779776 {
780 while(c >= it->right)
781 ++it;
777 while(c >= it->right)
778 ++it;
782779 *id = sq(c - it->center) + it->apex_height;
783780 }
784781 if(current == w)
785782 break; // stop when this was the last segment
786
783
787784 // initialize the new segment
788785 begin = current;
789786 current_label = *ilabels;
790787 apex_height = *is;
791788 Stack(1, Influence(0.0, begin-1.0, begin-1.0, w)).swap(_stack);
792 // don't advance to next pixel here, because the present pixel must also
789 // don't advance to next pixel here, because the present pixel must also
793790 // be analysed in the context of the new segment
794791 }
795792 }
813810 typedef typename MultiArrayView<N, T2, S2>::traverser DestIterator;
814811 typedef MultiArrayNavigator<LabelIterator, N> LabelNavigator;
815812 typedef MultiArrayNavigator<DestIterator, N> DNavigator;
816
813
817814 dest = dmax;
818 for( int d = 0; d < N; ++d )
815 for( unsigned d = 0; d < N; ++d )
819816 {
820817 LabelNavigator lnav( labels.traverser_begin(), labels.shape(), d );
821818 DNavigator dnav( dest.traverser_begin(), dest.shape(), d );
823820 for( ; dnav.hasMore(); dnav++, lnav++ )
824821 {
825822 boundaryDistParabola(dnav.begin(), dnav.end(),
826 lnav.begin(),
823 lnav.begin(),
827824 dmax, array_border_is_active);
828825 }
829826 }
831828
832829 } // namespace detail
833830
834 /** \brief Specify which boundary is used for boundaryMultiDistance().
835
831 /** \brief Specify which boundary is used for boundaryMultiDistance().
832
836833 */
837834 enum BoundaryDistanceTag {
838835 OuterBoundary, ///< Pixels just outside of each region
863860 BoundaryDistanceTag boundary=InterpixelBoundary);
864861 }
865862 \endcode
866
863
867864 This function computes the distance transform of a labeled image <i>simultaneously</i>
868865 for all regions. Depending on the requested type of \a boundary, three modes
869866 are supported:
874871 <li><tt>InterpixelBoundary</tt> (default): Like <tt>OuterBoundary</tt>, but shift the distance
875872 to the interpixel boundary by subtracting 1/2. This makes the distances consistent
876873 across boundaries.</li>
877 <li><tt>InnerBoundary</tt>: In each region, compute the distance to the nearest pixel in the
874 <li><tt>InnerBoundary</tt>: In each region, compute the distance to the nearest pixel in the
878875 region which is adjacent to the boundary. </li>
879876 </ul>
880 If <tt>array_border_is_active=true</tt>, the
881 outer border of the array (i.e. the interpixel boundary between the array
882 and the infinite region) is also used. Otherwise (the default), regions
877 If <tt>array_border_is_active=true</tt>, the
878 outer border of the array (i.e. the interpixel boundary between the array
879 and the infinite region) is also used. Otherwise (the default), regions
883880 touching the array border are treated as if they extended to infinity.
884
881
885882 <b> Usage:</b>
886883
887884 <b>\#include</b> \<vigra/multi_distance.hxx\><br/>
896893
897894 // find regions (interpixel boundaries are implied)
898895 labelMultiArray(source, labels);
899
900 // Calculate Euclidean distance to interpixel boundary for all pixels
896
897 // Calculate Euclidean distance to interpixel boundary for all pixels
901898 boundaryMultiDistance(labels, dest);
902899 \endcode
903900
915912 {
916913 vigra_precondition(labels.shape() == dest.shape(),
917914 "boundaryMultiDistance(): shape mismatch between input and output.");
918
915
919916 using namespace vigra::functor;
920
917
921918 if(boundary == InnerBoundary)
922919 {
923920 MultiArray<N, unsigned char> boundaries(labels.shape());
924
921
925922 markRegionBoundaries(labels, boundaries, IndirectNeighborhood);
926923 if(array_border_is_active)
927924 initMultiArrayBorder(boundaries, 1, 1);
930927 else
931928 {
932929 T2 offset = 0.0;
933
930
934931 if(boundary == InterpixelBoundary)
935932 {
936933 vigra_precondition(!NumericTraits<T2>::isIntegral::value,
3636 #define VIGRA_MULTI_FFT_HXX
3737
3838 #include "fftw3.hxx"
39 #include "metaprogramming.hxx"
3940 #include "multi_array.hxx"
4041 #include "multi_math.hxx"
4142 #include "navigator.hxx"
5051 /* */
5152 /********************************************************/
5253
53 /** \addtogroup FourierTransform
54 /** \addtogroup FourierTransform
5455 */
5556 //@{
5657
6263 typedef typename MultiArrayView<N, T, C>::traverser Traverser;
6364 typedef MultiArrayNavigator<Traverser, N> Navigator;
6465 typedef typename Navigator::iterator Iterator;
65
66
6667 for(unsigned int d = startDimension; d < N; ++d)
6768 {
6869 Navigator nav(a.traverser_begin(), a.shape(), d);
7273 Iterator i = nav.begin();
7374 int s = nav.end() - i;
7475 int s2 = s/2;
75
76
7677 if(even(s))
7778 {
7879 for(int k=0; k<s2; ++k)
8081 std::swap(i[k], i[k+s2]);
8182 }
8283 }
83 else
84 else
8485 {
8586 T v = i[0];
8687 for(int k=0; k<s2; ++k)
100101 typedef typename MultiArrayView<N, T, C>::traverser Traverser;
101102 typedef MultiArrayNavigator<Traverser, N> Navigator;
102103 typedef typename Navigator::iterator Iterator;
103
104
104105 for(unsigned int d = startDimension; d < N; ++d)
105106 {
106107 Navigator nav(a.traverser_begin(), a.shape(), d);
110111 Iterator i = nav.begin();
111112 int s = nav.end() - i;
112113 int s2 = s/2;
113
114
114115 if(even(s))
115116 {
116117 for(int k=0; k<s2; ++k)
118119 std::swap(i[k], i[k+s2]);
119120 }
120121 }
121 else
122 else
122123 {
123124 T v = i[s2];
124125 for(int k=s2; k>0; --k)
174175 {
175176 public:
176177 threading::lock_guard<threading::mutex> guard_;
177
178
178179 FFTWLock()
179180 : guard_(plan_mutex_)
180181 {}
181
182
182183 static threading::mutex plan_mutex_;
183184 };
184185
191192 class FFTWLock
192193 {
193194 public:
194
195
195196 FFTWLock()
196197 {}
197198 };
198199
199200 #endif // not VIGRA_SINGLE_THREADED
200201
201 inline fftw_plan
202 fftwPlanCreate(unsigned int N, int* shape,
202 inline fftw_plan
203 fftwPlanCreate(unsigned int N, int* shape,
203204 FFTWComplex<double> * in, int* instrides, int instep,
204205 FFTWComplex<double> * out, int* outstrides, int outstep,
205206 int sign, unsigned int planner_flags)
210211 sign, planner_flags);
211212 }
212213
213 inline fftw_plan
214 fftwPlanCreate(unsigned int N, int* shape,
214 inline fftw_plan
215 fftwPlanCreate(unsigned int N, int* shape,
215216 double * in, int* instrides, int instep,
216217 FFTWComplex<double> * out, int* outstrides, int outstep,
217218 int /*sign is ignored*/, unsigned int planner_flags)
222223 planner_flags);
223224 }
224225
225 inline fftw_plan
226 fftwPlanCreate(unsigned int N, int* shape,
226 inline fftw_plan
227 fftwPlanCreate(unsigned int N, int* shape,
227228 FFTWComplex<double> * in, int* instrides, int instep,
228229 double * out, int* outstrides, int outstep,
229230 int /*sign is ignored*/, unsigned int planner_flags)
234235 planner_flags);
235236 }
236237
237 inline fftwf_plan
238 fftwPlanCreate(unsigned int N, int* shape,
238 inline fftwf_plan
239 fftwPlanCreate(unsigned int N, int* shape,
239240 FFTWComplex<float> * in, int* instrides, int instep,
240241 FFTWComplex<float> * out, int* outstrides, int outstep,
241242 int sign, unsigned int planner_flags)
246247 sign, planner_flags);
247248 }
248249
249 inline fftwf_plan
250 fftwPlanCreate(unsigned int N, int* shape,
250 inline fftwf_plan
251 fftwPlanCreate(unsigned int N, int* shape,
251252 float * in, int* instrides, int instep,
252253 FFTWComplex<float> * out, int* outstrides, int outstep,
253254 int /*sign is ignored*/, unsigned int planner_flags)
258259 planner_flags);
259260 }
260261
261 inline fftwf_plan
262 fftwPlanCreate(unsigned int N, int* shape,
262 inline fftwf_plan
263 fftwPlanCreate(unsigned int N, int* shape,
263264 FFTWComplex<float> * in, int* instrides, int instep,
264265 float * out, int* outstrides, int outstep,
265266 int /*sign is ignored*/, unsigned int planner_flags)
270271 planner_flags);
271272 }
272273
273 inline fftwl_plan
274 fftwPlanCreate(unsigned int N, int* shape,
274 inline fftwl_plan
275 fftwPlanCreate(unsigned int N, int* shape,
275276 FFTWComplex<long double> * in, int* instrides, int instep,
276277 FFTWComplex<long double> * out, int* outstrides, int outstep,
277278 int sign, unsigned int planner_flags)
282283 sign, planner_flags);
283284 }
284285
285 inline fftwl_plan
286 fftwPlanCreate(unsigned int N, int* shape,
286 inline fftwl_plan
287 fftwPlanCreate(unsigned int N, int* shape,
287288 long double * in, int* instrides, int instep,
288289 FFTWComplex<long double> * out, int* outstrides, int outstep,
289290 int /*sign is ignored*/, unsigned int planner_flags)
294295 planner_flags);
295296 }
296297
297 inline fftwl_plan
298 fftwPlanCreate(unsigned int N, int* shape,
298 inline fftwl_plan
299 fftwPlanCreate(unsigned int N, int* shape,
299300 FFTWComplex<long double> * in, int* instrides, int instep,
300301 long double * out, int* outstrides, int outstep,
301302 int /*sign is ignored*/, unsigned int planner_flags)
324325 fftwl_destroy_plan(plan);
325326 }
326327
327 inline void
328 fftwPlanExecute(fftw_plan plan)
328 inline void
329 fftwPlanExecute(fftw_plan plan)
329330 {
330331 fftw_execute(plan);
331332 }
332333
333 inline void
334 fftwPlanExecute(fftwf_plan plan)
334 inline void
335 fftwPlanExecute(fftwf_plan plan)
335336 {
336337 fftwf_execute(plan);
337338 }
338339
339 inline void
340 fftwPlanExecute(fftwl_plan plan)
340 inline void
341 fftwPlanExecute(fftwl_plan plan)
341342 {
342343 fftwl_execute(plan);
343344 }
344345
345 inline void
346 fftwPlanExecute(fftw_plan plan, FFTWComplex<double> * in, FFTWComplex<double> * out)
346 inline void
347 fftwPlanExecute(fftw_plan plan, FFTWComplex<double> * in, FFTWComplex<double> * out)
347348 {
348349 fftw_execute_dft(plan, (fftw_complex *)in, (fftw_complex *)out);
349350 }
350351
351 inline void
352 fftwPlanExecute(fftw_plan plan, double * in, FFTWComplex<double> * out)
352 inline void
353 fftwPlanExecute(fftw_plan plan, double * in, FFTWComplex<double> * out)
353354 {
354355 fftw_execute_dft_r2c(plan, in, (fftw_complex *)out);
355356 }
356357
357 inline void
358 fftwPlanExecute(fftw_plan plan, FFTWComplex<double> * in, double * out)
358 inline void
359 fftwPlanExecute(fftw_plan plan, FFTWComplex<double> * in, double * out)
359360 {
360361 fftw_execute_dft_c2r(plan, (fftw_complex *)in, out);
361362 }
362363
363 inline void
364 fftwPlanExecute(fftwf_plan plan, FFTWComplex<float> * in, FFTWComplex<float> * out)
364 inline void
365 fftwPlanExecute(fftwf_plan plan, FFTWComplex<float> * in, FFTWComplex<float> * out)
365366 {
366367 fftwf_execute_dft(plan, (fftwf_complex *)in, (fftwf_complex *)out);
367368 }
368369
369 inline void
370 fftwPlanExecute(fftwf_plan plan, float * in, FFTWComplex<float> * out)
370 inline void
371 fftwPlanExecute(fftwf_plan plan, float * in, FFTWComplex<float> * out)
371372 {
372373 fftwf_execute_dft_r2c(plan, in, (fftwf_complex *)out);
373374 }
374375
375 inline void
376 fftwPlanExecute(fftwf_plan plan, FFTWComplex<float> * in, float * out)
376 inline void
377 fftwPlanExecute(fftwf_plan plan, FFTWComplex<float> * in, float * out)
377378 {
378379 fftwf_execute_dft_c2r(plan, (fftwf_complex *)in, out);
379380 }
380381
381 inline void
382 fftwPlanExecute(fftwl_plan plan, FFTWComplex<long double> * in, FFTWComplex<long double> * out)
382 inline void
383 fftwPlanExecute(fftwl_plan plan, FFTWComplex<long double> * in, FFTWComplex<long double> * out)
383384 {
384385 fftwl_execute_dft(plan, (fftwl_complex *)in, (fftwl_complex *)out);
385386 }
386387
387 inline void
388 fftwPlanExecute(fftwl_plan plan, long double * in, FFTWComplex<long double> * out)
388 inline void
389 fftwPlanExecute(fftwl_plan plan, long double * in, FFTWComplex<long double> * out)
389390 {
390391 fftwl_execute_dft_r2c(plan, in, (fftwl_complex *)out);
391392 }
392393
393 inline void
394 fftwPlanExecute(fftwl_plan plan, FFTWComplex<long double> * in, long double * out)
394 inline void
395 fftwPlanExecute(fftwl_plan plan, FFTWComplex<long double> * in, long double * out)
395396 {
396397 fftwl_execute_dft_c2r(plan, (fftwl_complex *)in, out);
397398 }
403404 static const int evenSize = 1063;
404405 static int goodSizes[size];
405406 static int goodEvenSizes[evenSize];
406
407
407408 static inline int find(int s)
408409 {
409410 if(s <= 0 || s >= goodSizes[size-1])
428429 return *upperBound;
429430 }
430431 };
431
432
432433 // Image sizes where FFTW is fast. The list contains all
433434 // numbers less than 100000 whose prime decomposition is of the form
434 // 2^a*3^b*5^c*7^d*11^e*13^f, where e+f is either 0 or 1, and the
435 // 2^a*3^b*5^c*7^d*11^e*13^f, where e+f is either 0 or 1, and the
435436 // other exponents are arbitrary
436437 template <int DUMMY>
437438 int FFTWPaddingSize<DUMMY>::goodSizes[size] = {
438 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
439 18, 20, 21, 22, 24, 25, 26, 27, 28, 30, 32, 33, 35, 36, 39, 40, 42, 44, 45, 48,
440 49, 50, 52, 54, 55, 56, 60, 63, 64, 65, 66, 70, 72, 75, 77, 78, 80, 81,
441 84, 88, 90, 91, 96, 98, 99, 100, 104, 105, 108, 110, 112, 117, 120, 125,
442 126, 128, 130, 132, 135, 140, 144, 147, 150, 154, 156, 160, 162, 165,
443 168, 175, 176, 180, 182, 189, 192, 195, 196, 198, 200, 208, 210, 216,
444 220, 224, 225, 231, 234, 240, 243, 245, 250, 252, 256, 260, 264, 270,
445 273, 275, 280, 288, 294, 297, 300, 308, 312, 315, 320, 324, 325, 330,
446 336, 343, 350, 351, 352, 360, 364, 375, 378, 384, 385, 390, 392, 396,
447 400, 405, 416, 420, 432, 440, 441, 448, 450, 455, 462, 468, 480, 486,
448 490, 495, 500, 504, 512, 520, 525, 528, 539, 540, 546, 550, 560, 567,
449 576, 585, 588, 594, 600, 616, 624, 625, 630, 637, 640, 648, 650, 660,
450 672, 675, 686, 693, 700, 702, 704, 720, 728, 729, 735, 750, 756, 768,
451 770, 780, 784, 792, 800, 810, 819, 825, 832, 840, 864, 875, 880, 882,
452 891, 896, 900, 910, 924, 936, 945, 960, 972, 975, 980, 990, 1000, 1008,
453 1024, 1029, 1040, 1050, 1053, 1056, 1078, 1080, 1092, 1100, 1120, 1125,
454 1134, 1152, 1155, 1170, 1176, 1188, 1200, 1215, 1225, 1232, 1248, 1250,
455 1260, 1274, 1280, 1296, 1300, 1320, 1323, 1344, 1350, 1365, 1372, 1375,
456 1386, 1400, 1404, 1408, 1440, 1456, 1458, 1470, 1485, 1500, 1512, 1536,
457 1540, 1560, 1568, 1575, 1584, 1600, 1617, 1620, 1625, 1638, 1650, 1664,
458 1680, 1701, 1715, 1728, 1750, 1755, 1760, 1764, 1782, 1792, 1800, 1820,
459 1848, 1872, 1875, 1890, 1911, 1920, 1925, 1944, 1950, 1960, 1980, 2000,
460 2016, 2025, 2048, 2058, 2079, 2080, 2100, 2106, 2112, 2156, 2160, 2184,
461 2187, 2200, 2205, 2240, 2250, 2268, 2275, 2304, 2310, 2340, 2352, 2376,
462 2400, 2401, 2430, 2450, 2457, 2464, 2475, 2496, 2500, 2520, 2548, 2560,
463 2592, 2600, 2625, 2640, 2646, 2673, 2688, 2695, 2700, 2730, 2744, 2750,
464 2772, 2800, 2808, 2816, 2835, 2880, 2912, 2916, 2925, 2940, 2970, 3000,
465 3024, 3072, 3080, 3087, 3120, 3125, 3136, 3150, 3159, 3168, 3185, 3200,
466 3234, 3240, 3250, 3276, 3300, 3328, 3360, 3375, 3402, 3430, 3456, 3465,
467 3500, 3510, 3520, 3528, 3564, 3584, 3600, 3640, 3645, 3675, 3696, 3744,
468 3750, 3773, 3780, 3822, 3840, 3850, 3888, 3900, 3920, 3960, 3969, 4000,
469 4032, 4050, 4095, 4096, 4116, 4125, 4158, 4160, 4200, 4212, 4224, 4312,
470 4320, 4368, 4374, 4375, 4400, 4410, 4455, 4459, 4480, 4500, 4536, 4550,
471 4608, 4620, 4680, 4704, 4725, 4752, 4800, 4802, 4851, 4860, 4875, 4900,
472 4914, 4928, 4950, 4992, 5000, 5040, 5096, 5103, 5120, 5145, 5184, 5200,
473 5250, 5265, 5280, 5292, 5346, 5376, 5390, 5400, 5460, 5488, 5500, 5544,
474 5600, 5616, 5625, 5632, 5670, 5733, 5760, 5775, 5824, 5832, 5850, 5880,
475 5940, 6000, 6048, 6075, 6125, 6144, 6160, 6174, 6237, 6240, 6250, 6272,
476 6300, 6318, 6336, 6370, 6400, 6468, 6480, 6500, 6552, 6561, 6600, 6615,
477 6656, 6720, 6750, 6804, 6825, 6860, 6875, 6912, 6930, 7000, 7020, 7040,
478 7056, 7128, 7168, 7200, 7203, 7280, 7290, 7350, 7371, 7392, 7425, 7488,
479 7500, 7546, 7560, 7644, 7680, 7700, 7776, 7800, 7840, 7875, 7920, 7938,
480 8000, 8019, 8064, 8085, 8100, 8125, 8190, 8192, 8232, 8250, 8316, 8320,
481 8400, 8424, 8448, 8505, 8575, 8624, 8640, 8736, 8748, 8750, 8775, 8800,
482 8820, 8910, 8918, 8960, 9000, 9072, 9100, 9216, 9240, 9261, 9360, 9375,
483 9408, 9450, 9477, 9504, 9555, 9600, 9604, 9625, 9702, 9720, 9750, 9800,
484 9828, 9856, 9900, 9984, 10000, 10080, 10125, 10192, 10206, 10240, 10290,
485 10368, 10395, 10400, 10500, 10530, 10560, 10584, 10692, 10752, 10780,
486 10800, 10920, 10935, 10976, 11000, 11025, 11088, 11200, 11232, 11250,
487 11264, 11319, 11340, 11375, 11466, 11520, 11550, 11648, 11664, 11700,
488 11760, 11880, 11907, 12000, 12005, 12096, 12150, 12250, 12285, 12288,
489 12320, 12348, 12375, 12474, 12480, 12500, 12544, 12600, 12636, 12672,
490 12740, 12800, 12936, 12960, 13000, 13104, 13122, 13125, 13200, 13230,
491 13312, 13365, 13377, 13440, 13475, 13500, 13608, 13650, 13720, 13750,
492 13824, 13860, 14000, 14040, 14080, 14112, 14175, 14256, 14336, 14400,
493 14406, 14553, 14560, 14580, 14625, 14700, 14742, 14784, 14850, 14976,
494 15000, 15092, 15120, 15288, 15309, 15360, 15400, 15435, 15552, 15600,
495 15625, 15680, 15750, 15795, 15840, 15876, 15925, 16000, 16038, 16128,
496 16170, 16200, 16250, 16380, 16384, 16464, 16500, 16632, 16640, 16800,
497 16807, 16848, 16875, 16896, 17010, 17150, 17199, 17248, 17280, 17325,
498 17472, 17496, 17500, 17550, 17600, 17640, 17820, 17836, 17920, 18000,
499 18144, 18200, 18225, 18375, 18432, 18480, 18522, 18711, 18720, 18750,
500 18816, 18865, 18900, 18954, 19008, 19110, 19200, 19208, 19250, 19404,
501 19440, 19500, 19600, 19656, 19683, 19712, 19800, 19845, 19968, 20000,
502 20160, 20250, 20384, 20412, 20475, 20480, 20580, 20625, 20736, 20790,
503 20800, 21000, 21060, 21120, 21168, 21384, 21504, 21560, 21600, 21609,
504 21840, 21870, 21875, 21952, 22000, 22050, 22113, 22176, 22275, 22295,
505 22400, 22464, 22500, 22528, 22638, 22680, 22750, 22932, 23040, 23100,
506 23296, 23328, 23400, 23520, 23625, 23760, 23814, 24000, 24010, 24057,
507 24192, 24255, 24300, 24375, 24500, 24570, 24576, 24640, 24696, 24750,
508 24948, 24960, 25000, 25088, 25200, 25272, 25344, 25480, 25515, 25600,
509 25725, 25872, 25920, 26000, 26208, 26244, 26250, 26325, 26400, 26411,
510 26460, 26624, 26730, 26754, 26880, 26950, 27000, 27216, 27300, 27440,
511 27500, 27648, 27720, 27783, 28000, 28080, 28125, 28160, 28224, 28350,
512 28431, 28512, 28665, 28672, 28800, 28812, 28875, 29106, 29120, 29160,
513 29250, 29400, 29484, 29568, 29700, 29952, 30000, 30184, 30240, 30375,
514 30576, 30618, 30625, 30720, 30800, 30870, 31104, 31185, 31200, 31213,
515 31250, 31360, 31500, 31590, 31680, 31752, 31850, 32000, 32076, 32256,
516 32340, 32400, 32500, 32760, 32768, 32805, 32928, 33000, 33075, 33264,
517 33280, 33600, 33614, 33696, 33750, 33792, 33957, 34020, 34125, 34300,
518 34375, 34398, 34496, 34560, 34650, 34944, 34992, 35000, 35100, 35200,
519 35280, 35640, 35672, 35721, 35840, 36000, 36015, 36288, 36400, 36450,
520 36750, 36855, 36864, 36960, 37044, 37125, 37422, 37440, 37500, 37632,
521 37730, 37800, 37908, 38016, 38220, 38400, 38416, 38500, 38808, 38880,
522 39000, 39200, 39312, 39366, 39375, 39424, 39600, 39690, 39936, 40000,
523 40095, 40131, 40320, 40425, 40500, 40625, 40768, 40824, 40950, 40960,
524 41160, 41250, 41472, 41580, 41600, 42000, 42120, 42240, 42336, 42525,
525 42768, 42875, 43008, 43120, 43200, 43218, 43659, 43680, 43740, 43750,
526 43875, 43904, 44000, 44100, 44226, 44352, 44550, 44590, 44800, 44928,
527 45000, 45056, 45276, 45360, 45500, 45864, 45927, 46080, 46200, 46305,
528 46592, 46656, 46800, 46875, 47040, 47250, 47385, 47520, 47628, 47775,
529 48000, 48020, 48114, 48125, 48384, 48510, 48600, 48750, 49000, 49140,
530 49152, 49280, 49392, 49500, 49896, 49920, 50000, 50176, 50400, 50421,
531 50544, 50625, 50688, 50960, 51030, 51200, 51450, 51597, 51744, 51840,
532 51975, 52000, 52416, 52488, 52500, 52650, 52800, 52822, 52920, 53248,
533 53460, 53508, 53760, 53900, 54000, 54432, 54600, 54675, 54880, 55000,
534 55125, 55296, 55440, 55566, 56000, 56133, 56160, 56250, 56320, 56448,
535 56595, 56700, 56862, 56875, 57024, 57330, 57344, 57600, 57624, 57750,
536 58212, 58240, 58320, 58500, 58800, 58968, 59049, 59136, 59400, 59535,
537 59904, 60000, 60025, 60368, 60480, 60750, 61152, 61236, 61250, 61425,
538 61440, 61600, 61740, 61875, 62208, 62370, 62400, 62426, 62500, 62720,
539 63000, 63180, 63360, 63504, 63700, 64000, 64152, 64512, 64680, 64800,
540 64827, 65000, 65520, 65536, 65610, 65625, 65856, 66000, 66150, 66339,
541 66528, 66560, 66825, 66885, 67200, 67228, 67375, 67392, 67500, 67584,
542 67914, 68040, 68250, 68600, 68750, 68796, 68992, 69120, 69300, 69888,
543 69984, 70000, 70200, 70400, 70560, 70875, 71280, 71344, 71442, 71680,
544 72000, 72030, 72171, 72576, 72765, 72800, 72900, 73125, 73500, 73710,
545 73728, 73920, 74088, 74250, 74844, 74880, 75000, 75264, 75460, 75600,
546 75816, 76032, 76440, 76545, 76800, 76832, 77000, 77175, 77616, 77760,
547 78000, 78125, 78400, 78624, 78732, 78750, 78848, 78975, 79200, 79233,
548 79380, 79625, 79872, 80000, 80190, 80262, 80640, 80850, 81000, 81250,
549 81536, 81648, 81900, 81920, 82320, 82500, 82944, 83160, 83200, 83349,
550 84000, 84035, 84240, 84375, 84480, 84672, 85050, 85293, 85536, 85750,
551 85995, 86016, 86240, 86400, 86436, 86625, 87318, 87360, 87480, 87500,
552 87750, 87808, 88000, 88200, 88452, 88704, 89100, 89180, 89600, 89856,
553 90000, 90112, 90552, 90720, 91000, 91125, 91728, 91854, 91875, 92160,
554 92400, 92610, 93184, 93312, 93555, 93600, 93639, 93750, 94080, 94325,
555 94500, 94770, 95040, 95256, 95550, 96000, 96040, 96228, 96250, 96768,
556 97020, 97200, 97500, 98000, 98280, 98304, 98415, 98560, 98784, 99000,
557 99225, 99792, 99840
558 };
439 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
440 18, 20, 21, 22, 24, 25, 26, 27, 28, 30, 32, 33, 35, 36, 39, 40, 42, 44, 45, 48,
441 49, 50, 52, 54, 55, 56, 60, 63, 64, 65, 66, 70, 72, 75, 77, 78, 80, 81,
442 84, 88, 90, 91, 96, 98, 99, 100, 104, 105, 108, 110, 112, 117, 120, 125,
443 126, 128, 130, 132, 135, 140, 144, 147, 150, 154, 156, 160, 162, 165,
444 168, 175, 176, 180, 182, 189, 192, 195, 196, 198, 200, 208, 210, 216,
445 220, 224, 225, 231, 234, 240, 243, 245, 250, 252, 256, 260, 264, 270,
446 273, 275, 280, 288, 294, 297, 300, 308, 312, 315, 320, 324, 325, 330,
447 336, 343, 350, 351, 352, 360, 364, 375, 378, 384, 385, 390, 392, 396,
448 400, 405, 416, 420, 432, 440, 441, 448, 450, 455, 462, 468, 480, 486,
449 490, 495, 500, 504, 512, 520, 525, 528, 539, 540, 546, 550, 560, 567,
450 576, 585, 588, 594, 600, 616, 624, 625, 630, 637, 640, 648, 650, 660,
451 672, 675, 686, 693, 700, 702, 704, 720, 728, 729, 735, 750, 756, 768,
452 770, 780, 784, 792, 800, 810, 819, 825, 832, 840, 864, 875, 880, 882,
453 891, 896, 900, 910, 924, 936, 945, 960, 972, 975, 980, 990, 1000, 1008,
454 1024, 1029, 1040, 1050, 1053, 1056, 1078, 1080, 1092, 1100, 1120, 1125,
455 1134, 1152, 1155, 1170, 1176, 1188, 1200, 1215, 1225, 1232, 1248, 1250,
456 1260, 1274, 1280, 1296, 1300, 1320, 1323, 1344, 1350, 1365, 1372, 1375,
457 1386, 1400, 1404, 1408, 1440, 1456, 1458, 1470, 1485, 1500, 1512, 1536,
458 1540, 1560, 1568, 1575, 1584, 1600, 1617, 1620, 1625, 1638, 1650, 1664,
459 1680, 1701, 1715, 1728, 1750, 1755, 1760, 1764, 1782, 1792, 1800, 1820,
460 1848, 1872, 1875, 1890, 1911, 1920, 1925, 1944, 1950, 1960, 1980, 2000,
461 2016, 2025, 2048, 2058, 2079, 2080, 2100, 2106, 2112, 2156, 2160, 2184,
462 2187, 2200, 2205, 2240, 2250, 2268, 2275, 2304, 2310, 2340, 2352, 2376,
463 2400, 2401, 2430, 2450, 2457, 2464, 2475, 2496, 2500, 2520, 2548, 2560,
464 2592, 2600, 2625, 2640, 2646, 2673, 2688, 2695, 2700, 2730, 2744, 2750,
465 2772, 2800, 2808, 2816, 2835, 2880, 2912, 2916, 2925, 2940, 2970, 3000,
466 3024, 3072, 3080, 3087, 3120, 3125, 3136, 3150, 3159, 3168, 3185, 3200,
467 3234, 3240, 3250, 3276, 3300, 3328, 3360, 3375, 3402, 3430, 3456, 3465,
468 3500, 3510, 3520, 3528, 3564, 3584, 3600, 3640, 3645, 3675, 3696, 3744,
469 3750, 3773, 3780, 3822, 3840, 3850, 3888, 3900, 3920, 3960, 3969, 4000,
470 4032, 4050, 4095, 4096, 4116, 4125, 4158, 4160, 4200, 4212, 4224, 4312,
471 4320, 4368, 4374, 4375, 4400, 4410, 4455, 4459, 4480, 4500, 4536, 4550,
472 4608, 4620, 4680, 4704, 4725, 4752, 4800, 4802, 4851, 4860, 4875, 4900,
473 4914, 4928, 4950, 4992, 5000, 5040, 5096, 5103, 5120, 5145, 5184, 5200,
474 5250, 5265, 5280, 5292, 5346, 5376, 5390, 5400, 5460, 5488, 5500, 5544,
475 5600, 5616, 5625, 5632, 5670, 5733, 5760, 5775, 5824, 5832, 5850, 5880,
476 5940, 6000, 6048, 6075, 6125, 6144, 6160, 6174, 6237, 6240, 6250, 6272,
477 6300, 6318, 6336, 6370, 6400, 6468, 6480, 6500, 6552, 6561, 6600, 6615,
478 6656, 6720, 6750, 6804, 6825, 6860, 6875, 6912, 6930, 7000, 7020, 7040,
479 7056, 7128, 7168, 7200, 7203, 7280, 7290, 7350, 7371, 7392, 7425, 7488,
480 7500, 7546, 7560, 7644, 7680, 7700, 7776, 7800, 7840, 7875, 7920, 7938,
481 8000, 8019, 8064, 8085, 8100, 8125, 8190, 8192, 8232, 8250, 8316, 8320,
482 8400, 8424, 8448, 8505, 8575, 8624, 8640, 8736, 8748, 8750, 8775, 8800,
483 8820, 8910, 8918, 8960, 9000, 9072, 9100, 9216, 9240, 9261, 9360, 9375,
484 9408, 9450, 9477, 9504, 9555, 9600, 9604, 9625, 9702, 9720, 9750, 9800,
485 9828, 9856, 9900, 9984, 10000, 10080, 10125, 10192, 10206, 10240, 10290,
486 10368, 10395, 10400, 10500, 10530, 10560, 10584, 10692, 10752, 10780,
487 10800, 10920, 10935, 10976, 11000, 11025, 11088, 11200, 11232, 11250,
488 11264, 11319, 11340, 11375, 11466, 11520, 11550, 11648, 11664, 11700,
489 11760, 11880, 11907, 12000, 12005, 12096, 12150, 12250, 12285, 12288,
490 12320, 12348, 12375, 12474, 12480, 12500, 12544, 12600, 12636, 12672,
491 12740, 12800, 12936, 12960, 13000, 13104, 13122, 13125, 13200, 13230,
492 13312, 13365, 13377, 13440, 13475, 13500, 13608, 13650, 13720, 13750,
493 13824, 13860, 14000, 14040, 14080, 14112, 14175, 14256, 14336, 14400,
494 14406, 14553, 14560, 14580, 14625, 14700, 14742, 14784, 14850, 14976,
495 15000, 15092, 15120, 15288, 15309, 15360, 15400, 15435, 15552, 15600,
496 15625, 15680, 15750, 15795, 15840, 15876, 15925, 16000, 16038, 16128,
497 16170, 16200, 16250, 16380, 16384, 16464, 16500, 16632, 16640, 16800,
498 16807, 16848, 16875, 16896, 17010, 17150, 17199, 17248, 17280, 17325,
499 17472, 17496, 17500, 17550, 17600, 17640, 17820, 17836, 17920, 18000,
500 18144, 18200, 18225, 18375, 18432, 18480, 18522, 18711, 18720, 18750,
501 18816, 18865, 18900, 18954, 19008, 19110, 19200, 19208, 19250, 19404,
502 19440, 19500, 19600, 19656, 19683, 19712, 19800, 19845, 19968, 20000,
503 20160, 20250, 20384, 20412, 20475, 20480, 20580, 20625, 20736, 20790,
504 20800, 21000, 21060, 21120, 21168, 21384, 21504, 21560, 21600, 21609,
505 21840, 21870, 21875, 21952, 22000, 22050, 22113, 22176, 22275, 22295,
506 22400, 22464, 22500, 22528, 22638, 22680, 22750, 22932, 23040, 23100,
507 23296, 23328, 23400, 23520, 23625, 23760, 23814, 24000, 24010, 24057,
508 24192, 24255, 24300, 24375, 24500, 24570, 24576, 24640, 24696, 24750,
509 24948, 24960, 25000, 25088, 25200, 25272, 25344, 25480, 25515, 25600,
510 25725, 25872, 25920, 26000, 26208, 26244, 26250, 26325, 26400, 26411,
511 26460, 26624, 26730, 26754, 26880, 26950, 27000, 27216, 27300, 27440,
512 27500, 27648, 27720, 27783, 28000, 28080, 28125, 28160, 28224, 28350,
513 28431, 28512, 28665, 28672, 28800, 28812, 28875, 29106, 29120, 29160,
514 29250, 29400, 29484, 29568, 29700, 29952, 30000, 30184, 30240, 30375,
515 30576, 30618, 30625, 30720, 30800, 30870, 31104, 31185, 31200, 31213,
516 31250, 31360, 31500, 31590, 31680, 31752, 31850, 32000, 32076, 32256,
517 32340, 32400, 32500, 32760, 32768, 32805, 32928, 33000, 33075, 33264,
518 33280, 33600, 33614, 33696, 33750, 33792, 33957, 34020, 34125, 34300,
519 34375, 34398, 34496, 34560, 34650, 34944, 34992, 35000, 35100, 35200,
520 35280, 35640, 35672, 35721, 35840, 36000, 36015, 36288, 36400, 36450,
521 36750, 36855, 36864, 36960, 37044, 37125, 37422, 37440, 37500, 37632,
522 37730, 37800, 37908, 38016, 38220, 38400, 38416, 38500, 38808, 38880,
523 39000, 39200, 39312, 39366, 39375, 39424, 39600, 39690, 39936, 40000,
524 40095, 40131, 40320, 40425, 40500, 40625, 40768, 40824, 40950, 40960,
525 41160, 41250, 41472, 41580, 41600, 42000, 42120, 42240, 42336, 42525,
526 42768, 42875, 43008, 43120, 43200, 43218, 43659, 43680, 43740, 43750,
527 43875, 43904, 44000, 44100, 44226, 44352, 44550, 44590, 44800, 44928,
528 45000, 45056, 45276, 45360, 45500, 45864, 45927, 46080, 46200, 46305,
529 46592, 46656, 46800, 46875, 47040, 47250, 47385, 47520, 47628, 47775,
530 48000, 48020, 48114, 48125, 48384, 48510, 48600, 48750, 49000, 49140,
531 49152, 49280, 49392, 49500, 49896, 49920, 50000, 50176, 50400, 50421,
532 50544, 50625, 50688, 50960, 51030, 51200, 51450, 51597, 51744, 51840,
533 51975, 52000, 52416, 52488, 52500, 52650, 52800, 52822, 52920, 53248,
534 53460, 53508, 53760, 53900, 54000, 54432, 54600, 54675, 54880, 55000,
535 55125, 55296, 55440, 55566, 56000, 56133, 56160, 56250, 56320, 56448,
536 56595, 56700, 56862, 56875, 57024, 57330, 57344, 57600, 57624, 57750,
537 58212, 58240, 58320, 58500, 58800, 58968, 59049, 59136, 59400, 59535,
538 59904, 60000, 60025, 60368, 60480, 60750, 61152, 61236, 61250, 61425,
539 61440, 61600, 61740, 61875, 62208, 62370, 62400, 62426, 62500, 62720,
540 63000, 63180, 63360, 63504, 63700, 64000, 64152, 64512, 64680, 64800,
541 64827, 65000, 65520, 65536, 65610, 65625, 65856, 66000, 66150, 66339,
542 66528, 66560, 66825, 66885, 67200, 67228, 67375, 67392, 67500, 67584,
543 67914, 68040, 68250, 68600, 68750, 68796, 68992, 69120, 69300, 69888,
544 69984, 70000, 70200, 70400, 70560, 70875, 71280, 71344, 71442, 71680,
545 72000, 72030, 72171, 72576, 72765, 72800, 72900, 73125, 73500, 73710,
546 73728, 73920, 74088, 74250, 74844, 74880, 75000, 75264, 75460, 75600,
547 75816, 76032, 76440, 76545, 76800, 76832, 77000, 77175, 77616, 77760,
548 78000, 78125, 78400, 78624, 78732, 78750, 78848, 78975, 79200, 79233,
549 79380, 79625, 79872, 80000, 80190, 80262, 80640, 80850, 81000, 81250,
550 81536, 81648, 81900, 81920, 82320, 82500, 82944, 83160, 83200, 83349,
551 84000, 84035, 84240, 84375, 84480, 84672, 85050, 85293, 85536, 85750,
552 85995, 86016, 86240, 86400, 86436, 86625, 87318, 87360, 87480, 87500,
553 87750, 87808, 88000, 88200, 88452, 88704, 89100, 89180, 89600, 89856,
554 90000, 90112, 90552, 90720, 91000, 91125, 91728, 91854, 91875, 92160,
555 92400, 92610, 93184, 93312, 93555, 93600, 93639, 93750, 94080, 94325,
556 94500, 94770, 95040, 95256, 95550, 96000, 96040, 96228, 96250, 96768,
557 97020, 97200, 97500, 98000, 98280, 98304, 98415, 98560, 98784, 99000,
558 99225, 99792, 99840
559 };
559560
560561 template <int DUMMY>
561 int FFTWPaddingSize<DUMMY>::goodEvenSizes[evenSize] = {
562 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
563 24, 26, 28, 30, 32, 36, 40, 42, 44, 48, 50, 52, 54, 56, 60, 64, 66, 70,
564 72, 78, 80, 84, 88, 90, 96, 98, 100, 104, 108, 110, 112, 120, 126, 128,
565 130, 132, 140, 144, 150, 154, 156, 160, 162, 168, 176, 180, 182, 192,
566 196, 198, 200, 208, 210, 216, 220, 224, 234, 240, 250, 252, 256, 260,
567 264, 270, 280, 288, 294, 300, 308, 312, 320, 324, 330, 336, 350, 352,
568 360, 364, 378, 384, 390, 392, 396, 400, 416, 420, 432, 440, 448, 450,
569 462, 468, 480, 486, 490, 500, 504, 512, 520, 528, 540, 546, 550, 560,
570 576, 588, 594, 600, 616, 624, 630, 640, 648, 650, 660, 672, 686, 700,
571 702, 704, 720, 728, 750, 756, 768, 770, 780, 784, 792, 800, 810, 832,
572 840, 864, 880, 882, 896, 900, 910, 924, 936, 960, 972, 980, 990, 1000,
573 1008, 1024, 1040, 1050, 1056, 1078, 1080, 1092, 1100, 1120, 1134, 1152,
574 1170, 1176, 1188, 1200, 1232, 1248, 1250, 1260, 1274, 1280, 1296, 1300,
575 1320, 1344, 1350, 1372, 1386, 1400, 1404, 1408, 1440, 1456, 1458, 1470,
576 1500, 1512, 1536, 1540, 1560, 1568, 1584, 1600, 1620, 1638, 1650, 1664,
577 1680, 1728, 1750, 1760, 1764, 1782, 1792, 1800, 1820, 1848, 1872, 1890,
578 1920, 1944, 1950, 1960, 1980, 2000, 2016, 2048, 2058, 2080, 2100, 2106,
579 2112, 2156, 2160, 2184, 2200, 2240, 2250, 2268, 2304, 2310, 2340, 2352,
580 2376, 2400, 2430, 2450, 2464, 2496, 2500, 2520, 2548, 2560, 2592, 2600,
581 2640, 2646, 2688, 2700, 2730, 2744, 2750, 2772, 2800, 2808, 2816, 2880,
582 2912, 2916, 2940, 2970, 3000, 3024, 3072, 3080, 3120, 3136, 3150, 3168,
583 3200, 3234, 3240, 3250, 3276, 3300, 3328, 3360, 3402, 3430, 3456, 3500,
584 3510, 3520, 3528, 3564, 3584, 3600, 3640, 3696, 3744, 3750, 3780, 3822,
585 3840, 3850, 3888, 3900, 3920, 3960, 4000, 4032, 4050, 4096, 4116, 4158,
586 4160, 4200, 4212, 4224, 4312, 4320, 4368, 4374, 4400, 4410, 4480, 4500,
587 4536, 4550, 4608, 4620, 4680, 4704, 4752, 4800, 4802, 4860, 4900, 4914,
588 4928, 4950, 4992, 5000, 5040, 5096, 5120, 5184, 5200, 5250, 5280, 5292,
589 5346, 5376, 5390, 5400, 5460, 5488, 5500, 5544, 5600, 5616, 5632, 5670,
590 5760, 5824, 5832, 5850, 5880, 5940, 6000, 6048, 6144, 6160, 6174, 6240,
591 6250, 6272, 6300, 6318, 6336, 6370, 6400, 6468, 6480, 6500, 6552, 6600,
592 6656, 6720, 6750, 6804, 6860, 6912, 6930, 7000, 7020, 7040, 7056, 7128,
593 7168, 7200, 7280, 7290, 7350, 7392, 7488, 7500, 7546, 7560, 7644, 7680,
594 7700, 7776, 7800, 7840, 7920, 7938, 8000, 8064, 8100, 8190, 8192, 8232,
595 8250, 8316, 8320, 8400, 8424, 8448, 8624, 8640, 8736, 8748, 8750, 8800,
596 8820, 8910, 8918, 8960, 9000, 9072, 9100, 9216, 9240, 9360, 9408, 9450,
597 9504, 9600, 9604, 9702, 9720, 9750, 9800, 9828, 9856, 9900, 9984, 10000,
598 10080, 10192, 10206, 10240, 10290, 10368, 10400, 10500, 10530, 10560,
599 10584, 10692, 10752, 10780, 10800, 10920, 10976, 11000, 11088, 11200,
600 11232, 11250, 11264, 11340, 11466, 11520, 11550, 11648, 11664, 11700,
601 11760, 11880, 12000, 12096, 12150, 12250, 12288, 12320, 12348, 12474,
602 12480, 12500, 12544, 12600, 12636, 12672, 12740, 12800, 12936, 12960,
603 13000, 13104, 13122, 13200, 13230, 13312, 13440, 13500, 13608, 13650,
604 13720, 13750, 13824, 13860, 14000, 14040, 14080, 14112, 14256, 14336,
605 14400, 14406, 14560, 14580, 14700, 14742, 14784, 14850, 14976, 15000,
606 15092, 15120, 15288, 15360, 15400, 15552, 15600, 15680, 15750, 15840,
607 15876, 16000, 16038, 16128, 16170, 16200, 16250, 16380, 16384, 16464,
608 16500, 16632, 16640, 16800, 16848, 16896, 17010, 17150, 17248, 17280,
609 17472, 17496, 17500, 17550, 17600, 17640, 17820, 17836, 17920, 18000,
610 18144, 18200, 18432, 18480, 18522, 18720, 18750, 18816, 18900, 18954,
611 19008, 19110, 19200, 19208, 19250, 19404, 19440, 19500, 19600, 19656,
612 19712, 19800, 19968, 20000, 20160, 20250, 20384, 20412, 20480, 20580,
613 20736, 20790, 20800, 21000, 21060, 21120, 21168, 21384, 21504, 21560,
614 21600, 21840, 21870, 21952, 22000, 22050, 22176, 22400, 22464, 22500,
615 22528, 22638, 22680, 22750, 22932, 23040, 23100, 23296, 23328, 23400,
616 23520, 23760, 23814, 24000, 24010, 24192, 24300, 24500, 24570, 24576,
617 24640, 24696, 24750, 24948, 24960, 25000, 25088, 25200, 25272, 25344,
618 25480, 25600, 25872, 25920, 26000, 26208, 26244, 26250, 26400, 26460,
619 26624, 26730, 26754, 26880, 26950, 27000, 27216, 27300, 27440, 27500,
620 27648, 27720, 28000, 28080, 28160, 28224, 28350, 28512, 28672, 28800,
621 28812, 29106, 29120, 29160, 29250, 29400, 29484, 29568, 29700, 29952,
622 30000, 30184, 30240, 30576, 30618, 30720, 30800, 30870, 31104, 31200,
623 31250, 31360, 31500, 31590, 31680, 31752, 31850, 32000, 32076, 32256,
624 32340, 32400, 32500, 32760, 32768, 32928, 33000, 33264, 33280, 33600,
625 33614, 33696, 33750, 33792, 34020, 34300, 34398, 34496, 34560, 34650,
626 34944, 34992, 35000, 35100, 35200, 35280, 35640, 35672, 35840, 36000,
627 36288, 36400, 36450, 36750, 36864, 36960, 37044, 37422, 37440, 37500,
628 37632, 37730, 37800, 37908, 38016, 38220, 38400, 38416, 38500, 38808,
629 38880, 39000, 39200, 39312, 39366, 39424, 39600, 39690, 39936, 40000,
630 40320, 40500, 40768, 40824, 40950, 40960, 41160, 41250, 41472, 41580,
631 41600, 42000, 42120, 42240, 42336, 42768, 43008, 43120, 43200, 43218,
632 43680, 43740, 43750, 43904, 44000, 44100, 44226, 44352, 44550, 44590,
633 44800, 44928, 45000, 45056, 45276, 45360, 45500, 45864, 46080, 46200,
634 46592, 46656, 46800, 47040, 47250, 47520, 47628, 48000, 48020, 48114,
635 48384, 48510, 48600, 48750, 49000, 49140, 49152, 49280, 49392, 49500,
636 49896, 49920, 50000, 50176, 50400, 50544, 50688, 50960, 51030, 51200,
637 51450, 51744, 51840, 52000, 52416, 52488, 52500, 52650, 52800, 52822,
638 52920, 53248, 53460, 53508, 53760, 53900, 54000, 54432, 54600, 54880,
639 55000, 55296, 55440, 55566, 56000, 56160, 56250, 56320, 56448, 56700,
640 56862, 57024, 57330, 57344, 57600, 57624, 57750, 58212, 58240, 58320,
641 58500, 58800, 58968, 59136, 59400, 59904, 60000, 60368, 60480, 60750,
642 61152, 61236, 61250, 61440, 61600, 61740, 62208, 62370, 62400, 62426,
643 62500, 62720, 63000, 63180, 63360, 63504, 63700, 64000, 64152, 64512,
644 64680, 64800, 65000, 65520, 65536, 65610, 65856, 66000, 66150, 66528,
645 66560, 67200, 67228, 67392, 67500, 67584, 67914, 68040, 68250, 68600,
646 68750, 68796, 68992, 69120, 69300, 69888, 69984, 70000, 70200, 70400,
647 70560, 71280, 71344, 71442, 71680, 72000, 72030, 72576, 72800, 72900,
648 73500, 73710, 73728, 73920, 74088, 74250, 74844, 74880, 75000, 75264,
649 75460, 75600, 75816, 76032, 76440, 76800, 76832, 77000, 77616, 77760,
650 78000, 78400, 78624, 78732, 78750, 78848, 79200, 79380, 79872, 80000,
651 80190, 80262, 80640, 80850, 81000, 81250, 81536, 81648, 81900, 81920,
652 82320, 82500, 82944, 83160, 83200, 84000, 84240, 84480, 84672, 85050,
653 85536, 85750, 86016, 86240, 86400, 86436, 87318, 87360, 87480, 87500,
654 87750, 87808, 88000, 88200, 88452, 88704, 89100, 89180, 89600, 89856,
655 90000, 90112, 90552, 90720, 91000, 91728, 91854, 92160, 92400, 92610,
656 93184, 93312, 93600, 93750, 94080, 94500, 94770, 95040, 95256, 95550,
657 96000, 96040, 96228, 96250, 96768, 97020, 97200, 97500, 98000, 98280,
658 98304, 98560, 98784, 99000, 99792, 99840
659 };
562 int FFTWPaddingSize<DUMMY>::goodEvenSizes[evenSize] = {
563 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
564 24, 26, 28, 30, 32, 36, 40, 42, 44, 48, 50, 52, 54, 56, 60, 64, 66, 70,
565 72, 78, 80, 84, 88, 90, 96, 98, 100, 104, 108, 110, 112, 120, 126, 128,
566 130, 132, 140, 144, 150, 154, 156, 160, 162, 168, 176, 180, 182, 192,
567 196, 198, 200, 208, 210, 216, 220, 224, 234, 240, 250, 252, 256, 260,
568 264, 270, 280, 288, 294, 300, 308, 312, 320, 324, 330, 336, 350, 352,
569 360, 364, 378, 384, 390, 392, 396, 400, 416, 420, 432, 440, 448, 450,
570 462, 468, 480, 486, 490, 500, 504, 512, 520, 528, 540, 546, 550, 560,
571 576, 588, 594, 600, 616, 624, 630, 640, 648, 650, 660, 672, 686, 700,
572 702, 704, 720, 728, 750, 756, 768, 770, 780, 784, 792, 800, 810, 832,
573 840, 864, 880, 882, 896, 900, 910, 924, 936, 960, 972, 980, 990, 1000,
574 1008, 1024, 1040, 1050, 1056, 1078, 1080, 1092, 1100, 1120, 1134, 1152,
575 1170, 1176, 1188, 1200, 1232, 1248, 1250, 1260, 1274, 1280, 1296, 1300,
576 1320, 1344, 1350, 1372, 1386, 1400, 1404, 1408, 1440, 1456, 1458, 1470,
577 1500, 1512, 1536, 1540, 1560, 1568, 1584, 1600, 1620, 1638, 1650, 1664,
578 1680, 1728, 1750, 1760, 1764, 1782, 1792, 1800, 1820, 1848, 1872, 1890,
579 1920, 1944, 1950, 1960, 1980, 2000, 2016, 2048, 2058, 2080, 2100, 2106,
580 2112, 2156, 2160, 2184, 2200, 2240, 2250, 2268, 2304, 2310, 2340, 2352,
581 2376, 2400, 2430, 2450, 2464, 2496, 2500, 2520, 2548, 2560, 2592, 2600,
582 2640, 2646, 2688, 2700, 2730, 2744, 2750, 2772, 2800, 2808, 2816, 2880,
583 2912, 2916, 2940, 2970, 3000, 3024, 3072, 3080, 3120, 3136, 3150, 3168,
584 3200, 3234, 3240, 3250, 3276, 3300, 3328, 3360, 3402, 3430, 3456, 3500,
585 3510, 3520, 3528, 3564, 3584, 3600, 3640, 3696, 3744, 3750, 3780, 3822,
586 3840, 3850, 3888, 3900, 3920, 3960, 4000, 4032, 4050, 4096, 4116, 4158,
587 4160, 4200, 4212, 4224, 4312, 4320, 4368, 4374, 4400, 4410, 4480, 4500,
588 4536, 4550, 4608, 4620, 4680, 4704, 4752, 4800, 4802, 4860, 4900, 4914,
589 4928, 4950, 4992, 5000, 5040, 5096, 5120, 5184, 5200, 5250, 5280, 5292,
590 5346, 5376, 5390, 5400, 5460, 5488, 5500, 5544, 5600, 5616, 5632, 5670,
591 5760, 5824, 5832, 5850, 5880, 5940, 6000, 6048, 6144, 6160, 6174, 6240,
592 6250, 6272, 6300, 6318, 6336, 6370, 6400, 6468, 6480, 6500, 6552, 6600,
593 6656, 6720, 6750, 6804, 6860, 6912, 6930, 7000, 7020, 7040, 7056, 7128,
594 7168, 7200, 7280, 7290, 7350, 7392, 7488, 7500, 7546, 7560, 7644, 7680,
595 7700, 7776, 7800, 7840, 7920, 7938, 8000, 8064, 8100, 8190, 8192, 8232,
596 8250, 8316, 8320, 8400, 8424, 8448, 8624, 8640, 8736, 8748, 8750, 8800,
597 8820, 8910, 8918, 8960, 9000, 9072, 9100, 9216, 9240, 9360, 9408, 9450,
598 9504, 9600, 9604, 9702, 9720, 9750, 9800, 9828, 9856, 9900, 9984, 10000,
599 10080, 10192, 10206, 10240, 10290, 10368, 10400, 10500, 10530, 10560,
600 10584, 10692, 10752, 10780, 10800, 10920, 10976, 11000, 11088, 11200,
601 11232, 11250, 11264, 11340, 11466, 11520, 11550, 11648, 11664, 11700,
602 11760, 11880, 12000, 12096, 12150, 12250, 12288, 12320, 12348, 12474,
603 12480, 12500, 12544, 12600, 12636, 12672, 12740, 12800, 12936, 12960,
604 13000, 13104, 13122, 13200, 13230, 13312, 13440, 13500, 13608, 13650,
605 13720, 13750, 13824, 13860, 14000, 14040, 14080, 14112, 14256, 14336,
606 14400, 14406, 14560, 14580, 14700, 14742, 14784, 14850, 14976, 15000,
607 15092, 15120, 15288, 15360, 15400, 15552, 15600, 15680, 15750, 15840,
608 15876, 16000, 16038, 16128, 16170, 16200, 16250, 16380, 16384, 16464,
609 16500, 16632, 16640, 16800, 16848, 16896, 17010, 17150, 17248, 17280,
610 17472, 17496, 17500, 17550, 17600, 17640, 17820, 17836, 17920, 18000,
611 18144, 18200, 18432, 18480, 18522, 18720, 18750, 18816, 18900, 18954,
612 19008, 19110, 19200, 19208, 19250, 19404, 19440, 19500, 19600, 19656,
613 19712, 19800, 19968, 20000, 20160, 20250, 20384, 20412, 20480, 20580,
614 20736, 20790, 20800, 21000, 21060, 21120, 21168, 21384, 21504, 21560,
615 21600, 21840, 21870, 21952, 22000, 22050, 22176, 22400, 22464, 22500,
616 22528, 22638, 22680, 22750, 22932, 23040, 23100, 23296, 23328, 23400,
617 23520, 23760, 23814, 24000, 24010, 24192, 24300, 24500, 24570, 24576,
618 24640, 24696, 24750, 24948, 24960, 25000, 25088, 25200, 25272, 25344,
619 25480, 25600, 25872, 25920, 26000, 26208, 26244, 26250, 26400, 26460,
620 26624, 26730, 26754, 26880, 26950, 27000, 27216, 27300, 27440, 27500,
621 27648, 27720, 28000, 28080, 28160, 28224, 28350, 28512, 28672, 28800,
622 28812, 29106, 29120, 29160, 29250, 29400, 29484, 29568, 29700, 29952,
623 30000, 30184, 30240, 30576, 30618, 30720, 30800, 30870, 31104, 31200,
624 31250, 31360, 31500, 31590, 31680, 31752, 31850, 32000, 32076, 32256,
625 32340, 32400, 32500, 32760, 32768, 32928, 33000, 33264, 33280, 33600,
626 33614, 33696, 33750, 33792, 34020, 34300, 34398, 34496, 34560, 34650,
627 34944, 34992, 35000, 35100, 35200, 35280, 35640, 35672, 35840, 36000,
628 36288, 36400, 36450, 36750, 36864, 36960, 37044, 37422, 37440, 37500,
629 37632, 37730, 37800, 37908, 38016, 38220, 38400, 38416, 38500, 38808,
630 38880, 39000, 39200, 39312, 39366, 39424, 39600, 39690, 39936, 40000,
631 40320, 40500, 40768, 40824, 40950, 40960, 41160, 41250, 41472, 41580,
632 41600, 42000, 42120, 42240, 42336, 42768, 43008, 43120, 43200, 43218,
633 43680, 43740, 43750, 43904, 44000, 44100, 44226, 44352, 44550, 44590,
634 44800, 44928, 45000, 45056, 45276, 45360, 45500, 45864, 46080, 46200,
635 46592, 46656, 46800, 47040, 47250, 47520, 47628, 48000, 48020, 48114,
636 48384, 48510, 48600, 48750, 49000, 49140, 49152, 49280, 49392, 49500,
637 49896, 49920, 50000, 50176, 50400, 50544, 50688, 50960, 51030, 51200,
638 51450, 51744, 51840, 52000, 52416, 52488, 52500, 52650, 52800, 52822,
639 52920, 53248, 53460, 53508, 53760, 53900, 54000, 54432, 54600, 54880,
640 55000, 55296, 55440, 55566, 56000, 56160, 56250, 56320, 56448, 56700,
641 56862, 57024, 57330, 57344, 57600, 57624, 57750, 58212, 58240, 58320,
642 58500, 58800, 58968, 59136, 59400, 59904, 60000, 60368, 60480, 60750,
643 61152, 61236, 61250, 61440, 61600, 61740, 62208, 62370, 62400, 62426,
644 62500, 62720, 63000, 63180, 63360, 63504, 63700, 64000, 64152, 64512,
645 64680, 64800, 65000, 65520, 65536, 65610, 65856, 66000, 66150, 66528,
646 66560, 67200, 67228, 67392, 67500, 67584, 67914, 68040, 68250, 68600,
647 68750, 68796, 68992, 69120, 69300, 69888, 69984, 70000, 70200, 70400,
648 70560, 71280, 71344, 71442, 71680, 72000, 72030, 72576, 72800, 72900,
649 73500, 73710, 73728, 73920, 74088, 74250, 74844, 74880, 75000, 75264,
650 75460, 75600, 75816, 76032, 76440, 76800, 76832, 77000, 77616, 77760,
651 78000, 78400, 78624, 78732, 78750, 78848, 79200, 79380, 79872, 80000,
652 80190, 80262, 80640, 80850, 81000, 81250, 81536, 81648, 81900, 81920,
653 82320, 82500, 82944, 83160, 83200, 84000, 84240, 84480, 84672, 85050,
654 85536, 85750, 86016, 86240, 86400, 86436, 87318, 87360, 87480, 87500,
655 87750, 87808, 88000, 88200, 88452, 88704, 89100, 89180, 89600, 89856,
656 90000, 90112, 90552, 90720, 91000, 91728, 91854, 92160, 92400, 92610,
657 93184, 93312, 93600, 93750, 94080, 94500, 94770, 95040, 95256, 95550,
658 96000, 96040, 96228, 96250, 96768, 97020, 97200, 97500, 98000, 98280,
659 98304, 98560, 98784, 99000, 99792, 99840
660 };
660661
661662 template <int M>
662663 struct FFTEmbedKernel
663664 {
664665 template <unsigned int N, class Real, class C, class Shape>
665 static void
666 exec(MultiArrayView<N, Real, C> & out, Shape const & kernelShape,
666 static void
667 exec(MultiArrayView<N, Real, C> & out, Shape const & kernelShape,
667668 Shape & srcPoint, Shape & destPoint, bool copyIt)
668669 {
669670 for(srcPoint[M]=0; srcPoint[M]<kernelShape[M]; ++srcPoint[M])
686687 struct FFTEmbedKernel<0>
687688 {
688689 template <unsigned int N, class Real, class C, class Shape>
689 static void
690 exec(MultiArrayView<N, Real, C> & out, Shape const & kernelShape,
690 static void
691 exec(MultiArrayView<N, Real, C> & out, Shape const & kernelShape,
691692 Shape & srcPoint, Shape & destPoint, bool copyIt)
692693 {
693694 for(srcPoint[0]=0; srcPoint[0]<kernelShape[0]; ++srcPoint[0])
711712 };
712713
713714 template <unsigned int N, class Real, class C1, class C2>
714 void
715 void
715716 fftEmbedKernel(MultiArrayView<N, Real, C1> kernel,
716717 MultiArrayView<N, Real, C2> out,
717718 Real norm = 1.0)
719720 typedef typename MultiArrayShape<N>::type Shape;
720721
721722 MultiArrayView<N, Real, C2> kout = out.subarray(Shape(), kernel.shape());
722
723
723724 out.init(0.0);
724725 kout = kernel;
725726 if (norm != 1.0)
726727 kout *= norm;
727728 moveDCToUpperLeft(kout);
728
729 Shape srcPoint, destPoint;
729
730 Shape srcPoint, destPoint;
730731 FFTEmbedKernel<(int)N-1>::exec(out, kernel.shape(), srcPoint, destPoint, false);
731732 }
732733
733734 template <unsigned int N, class Real, class C1, class C2>
734 void
735 void
735736 fftEmbedArray(MultiArrayView<N, Real, C1> in,
736737 MultiArrayView<N, Real, C2> out)
737738 {
738739 typedef typename MultiArrayShape<N>::type Shape;
739
740 Shape diff = out.shape() - in.shape(),
740
741 Shape diff = out.shape() - in.shape(),
741742 leftDiff = div(diff, MultiArrayIndex(2)),
742743 rightDiff = diff - leftDiff,
743 right = in.shape() + leftDiff;
744
744 right = in.shape() + leftDiff;
745
745746 out.subarray(leftDiff, right) = in;
746
747
747748 typedef typename MultiArrayView<N, Real, C2>::traverser Traverser;
748749 typedef MultiArrayNavigator<Traverser, N> Navigator;
749750 typedef typename Navigator::iterator Iterator;
750
751
751752 for(unsigned int d = 0; d < N; ++d)
752753 {
753754 Navigator nav(out.traverser_begin(), out.shape(), d);
786787
787788 /** \brief Find frequency domain shape for a R2C Fourier transform.
788789
789 When a real valued array is transformed to the frequency domain, about half of the
790 Fourier coefficients are redundant. The transform can be optimized as a <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C
790 When a real valued array is transformed to the frequency domain, about half of the
791 Fourier coefficients are redundant. The transform can be optimized as a <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C
791792 transform</a> that doesn't compute and store the redundant coefficients. This function
792793 computes the appropriate frequency domain shape for a given shape in the spatial domain.
793794 It simply replaces <tt>shape[0]</tt> with <tt>shape[0] / 2 + 1</tt>.
794
795
795796 <b>\#include</b> \<vigra/multi_fft.hxx\><br/>
796797 Namespace: vigra
797798 */
825826 <tt>fftw_destroy_plan</tt> (and their <tt>float</tt> and <tt>long double</tt> counterparts)
826827 in an easy-to-use interface.
827828
828 Usually, you use this class only indirectly via \ref fourierTransform()
829 Usually, you use this class only indirectly via \ref fourierTransform()
829830 and \ref fourierTransformInverse(). You only need this class if you want to have more control
830831 about FFTW's planning process (by providing non-default planning flags) and/or want to re-use
831832 plans for several transformations.
832
833
833834 <b> Usage:</b>
834835
835836 <b>\#include</b> \<vigra/multi_fft.hxx\><br>
839840 // compute complex Fourier transform of a real image
840841 MultiArray<2, double> src(Shape2(w, h));
841842 MultiArray<2, FFTWComplex<double> > fourier(Shape2(w, h));
842
843
843844 // create an optimized plan by measuring the speed of several algorithm variants
844845 FFTWPlan<2, double> plan(src, fourier, FFTW_MEASURE);
845
846 plan.execute(src, fourier);
846
847 plan.execute(src, fourier);
847848 \endcode
848849 */
849850 template <unsigned int N, class Real = double>
852853 typedef ArrayVector<int> Shape;
853854 typedef typename FFTWReal2Complex<Real>::plan_type PlanType;
854855 typedef typename FFTWComplex<Real>::complex_type Complex;
855
856
856857 PlanType plan;
857858 Shape shape, instrides, outstrides;
858859 int sign;
859
860
860861 public:
861862 /** \brief Create an empty plan.
862
863
863864 The plan can be initialized later by one of the init() functions.
864865 */
865866 FFTWPlan()
866867 : plan(0)
867868 {}
868
869
869870 /** \brief Create a plan for a complex-to-complex transform.
870
871
871872 \arg SIGN must be <tt>FFTW_FORWARD</tt> or <tt>FFTW_BACKWARD</tt> according to the
872873 desired transformation direction.
873 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
874 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
874875 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
875876 optimal algorithm settings or read them from pre-loaded <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
876877 */
877878 template <class C1, class C2>
878 FFTWPlan(MultiArrayView<N, FFTWComplex<Real>, C1> in,
879 FFTWPlan(MultiArrayView<N, FFTWComplex<Real>, C1> in,
879880 MultiArrayView<N, FFTWComplex<Real>, C2> out,
880881 int SIGN, unsigned int planner_flags = FFTW_ESTIMATE)
881882 : plan(0)
882883 {
883884 init(in, out, SIGN, planner_flags);
884885 }
885
886
886887 /** \brief Create a plan for a real-to-complex transform.
887
888
888889 This always refers to a forward transform. The shape of the output determines
889 if a standard transform (when <tt>out.shape() == in.shape()</tt>) or an
890 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C
891 transform</a> (when <tt>out.shape() == fftwCorrespondingShapeR2C(in.shape())</tt>) will be executed.
892
893 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
890 if a standard transform (when <tt>out.shape() == in.shape()</tt>) or an
891 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C
892 transform</a> (when <tt>out.shape() == fftwCorrespondingShapeR2C(in.shape())</tt>) will be executed.
893
894 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
894895 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
895896 optimal algorithm settings or read them from pre-loaded <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
896897 */
897898 template <class C1, class C2>
898 FFTWPlan(MultiArrayView<N, Real, C1> in,
899 FFTWPlan(MultiArrayView<N, Real, C1> in,
899900 MultiArrayView<N, FFTWComplex<Real>, C2> out,
900901 unsigned int planner_flags = FFTW_ESTIMATE)
901902 : plan(0)
904905 }
905906
906907 /** \brief Create a plan for a complex-to-real transform.
907
908
908909 This always refers to a inverse transform. The shape of the input determines
909 if a standard transform (when <tt>in.shape() == out.shape()</tt>) or a
910 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">C2R
911 transform</a> (when <tt>in.shape() == fftwCorrespondingShapeR2C(out.shape())</tt>) will be executed.
912
913 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
910 if a standard transform (when <tt>in.shape() == out.shape()</tt>) or a
911 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">C2R
912 transform</a> (when <tt>in.shape() == fftwCorrespondingShapeR2C(out.shape())</tt>) will be executed.
913
914 \arg planner_flags must be a combination of the <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
914915 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
915916 optimal algorithm settings or read them from pre-loaded <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
916917 */
917918 template <class C1, class C2>
918 FFTWPlan(MultiArrayView<N, FFTWComplex<Real>, C1> in,
919 FFTWPlan(MultiArrayView<N, FFTWComplex<Real>, C1> in,
919920 MultiArrayView<N, Real, C2> out,
920921 unsigned int planner_flags = FFTW_ESTIMATE)
921922 : plan(0)
922923 {
923924 init(in, out, planner_flags);
924925 }
925
926
926927 /** \brief Copy constructor.
927928 */
928929 FFTWPlan(FFTWPlan const & other)
935936 outstrides.swap(o.outstrides);
936937 o.plan = 0; // act like std::auto_ptr
937938 }
938
939
939940 /** \brief Copy assigment.
940941 */
941942 FFTWPlan & operator=(FFTWPlan const & other)
962963 }
963964
964965 /** \brief Init a complex-to-complex transform.
965
966
966967 See the constructor with the same signature for details.
967968 */
968969 template <class C1, class C2>
969 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
970 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
970971 MultiArrayView<N, FFTWComplex<Real>, C2> out,
971972 int SIGN, unsigned int planner_flags = FFTW_ESTIMATE)
972973 {
973974 vigra_precondition(in.strideOrdering() == out.strideOrdering(),
974975 "FFTWPlan.init(): input and output must have the same stride ordering.");
975
976 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
976
977 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
977978 SIGN, planner_flags);
978979 }
979
980
980981 /** \brief Init a real-to-complex transform.
981
982
982983 See the constructor with the same signature for details.
983984 */
984985 template <class C1, class C2>
985 void init(MultiArrayView<N, Real, C1> in,
986 void init(MultiArrayView<N, Real, C1> in,
986987 MultiArrayView<N, FFTWComplex<Real>, C2> out,
987988 unsigned int planner_flags = FFTW_ESTIMATE)
988989 {
989990 vigra_precondition(in.strideOrdering() == out.strideOrdering(),
990991 "FFTWPlan.init(): input and output must have the same stride ordering.");
991992
992 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
993 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
993994 FFTW_FORWARD, planner_flags);
994995 }
995
996
996997 /** \brief Init a complex-to-real transform.
997
998
998999 See the constructor with the same signature for details.
9991000 */
10001001 template <class C1, class C2>
1001 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1002 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
10021003 MultiArrayView<N, Real, C2> out,
10031004 unsigned int planner_flags = FFTW_ESTIMATE)
10041005 {
10051006 vigra_precondition(in.strideOrdering() == out.strideOrdering(),
10061007 "FFTWPlan.init(): input and output must have the same stride ordering.");
10071008
1008 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
1009 initImpl(in.permuteStridesDescending(), out.permuteStridesDescending(),
10091010 FFTW_BACKWARD, planner_flags);
10101011 }
1011
1012
10121013 /** \brief Execute a complex-to-complex transform.
1013
1014
10141015 The array shapes must be the same as in the corresponding init function
10151016 or constructor. However, execute() can be called several times on
1016 the same plan, even with different arrays, as long as they have the appropriate
1017 the same plan, even with different arrays, as long as they have the appropriate
10171018 shapes.
10181019 */
10191020 template <class C1, class C2>
1020 void execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1021 void execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
10211022 MultiArrayView<N, FFTWComplex<Real>, C2> out) const
10221023 {
10231024 executeImpl(in.permuteStridesDescending(), out.permuteStridesDescending());
10241025 }
1025
1026
10261027 /** \brief Execute a real-to-complex transform.
1027
1028
10281029 The array shapes must be the same as in the corresponding init function
10291030 or constructor. However, execute() can be called several times on
1030 the same plan, even with different arrays, as long as they have the appropriate
1031 the same plan, even with different arrays, as long as they have the appropriate
10311032 shapes.
10321033 */
10331034 template <class C1, class C2>
1034 void execute(MultiArrayView<N, Real, C1> in,
1035 void execute(MultiArrayView<N, Real, C1> in,
10351036 MultiArrayView<N, FFTWComplex<Real>, C2> out) const
10361037 {
10371038 executeImpl(in.permuteStridesDescending(), out.permuteStridesDescending());
10381039 }
1039
1040
10401041 /** \brief Execute a complex-to-real transform.
1041
1042
10421043 The array shapes must be the same as in the corresponding init function
10431044 or constructor. However, execute() can be called several times on
1044 the same plan, even with different arrays, as long as they have the appropriate
1045 the same plan, even with different arrays, as long as they have the appropriate
10451046 shapes.
10461047 */
10471048 template <class C1, class C2>
1048 void execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1049 void execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
10491050 MultiArrayView<N, Real, C2> out) const
10501051 {
10511052 executeImpl(in.permuteStridesDescending(), out.permuteStridesDescending());
10521053 }
1053
1054
10541055 private:
1055
1056
10561057 template <class MI, class MO>
10571058 void initImpl(MI ins, MO outs, int SIGN, unsigned int planner_flags);
1058
1059
10591060 template <class MI, class MO>
10601061 void executeImpl(MI ins, MO outs) const;
1061
1062 void checkShapes(MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> in,
1062
1063 void checkShapes(MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> in,
10631064 MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> out) const
10641065 {
10651066 vigra_precondition(in.shape() == out.shape(),
10661067 "FFTWPlan.init(): input and output must have the same shape.");
10671068 }
1068
1069 void checkShapes(MultiArrayView<N, Real, StridedArrayTag> ins,
1069
1070 void checkShapes(MultiArrayView<N, Real, StridedArrayTag> ins,
10701071 MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> outs) const
10711072 {
10721073 for(int k=0; k<(int)N-1; ++k)
10751076 vigra_precondition(ins.shape(N-1) / 2 + 1 == outs.shape(N-1),
10761077 "FFTWPlan.init(): input and output must have matching shapes.");
10771078 }
1078
1079 void checkShapes(MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> ins,
1079
1080 void checkShapes(MultiArrayView<N, FFTWComplex<Real>, StridedArrayTag> ins,
10801081 MultiArrayView<N, Real, StridedArrayTag> outs) const
10811082 {
10821083 for(int k=0; k<(int)N-1; ++k)
10931094 FFTWPlan<N, Real>::initImpl(MI ins, MO outs, int SIGN, unsigned int planner_flags)
10941095 {
10951096 checkShapes(ins, outs);
1096
1097
10971098 typename MultiArrayShape<N>::type logicalShape(SIGN == FFTW_FORWARD
10981099 ? ins.shape()
10991100 : outs.shape());
1100
1101
11011102 Shape newShape(logicalShape.begin(), logicalShape.end()),
11021103 newIStrides(ins.stride().begin(), ins.stride().end()),
11031104 newOStrides(outs.stride().begin(), outs.stride().end()),
1104 itotal(ins.shape().begin(), ins.shape().end()),
1105 itotal(ins.shape().begin(), ins.shape().end()),
11051106 ototal(outs.shape().begin(), outs.shape().end());
11061107
11071108 for(unsigned int j=1; j<N; ++j)
11091110 itotal[j] = ins.stride(j-1) / ins.stride(j);
11101111 ototal[j] = outs.stride(j-1) / outs.stride(j);
11111112 }
1112
1113
11131114 {
11141115 detail::FFTWLock<> lock;
1115 PlanType newPlan = detail::fftwPlanCreate(N, newShape.begin(),
1116 PlanType newPlan = detail::fftwPlanCreate(N, newShape.begin(),
11161117 ins.data(), itotal.begin(), ins.stride(N-1),
11171118 outs.data(), ototal.begin(), outs.stride(N-1),
11181119 SIGN, planner_flags);
11191120 detail::fftwPlanDestroy(plan);
11201121 plan = newPlan;
11211122 }
1122
1123
11231124 shape.swap(newShape);
11241125 instrides.swap(newIStrides);
11251126 outstrides.swap(newOStrides);
11351136 typename MultiArrayShape<N>::type lshape(sign == FFTW_FORWARD
11361137 ? ins.shape()
11371138 : outs.shape());
1138
1139
11391140 vigra_precondition((lshape == TinyVectorView<int, N>(shape.data())),
11401141 "FFTWPlan::execute(): shape mismatch between plan and data.");
11411142 vigra_precondition((ins.stride() == TinyVectorView<int, N>(instrides.data())),
11441145 "FFTWPlan::execute(): strides mismatch between plan and output data.");
11451146
11461147 detail::fftwPlanExecute(plan, ins.data(), outs.data());
1147
1148
11481149 typedef typename MO::value_type V;
11491150 if(sign == FFTW_BACKWARD)
11501151 outs *= V(1.0) / Real(outs.size());
11631164 in an easy-to-use interface. It always creates a pair of plans, one for the forward and one
11641165 for the inverse transform required for convolution.
11651166
1166 Usually, you use this class only indirectly via \ref convolveFFT() and its variants.
1167 You only need this class if you want to have more control about FFTW's planning process
1167 Usually, you use this class only indirectly via \ref convolveFFT() and its variants.
1168 You only need this class if you want to have more control about FFTW's planning process
11681169 (by providing non-default planning flags) and/or want to re-use plans for several convolutions.
1169
1170
11701171 <b> Usage:</b>
11711172
11721173 <b>\#include</b> \<vigra/multi_fft.hxx\><br>
11751176 \code
11761177 // convolve a real array with a real kernel
11771178 MultiArray<2, double> src(Shape2(w, h)), dest(Shape2(w, h));
1178
1179
11791180 MultiArray<2, double> spatial_kernel(Shape2(9, 9));
11801181 Gaussian<double> gauss(1.0);
1181
1182
11821183 for(int y=0; y<9; ++y)
11831184 for(int x=0; x<9; ++x)
11841185 spatial_kernel(x, y) = gauss(x-4.0)*gauss(y-4.0);
1185
1186
11861187 // create an optimized plan by measuring the speed of several algorithm variants
11871188 FFTWConvolvePlan<2, double> plan(src, spatial_kernel, dest, FFTW_MEASURE);
1188
1189 plan.execute(src, spatial_kernel, dest);
1189
1190 plan.execute(src, spatial_kernel, dest);
11901191 \endcode
11911192 */
11921193 template <unsigned int N, class Real>
11951196 typedef FFTWComplex<Real> Complex;
11961197 typedef MultiArrayView<N, Real, UnstridedArrayTag > RArray;
11971198 typedef MultiArray<N, Complex, FFTWAllocator<Complex> > CArray;
1198
1199
11991200 FFTWPlan<N, Real> forward_plan, backward_plan;
12001201 RArray realArray, realKernel;
12011202 CArray fourierArray, fourierKernel;
12021203 bool useFourierKernel;
12031204
12041205 public:
1205
1206
12061207 typedef typename MultiArrayShape<N>::type Shape;
12071208
12081209 /** \brief Create an empty plan.
1209
1210
12101211 The plan can be initialized later by one of the init() functions.
12111212 */
12121213 FFTWConvolvePlan()
12131214 : useFourierKernel(false)
12141215 {}
1215
1216
12161217 /** \brief Create a plan to convolve a real array with a real kernel.
1217
1218
12181219 The kernel must be defined in the spatial domain.
12191220 See \ref convolveFFT() for detailed information on required shapes and internal padding.
1220
1221 \arg planner_flags must be a combination of the
1222 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
1221
1222 \arg planner_flags must be a combination of the
1223 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
12231224 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
1224 optimal algorithm settings or read them from pre-loaded
1225 optimal algorithm settings or read them from pre-loaded
12251226 <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
12261227 */
12271228 template <class C1, class C2, class C3>
1228 FFTWConvolvePlan(MultiArrayView<N, Real, C1> in,
1229 FFTWConvolvePlan(MultiArrayView<N, Real, C1> in,
12291230 MultiArrayView<N, Real, C2> kernel,
12301231 MultiArrayView<N, Real, C3> out,
12311232 unsigned int planner_flags = FFTW_ESTIMATE)
12331234 {
12341235 init(in, kernel, out, planner_flags);
12351236 }
1236
1237
12371238 /** \brief Create a plan to convolve a real array with a complex kernel.
1238
1239 The kernel must be defined in the Fourier domain, using the half-space format.
1239
1240 The kernel must be defined in the Fourier domain, using the half-space format.
12401241 See \ref convolveFFT() for detailed information on required shapes and internal padding.
1241
1242 \arg planner_flags must be a combination of the
1243 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
1242
1243 \arg planner_flags must be a combination of the
1244 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
12441245 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
1245 optimal algorithm settings or read them from pre-loaded
1246 optimal algorithm settings or read them from pre-loaded
12461247 <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
12471248 */
12481249 template <class C1, class C2, class C3>
1249 FFTWConvolvePlan(MultiArrayView<N, Real, C1> in,
1250 FFTWConvolvePlan(MultiArrayView<N, Real, C1> in,
12501251 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
12511252 MultiArrayView<N, Real, C3> out,
12521253 unsigned int planner_flags = FFTW_ESTIMATE)
12541255 {
12551256 init(in, kernel, out, planner_flags);
12561257 }
1257
1258
12581259 /** \brief Create a plan to convolve a complex array with a complex kernel.
1259
1260
12601261 See \ref convolveFFT() for detailed information on required shapes and internal padding.
1261
1262
12621263 \arg fourierDomainKernel determines if the kernel is defined in the spatial or
12631264 Fourier domain.
1264 \arg planner_flags must be a combination of the
1265 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
1265 \arg planner_flags must be a combination of the
1266 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
12661267 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
1267 optimal algorithm settings or read them from pre-loaded
1268 optimal algorithm settings or read them from pre-loaded
12681269 <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
12691270 */
12701271 template <class C1, class C2, class C3>
12711272 FFTWConvolvePlan(MultiArrayView<N, FFTWComplex<Real>, C1> in,
12721273 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
1273 MultiArrayView<N, FFTWComplex<Real>, C3> out,
1274 MultiArrayView<N, FFTWComplex<Real>, C3> out,
12741275 bool fourierDomainKernel,
12751276 unsigned int planner_flags = FFTW_ESTIMATE)
12761277 {
12771278 init(in, kernel, out, fourierDomainKernel, planner_flags);
12781279 }
12791280
1280
1281
12811282 /** \brief Create a plan from just the shape information.
1282
1283
12831284 See \ref convolveFFT() for detailed information on required shapes and internal padding.
1284
1285
12851286 \arg fourierDomainKernel determines if the kernel is defined in the spatial or
12861287 Fourier domain.
1287 \arg planner_flags must be a combination of the
1288 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
1288 \arg planner_flags must be a combination of the
1289 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
12891290 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
1290 optimal algorithm settings or read them from pre-loaded
1291 optimal algorithm settings or read them from pre-loaded
12911292 <a href="http://www.fftw.org/doc/Wisdom.html">"wisdom"</a>.
12921293 */
12931294 template <class C1, class C2, class C3>
1294 FFTWConvolvePlan(Shape inOut, Shape kernel,
1295 FFTWConvolvePlan(Shape inOut, Shape kernel,
12951296 bool useFourierKernel = false,
12961297 unsigned int planner_flags = FFTW_ESTIMATE)
12971298 {
13001301 else
13011302 initFourierKernel(inOut, kernel, planner_flags);
13021303 }
1303
1304
13041305 /** \brief Init a plan to convolve a real array with a real kernel.
1305
1306
13061307 See the constructor with the same signature for details.
13071308 */
13081309 template <class C1, class C2, class C3>
1309 void init(MultiArrayView<N, Real, C1> in,
1310 void init(MultiArrayView<N, Real, C1> in,
13101311 MultiArrayView<N, Real, C2> kernel,
13111312 MultiArrayView<N, Real, C3> out,
13121313 unsigned int planner_flags = FFTW_ESTIMATE)
13151316 "FFTWConvolvePlan::init(): input and output must have the same shape.");
13161317 init(in.shape(), kernel.shape(), planner_flags);
13171318 }
1318
1319
13191320 /** \brief Init a plan to convolve a real array with a complex kernel.
1320
1321
13211322 See the constructor with the same signature for details.
13221323 */
13231324 template <class C1, class C2, class C3>
1324 void init(MultiArrayView<N, Real, C1> in,
1325 void init(MultiArrayView<N, Real, C1> in,
13251326 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
13261327 MultiArrayView<N, Real, C3> out,
13271328 unsigned int planner_flags = FFTW_ESTIMATE)
13301331 "FFTWConvolvePlan::init(): input and output must have the same shape.");
13311332 initFourierKernel(in.shape(), kernel.shape(), planner_flags);
13321333 }
1333
1334
13341335 /** \brief Init a plan to convolve a complex array with a complex kernel.
1335
1336
13361337 See the constructor with the same signature for details.
13371338 */
13381339 template <class C1, class C2, class C3>
1339 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1340 void init(MultiArrayView<N, FFTWComplex<Real>, C1> in,
13401341 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
1341 MultiArrayView<N, FFTWComplex<Real>, C3> out,
1342 MultiArrayView<N, FFTWComplex<Real>, C3> out,
13421343 bool fourierDomainKernel,
13431344 unsigned int planner_flags = FFTW_ESTIMATE)
13441345 {
13471348 useFourierKernel = fourierDomainKernel;
13481349 initComplex(in.shape(), kernel.shape(), planner_flags);
13491350 }
1350
1351
13511352 /** \brief Init a plan to convolve a real array with a sequence of kernels.
1352
1353
13531354 The kernels can be either real or complex. The sequences \a kernels and \a outs
1354 must have the same length. See the corresponding constructors
1355 must have the same length. See the corresponding constructors
13551356 for single kernels for details.
13561357 */
13571358 template <class C1, class KernelIterator, class OutIterator>
1358 void initMany(MultiArrayView<N, Real, C1> in,
1359 void initMany(MultiArrayView<N, Real, C1> in,
13591360 KernelIterator kernels, KernelIterator kernelsEnd,
13601361 OutIterator outs, unsigned int planner_flags = FFTW_ESTIMATE)
13611362 {
13791380 }
13801381 else
13811382 {
1382 initFourierKernelMany(in.shape(),
1383 initFourierKernelMany(in.shape(),
13831384 checkShapesFourier(in.shape(), kernels, kernelsEnd, outs),
13841385 planner_flags);
13851386 }
13861387 }
1387
1388
13881389 /** \brief Init a plan to convolve a complex array with a sequence of kernels.
1389
1390
13901391 The kernels must be complex as well. The sequences \a kernels and \a outs
1391 must have the same length. See the corresponding constructors
1392 must have the same length. See the corresponding constructors
13921393 for single kernels for details.
13931394 */
13941395 template <class C1, class KernelIterator, class OutIterator>
1395 void initMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1396 void initMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
13961397 KernelIterator kernels, KernelIterator kernelsEnd,
13971398 OutIterator outs,
13981399 bool fourierDomainKernels,
14091410 "FFTWConvolvePlan::initMany(): outputs have unsuitable value_type.");
14101411
14111412 useFourierKernel = fourierDomainKernels;
1412
1413
14131414 Shape paddedShape = checkShapesComplex(in.shape(), kernels, kernelsEnd, outs);
1414
1415
14151416 CArray newFourierArray(paddedShape), newFourierKernel(paddedShape);
1416
1417
14171418 FFTWPlan<N, Real> fplan(newFourierArray, newFourierArray, FFTW_FORWARD, planner_flags);
14181419 FFTWPlan<N, Real> bplan(newFourierArray, newFourierArray, FFTW_BACKWARD, planner_flags);
1419
1420
14201421 forward_plan = fplan;
14211422 backward_plan = bplan;
14221423 fourierArray.swap(newFourierArray);
14231424 fourierKernel.swap(newFourierKernel);
14241425 }
1425
1426
14261427 void init(Shape inOut, Shape kernel,
14271428 unsigned int planner_flags = FFTW_ESTIMATE);
1428
1429
14291430 void initFourierKernel(Shape inOut, Shape kernel,
14301431 unsigned int planner_flags = FFTW_ESTIMATE);
1431
1432
14321433 void initComplex(Shape inOut, Shape kernel,
14331434 unsigned int planner_flags = FFTW_ESTIMATE);
1434
1435
14351436 void initMany(Shape inOut, Shape maxKernel,
14361437 unsigned int planner_flags = FFTW_ESTIMATE)
14371438 {
14381439 init(inOut, maxKernel, planner_flags);
14391440 }
1440
1441
14411442 void initFourierKernelMany(Shape inOut, Shape kernels,
14421443 unsigned int planner_flags = FFTW_ESTIMATE)
14431444 {
14441445 initFourierKernel(inOut, kernels, planner_flags);
14451446 }
1446
1447
14471448 /** \brief Execute a plan to convolve a real array with a real kernel.
1448
1449
14491450 The array shapes must be the same as in the corresponding init function
14501451 or constructor. However, execute() can be called several times on
1451 the same plan, even with different arrays, as long as they have the appropriate
1452 the same plan, even with different arrays, as long as they have the appropriate
14521453 shapes.
14531454 */
14541455 template <class C1, class C2, class C3>
1455 void execute(MultiArrayView<N, Real, C1> in,
1456 void execute(MultiArrayView<N, Real, C1> in,
14561457 MultiArrayView<N, Real, C2> kernel,
14571458 MultiArrayView<N, Real, C3> out)
14581459 {
14591460 executeImpl(in, kernel, out);
14601461 }
1461
1462
14621463 /** \brief Execute a plan to convolve a real array with a complex kernel.
1463
1464
14641465 The array shapes must be the same as in the corresponding init function
14651466 or constructor. However, execute() can be called several times on
1466 the same plan, even with different arrays, as long as they have the appropriate
1467 the same plan, even with different arrays, as long as they have the appropriate
14671468 shapes.
14681469 */
14691470 template <class C1, class C2, class C3>
1470 void execute(MultiArrayView<N, Real, C1> in,
1471 void execute(MultiArrayView<N, Real, C1> in,
14711472 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
14721473 MultiArrayView<N, Real, C3> out);
14731474
14741475 /** \brief Execute a plan to convolve a complex array with a complex kernel.
1475
1476
14761477 The array shapes must be the same as in the corresponding init function
14771478 or constructor. However, execute() can be called several times on
1478 the same plan, even with different arrays, as long as they have the appropriate
1479 the same plan, even with different arrays, as long as they have the appropriate
14791480 shapes.
14801481 */
14811482 template <class C1, class C2, class C3>
14851486
14861487
14871488 /** \brief Execute a plan to convolve a complex array with a sequence of kernels.
1488
1489
14891490 The array shapes must be the same as in the corresponding init function
14901491 or constructor. However, executeMany() can be called several times on
1491 the same plan, even with different arrays, as long as they have the appropriate
1492 the same plan, even with different arrays, as long as they have the appropriate
14921493 shapes.
14931494 */
14941495 template <class C1, class KernelIterator, class OutIterator>
1495 void executeMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1496 void executeMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
14961497 KernelIterator kernels, KernelIterator kernelsEnd,
14971498 OutIterator outs);
1498
1499
14991500 /** \brief Execute a plan to convolve a real array with a sequence of kernels.
1500
1501
15011502 The array shapes must be the same as in the corresponding init function
15021503 or constructor. However, executeMany() can be called several times on
1503 the same plan, even with different arrays, as long as they have the appropriate
1504 the same plan, even with different arrays, as long as they have the appropriate
15041505 shapes.
15051506 */
15061507 template <class C1, class KernelIterator, class OutIterator>
1507 void executeMany(MultiArrayView<N, Real, C1> in,
1508 void executeMany(MultiArrayView<N, Real, C1> in,
15081509 KernelIterator kernels, KernelIterator kernelsEnd,
15091510 OutIterator outs)
15101511 {
15261527 }
15271528
15281529 protected:
1529
1530
15301531 template <class KernelIterator, class OutIterator>
1531 Shape checkShapes(Shape in,
1532 Shape checkShapes(Shape in,
15321533 KernelIterator kernels, KernelIterator kernelsEnd,
15331534 OutIterator outs);
1534
1535
15351536 template <class KernelIterator, class OutIterator>
1536 Shape checkShapesFourier(Shape in,
1537 Shape checkShapesFourier(Shape in,
15371538 KernelIterator kernels, KernelIterator kernelsEnd,
15381539 OutIterator outs);
1539
1540
15401541 template <class KernelIterator, class OutIterator>
1541 Shape checkShapesComplex(Shape in,
1542 Shape checkShapesComplex(Shape in,
15421543 KernelIterator kernels, KernelIterator kernelsEnd,
15431544 OutIterator outs);
1544
1545
15451546 template <class C1, class C2, class C3>
1546 void executeImpl(MultiArrayView<N, Real, C1> in,
1547 void executeImpl(MultiArrayView<N, Real, C1> in,
15471548 MultiArrayView<N, Real, C2> kernel,
15481549 MultiArrayView<N, Real, C3> out,
15491550 bool do_correlation=false);
1550
1551
15511552 template <class C1, class KernelIterator, class OutIterator>
1552 void
1553 executeManyImpl(MultiArrayView<N, Real, C1> in,
1553 void
1554 executeManyImpl(MultiArrayView<N, Real, C1> in,
15541555 KernelIterator kernels, KernelIterator kernelsEnd,
15551556 OutIterator outs, VigraFalseType /* useFourierKernel*/);
1556
1557
15571558 template <class C1, class KernelIterator, class OutIterator>
1558 void
1559 executeManyImpl(MultiArrayView<N, Real, C1> in,
1559 void
1560 executeManyImpl(MultiArrayView<N, Real, C1> in,
15601561 KernelIterator kernels, KernelIterator kernelsEnd,
15611562 OutIterator outs, VigraTrueType /* useFourierKernel*/);
1562
1563 };
1564
1563
1564 };
1565
15651566 template <unsigned int N, class Real>
1566 void
1567 void
15671568 FFTWConvolvePlan<N, Real>::init(Shape in, Shape kernel,
15681569 unsigned int planner_flags)
15691570 {
15701571 Shape paddedShape = fftwBestPaddedShapeR2C(in + kernel - Shape(1)),
15711572 complexShape = fftwCorrespondingShapeR2C(paddedShape);
1572
1573
15731574 CArray newFourierArray(complexShape), newFourierKernel(complexShape);
1574
1575
15751576 Shape realStrides = 2*newFourierArray.stride();
15761577 realStrides[0] = 1;
15771578 RArray newRealArray(paddedShape, realStrides, (Real*)newFourierArray.data());
15781579 RArray newRealKernel(paddedShape, realStrides, (Real*)newFourierKernel.data());
1579
1580
15801581 FFTWPlan<N, Real> fplan(newRealArray, newFourierArray, planner_flags);
15811582 FFTWPlan<N, Real> bplan(newFourierArray, newRealArray, planner_flags);
1582
1583
15831584 forward_plan = fplan;
15841585 backward_plan = bplan;
15851586 realArray = newRealArray;
15901591 }
15911592
15921593 template <unsigned int N, class Real>
1593 void
1594 void
15941595 FFTWConvolvePlan<N, Real>::initFourierKernel(Shape in, Shape kernel,
15951596 unsigned int planner_flags)
15961597 {
15971598 Shape complexShape = kernel,
15981599 paddedShape = fftwCorrespondingShapeC2R(complexShape);
1599
1600
16001601 for(unsigned int k=0; k<N; ++k)
16011602 vigra_precondition(in[k] <= paddedShape[k],
16021603 "FFTWConvolvePlan::init(): kernel too small for given input.");
16031604
16041605 CArray newFourierArray(complexShape), newFourierKernel(complexShape);
1605
1606
16061607 Shape realStrides = 2*newFourierArray.stride();
16071608 realStrides[0] = 1;
16081609 RArray newRealArray(paddedShape, realStrides, (Real*)newFourierArray.data());
16091610 RArray newRealKernel(paddedShape, realStrides, (Real*)newFourierKernel.data());
1610
1611
16111612 FFTWPlan<N, Real> fplan(newRealArray, newFourierArray, planner_flags);
16121613 FFTWPlan<N, Real> bplan(newFourierArray, newRealArray, planner_flags);
1613
1614
16141615 forward_plan = fplan;
16151616 backward_plan = bplan;
16161617 realArray = newRealArray;
16211622 }
16221623
16231624 template <unsigned int N, class Real>
1624 void
1625 void
16251626 FFTWConvolvePlan<N, Real>::initComplex(Shape in, Shape kernel,
16261627 unsigned int planner_flags)
16271628 {
16281629 Shape paddedShape;
1629
1630
16301631 if(useFourierKernel)
16311632 {
16321633 for(unsigned int k=0; k<N; ++k)
16391640 {
16401641 paddedShape = fftwBestPaddedShape(in + kernel - Shape(1));
16411642 }
1642
1643
16431644 CArray newFourierArray(paddedShape), newFourierKernel(paddedShape);
1644
1645
16451646 FFTWPlan<N, Real> fplan(newFourierArray, newFourierArray, FFTW_FORWARD, planner_flags);
16461647 FFTWPlan<N, Real> bplan(newFourierArray, newFourierArray, FFTW_BACKWARD, planner_flags);
1647
1648
16481649 forward_plan = fplan;
16491650 backward_plan = bplan;
16501651 fourierArray.swap(newFourierArray);
16551656
16561657 template <unsigned int N, class Real>
16571658 template <class C1, class C2, class C3>
1658 void
1659 FFTWConvolvePlan<N, Real>::executeImpl(MultiArrayView<N, Real, C1> in,
1659 void
1660 FFTWConvolvePlan<N, Real>::executeImpl(MultiArrayView<N, Real, C1> in,
16601661 MultiArrayView<N, Real, C2> kernel,
16611662 MultiArrayView<N, Real, C3> out,
16621663 bool do_correlation)
16661667
16671668 vigra_precondition(in.shape() == out.shape(),
16681669 "FFTWConvolvePlan::execute(): input and output must have the same shape.");
1669
1670
16701671 Shape paddedShape = fftwBestPaddedShapeR2C(in.shape() + kernel.shape() - Shape(1)),
1671 diff = paddedShape - in.shape(),
1672 diff = paddedShape - in.shape(),
16721673 left = div(diff, MultiArrayIndex(2)),
16731674 right = in.shape() + left;
1674
1675
16751676 vigra_precondition(paddedShape == realArray.shape(),
16761677 "FFTWConvolvePlan::execute(): shape mismatch between input and plan.");
16771678
16801681
16811682 detail::fftEmbedKernel(kernel, realKernel);
16821683 forward_plan.execute(realKernel, fourierKernel);
1683
1684
16841685 if(do_correlation)
16851686 {
16861687 using namespace multi_math;
16901691 {
16911692 fourierArray *= fourierKernel;
16921693 }
1693
1694
16941695 backward_plan.execute(fourierArray, realArray);
1695
1696
16961697 out = realArray.subarray(left, right);
16971698 }
16981699
16991700 template <unsigned int N, class Real>
17001701 template <class C1, class C2, class C3>
1701 void
1702 FFTWConvolvePlan<N, Real>::execute(MultiArrayView<N, Real, C1> in,
1702 void
1703 FFTWConvolvePlan<N, Real>::execute(MultiArrayView<N, Real, C1> in,
17031704 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
17041705 MultiArrayView<N, Real, C3> out)
17051706 {
17081709
17091710 vigra_precondition(in.shape() == out.shape(),
17101711 "FFTWConvolvePlan::execute(): input and output must have the same shape.");
1711
1712
17121713 vigra_precondition(kernel.shape() == fourierArray.shape(),
17131714 "FFTWConvolvePlan::execute(): shape mismatch between kernel and plan.");
17141715
17151716 Shape paddedShape = fftwCorrespondingShapeC2R(kernel.shape(), odd(in.shape(0))),
1716 diff = paddedShape - in.shape(),
1717 diff = paddedShape - in.shape(),
17171718 left = div(diff, MultiArrayIndex(2)),
17181719 right = in.shape() + left;
1719
1720
17201721 vigra_precondition(paddedShape == realArray.shape(),
17211722 "FFTWConvolvePlan::execute(): shape mismatch between input and plan.");
17221723
17271728 moveDCToHalfspaceUpperLeft(fourierKernel);
17281729
17291730 fourierArray *= fourierKernel;
1730
1731
17311732 backward_plan.execute(fourierArray, realArray);
1732
1733
17331734 out = realArray.subarray(left, right);
17341735 }
17351736
17361737 template <unsigned int N, class Real>
17371738 template <class C1, class C2, class C3>
1738 void
1739 FFTWConvolvePlan<N, Real>::execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1739 void
1740 FFTWConvolvePlan<N, Real>::execute(MultiArrayView<N, FFTWComplex<Real>, C1> in,
17401741 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
17411742 MultiArrayView<N, FFTWComplex<Real>, C3> out)
17421743 {
17431744 vigra_precondition(in.shape() == out.shape(),
17441745 "FFTWConvolvePlan::execute(): input and output must have the same shape.");
1745
1746
17461747 Shape paddedShape = fourierArray.shape(),
1747 diff = paddedShape - in.shape(),
1748 diff = paddedShape - in.shape(),
17481749 left = div(diff, MultiArrayIndex(2)),
17491750 right = in.shape() + left;
1750
1751
17511752 if(useFourierKernel)
17521753 {
17531754 vigra_precondition(kernel.shape() == fourierArray.shape(),
17541755 "FFTWConvolvePlan::execute(): shape mismatch between kernel and plan.");
1755
1756
17561757 fourierKernel = kernel;
17571758 moveDCToUpperLeft(fourierKernel);
17581759 }
17661767 forward_plan.execute(fourierArray, fourierArray);
17671768
17681769 fourierArray *= fourierKernel;
1769
1770
17701771 backward_plan.execute(fourierArray, fourierArray);
1771
1772
17721773 out = fourierArray.subarray(left, right);
17731774 }
17741775
17751776 template <unsigned int N, class Real>
17761777 template <class C1, class KernelIterator, class OutIterator>
1777 void
1778 FFTWConvolvePlan<N, Real>::executeManyImpl(MultiArrayView<N, Real, C1> in,
1778 void
1779 FFTWConvolvePlan<N, Real>::executeManyImpl(MultiArrayView<N, Real, C1> in,
17791780 KernelIterator kernels, KernelIterator kernelsEnd,
17801781 OutIterator outs, VigraFalseType /*useFourierKernel*/)
17811782 {
17841785
17851786 Shape kernelMax = checkShapes(in.shape(), kernels, kernelsEnd, outs),
17861787 paddedShape = fftwBestPaddedShapeR2C(in.shape() + kernelMax - Shape(1)),
1787 diff = paddedShape - in.shape(),
1788 diff = paddedShape - in.shape(),
17881789 left = div(diff, MultiArrayIndex(2)),
17891790 right = in.shape() + left;
1790
1791
17911792 vigra_precondition(paddedShape == realArray.shape(),
17921793 "FFTWConvolvePlan::executeMany(): shape mismatch between input and plan.");
17931794
17981799 {
17991800 detail::fftEmbedKernel(*kernels, realKernel);
18001801 forward_plan.execute(realKernel, fourierKernel);
1801
1802
18021803 fourierKernel *= fourierArray;
1803
1804
18041805 backward_plan.execute(fourierKernel, realKernel);
1805
1806
18061807 *outs = realKernel.subarray(left, right);
18071808 }
18081809 }
18091810
18101811 template <unsigned int N, class Real>
18111812 template <class C1, class KernelIterator, class OutIterator>
1812 void
1813 FFTWConvolvePlan<N, Real>::executeManyImpl(MultiArrayView<N, Real, C1> in,
1813 void
1814 FFTWConvolvePlan<N, Real>::executeManyImpl(MultiArrayView<N, Real, C1> in,
18141815 KernelIterator kernels, KernelIterator kernelsEnd,
18151816 OutIterator outs, VigraTrueType /*useFourierKernel*/)
18161817 {
18191820
18201821 Shape complexShape = checkShapesFourier(in.shape(), kernels, kernelsEnd, outs),
18211822 paddedShape = fftwCorrespondingShapeC2R(complexShape, odd(in.shape(0))),
1822 diff = paddedShape - in.shape(),
1823 diff = paddedShape - in.shape(),
18231824 left = div(diff, MultiArrayIndex(2)),
18241825 right = in.shape() + left;
1825
1826
18261827 vigra_precondition(complexShape == fourierArray.shape(),
18271828 "FFTWConvolvePlan::executeFourierKernelMany(): shape mismatch between kernels and plan.");
18281829
18371838 fourierKernel = *kernels;
18381839 moveDCToHalfspaceUpperLeft(fourierKernel);
18391840 fourierKernel *= fourierArray;
1840
1841
18411842 backward_plan.execute(fourierKernel, realKernel);
1842
1843
18431844 *outs = realKernel.subarray(left, right);
18441845 }
18451846 }
18461847
18471848 template <unsigned int N, class Real>
18481849 template <class C1, class KernelIterator, class OutIterator>
1849 void
1850 FFTWConvolvePlan<N, Real>::executeMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
1850 void
1851 FFTWConvolvePlan<N, Real>::executeMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
18511852 KernelIterator kernels, KernelIterator kernelsEnd,
18521853 OutIterator outs)
18531854 {
18621863 "FFTWConvolvePlan::executeMany(): outputs have unsuitable value_type.");
18631864
18641865 Shape paddedShape = checkShapesComplex(in.shape(), kernels, kernelsEnd, outs),
1865 diff = paddedShape - in.shape(),
1866 diff = paddedShape - in.shape(),
18661867 left = div(diff, MultiArrayIndex(2)),
18671868 right = in.shape() + left;
1868
1869
18691870 detail::fftEmbedArray(in, fourierArray);
18701871 forward_plan.execute(fourierArray, fourierArray);
18711872
18831884 }
18841885
18851886 fourierKernel *= fourierArray;
1886
1887
18871888 backward_plan.execute(fourierKernel, fourierKernel);
1888
1889
18891890 *outs = fourierKernel.subarray(left, right);
18901891 }
18911892 }
18941895
18951896 template <unsigned int N, class Real>
18961897 template <class KernelIterator, class OutIterator>
1897 typename FFTWConvolvePlan<N, Real>::Shape
1898 FFTWConvolvePlan<N, Real>::checkShapes(Shape in,
1898 typename FFTWConvolvePlan<N, Real>::Shape
1899 FFTWConvolvePlan<N, Real>::checkShapes(Shape in,
18991900 KernelIterator kernels, KernelIterator kernelsEnd,
19001901 OutIterator outs)
19011902 {
19021903 vigra_precondition(kernels != kernelsEnd,
19031904 "FFTWConvolvePlan::checkShapes(): empty kernel sequence.");
19041905
1905 Shape kernelMax;
1906 Shape kernelMax;
19061907 for(; kernels != kernelsEnd; ++kernels, ++outs)
19071908 {
19081909 vigra_precondition(in == outs->shape(),
19131914 "FFTWConvolvePlan::checkShapes(): all kernels have size 0.");
19141915 return kernelMax;
19151916 }
1916
1917
19171918 template <unsigned int N, class Real>
19181919 template <class KernelIterator, class OutIterator>
1919 typename FFTWConvolvePlan<N, Real>::Shape
1920 FFTWConvolvePlan<N, Real>::checkShapesFourier(Shape in,
1920 typename FFTWConvolvePlan<N, Real>::Shape
1921 FFTWConvolvePlan<N, Real>::checkShapesFourier(Shape in,
19211922 KernelIterator kernels, KernelIterator kernelsEnd,
19221923 OutIterator outs)
19231924 {
19431944
19441945 template <unsigned int N, class Real>
19451946 template <class KernelIterator, class OutIterator>
1946 typename FFTWConvolvePlan<N, Real>::Shape
1947 FFTWConvolvePlan<N, Real>::checkShapesComplex(Shape in,
1947 typename FFTWConvolvePlan<N, Real>::Shape
1948 FFTWConvolvePlan<N, Real>::checkShapesComplex(Shape in,
19481949 KernelIterator kernels, KernelIterator kernelsEnd,
19491950 OutIterator outs)
19501951 {
19511952 vigra_precondition(kernels != kernelsEnd,
19521953 "FFTWConvolvePlan::checkShapesComplex(): empty kernel sequence.");
19531954
1954 Shape kernelShape = kernels->shape();
1955 Shape kernelShape = kernels->shape();
19551956 for(; kernels != kernelsEnd; ++kernels, ++outs)
19561957 {
19571958 vigra_precondition(in == outs->shape(),
19681969 }
19691970 vigra_precondition(prod(kernelShape) > 0,
19701971 "FFTWConvolvePlan::checkShapesComplex(): all kernels have size 0.");
1971
1972
19721973 if(useFourierKernel)
19731974 {
19741975 for(unsigned int k=0; k<N; ++k)
19811982 return fftwBestPaddedShape(in + kernelShape - Shape(1));
19821983 }
19831984 }
1984
1985
19851986 /********************************************************/
19861987 /* */
19871988 /* FFTWCorrelatePlan */
19891990 /********************************************************/
19901991
19911992 /** Like FFTWConvolvePlan, but performs correlation rather than convolution.
1992
1993
19931994 See \ref vigra::FFTWConvolvePlan for details.
1994
1995
19951996 <b> Usage:</b>
1996
1997
19971998 <b>\#include</b> \<vigra/multi_fft.hxx\><br>
19981999 Namespace: vigra
1999
2000
20002001 \code
20012002 // convolve a real array with a real kernel
20022003 MultiArray<2, double> src(Shape2(w, h)), dest(Shape2(w, h));
2003
2004
20042005 MultiArray<2, double> spatial_kernel(Shape2(9, 9));
20052006 Gaussian<double> gauss(1.0);
2006
2007
20072008 for(int y=0; y<9; ++y)
20082009 for(int x=0; x<9; ++x)
20092010 spatial_kernel(x, y) = gauss(x-4.0)*gauss(y-4.0);
2010
2011
20112012 // create an optimized plan by measuring the speed of several algorithm variants
20122013 FFTWCorrelatePlan<2, double> plan(src, spatial_kernel, dest, FFTW_MEASURE);
2013
2014
20142015 plan.execute(src, spatial_kernel, dest);
20152016 \endcode
20162017 */
20202021 {
20212022 typedef FFTWConvolvePlan<N, Real> BaseType;
20222023 public:
2023
2024
20242025 typedef typename MultiArrayShape<N>::type Shape;
2025
2026
20262027 /** \brief Create an empty plan.
2027
2028
20282029 The plan can be initialized later by one of the init() functions.
20292030 */
20302031 FFTWCorrelatePlan()
20312032 : BaseType()
20322033 {}
2033
2034
20342035 /** \brief Create a plan to correlate a real array with a real kernel.
2035
2036
20362037 The kernel must be defined in the spatial domain.
20372038 See \ref correlateFFT() for detailed information on required shapes and internal padding.
2038
2039
20392040 \arg planner_flags must be a combination of the
20402041 <a href="http://www.fftw.org/doc/Planner-Flags.html">planner
20412042 flags</a> defined by the FFTW library. The default <tt>FFTW_ESTIMATE</tt> will guess
20492050 unsigned int planner_flags = FFTW_ESTIMATE)
20502051 : BaseType(in, kernel, out, planner_flags)
20512052 {}
2052
2053
20532054 /** \brief Create a plan from just the shape information.
2054
2055
20552056 See \ref convolveFFT() for detailed information on required shapes and internal padding.
2056
2057
20572058 \arg fourierDomainKernel determines if the kernel is defined in the spatial or
20582059 Fourier domain.
20592060 \arg planner_flags must be a combination of the
20672068 bool useFourierKernel = false,
20682069 unsigned int planner_flags = FFTW_ESTIMATE)
20692070 : BaseType(inOut, kernel, false, planner_flags)
2070 {}
2071
2071 {
2072 ignore_argument(useFourierKernel);
2073 }
2074
20722075 /** \brief Init a plan to convolve a real array with a real kernel.
2073
2076
20742077 See the constructor with the same signature for details.
20752078 */
20762079 template <class C1, class C2, class C3>
20832086 "FFTWCorrelatePlan::init(): input and output must have the same shape.");
20842087 BaseType::init(in.shape(), kernel.shape(), planner_flags);
20852088 }
2086
2089
20872090 /** \brief Execute a plan to correlate a real array with a real kernel.
2088
2091
20892092 The array shapes must be the same as in the corresponding init function
20902093 or constructor. However, execute() can be called several times on
20912094 the same plan, even with different arrays, as long as they have the appropriate
21072110 /********************************************************/
21082111
21092112 template <unsigned int N, class Real, class C1, class C2>
2110 inline void
2111 fourierTransform(MultiArrayView<N, FFTWComplex<Real>, C1> in,
2113 inline void
2114 fourierTransform(MultiArrayView<N, FFTWComplex<Real>, C1> in,
21122115 MultiArrayView<N, FFTWComplex<Real>, C2> out)
21132116 {
21142117 FFTWPlan<N, Real>(in, out, FFTW_FORWARD).execute(in, out);
21152118 }
21162119
21172120 template <unsigned int N, class Real, class C1, class C2>
2118 inline void
2119 fourierTransformInverse(MultiArrayView<N, FFTWComplex<Real>, C1> in,
2121 inline void
2122 fourierTransformInverse(MultiArrayView<N, FFTWComplex<Real>, C1> in,
21202123 MultiArrayView<N, FFTWComplex<Real>, C2> out)
21212124 {
21222125 FFTWPlan<N, Real>(in, out, FFTW_BACKWARD).execute(in, out);
21232126 }
21242127
21252128 template <unsigned int N, class Real, class C1, class C2>
2126 void
2127 fourierTransform(MultiArrayView<N, Real, C1> in,
2129 void
2130 fourierTransform(MultiArrayView<N, Real, C1> in,
21282131 MultiArrayView<N, FFTWComplex<Real>, C2> out)
21292132 {
21302133 if(in.shape() == out.shape())
21432146 }
21442147
21452148 template <unsigned int N, class Real, class C1, class C2>
2146 void
2147 fourierTransformInverse(MultiArrayView<N, FFTWComplex<Real>, C1> in,
2149 void
2150 fourierTransformInverse(MultiArrayView<N, FFTWComplex<Real>, C1> in,
21482151 MultiArrayView<N, Real, C2> out)
21492152 {
21502153 vigra_precondition(in.shape() == fftwCorrespondingShapeR2C(out.shape()),
21542157
21552158 //@}
21562159
2157 /** \addtogroup MultiArrayConvolutionFilters
2160 /** \addtogroup ConvolutionFilters
21582161 */
21592162 //@{
21602163
21692172 Thanks to the convolution theorem of Fourier theory, a convolution in the spatial domain
21702173 is equivalent to a multiplication in the frequency domain. Thus, for certain kernels
21712174 (especially large, non-separable ones), it is advantageous to perform the convolution by first
2172 transforming both array and kernel to the frequency domain, multiplying the frequency
2173 representations, and transforming the result back into the spatial domain.
2174 Some kernels have a much simpler definition in the frequency domain, so that they are readily
2175 computed there directly, avoiding Fourier transformation of those kernels.
2176
2175 transforming both array and kernel to the frequency domain, multiplying the frequency
2176 representations, and transforming the result back into the spatial domain.
2177 Some kernels have a much simpler definition in the frequency domain, so that they are readily
2178 computed there directly, avoiding Fourier transformation of those kernels.
2179
21772180 The following functions implement various variants of FFT-based convolution:
2178
2181
21792182 <DL>
2180 <DT><b>convolveFFT</b><DD> Convolve a real-valued input array with a kernel such that the
2183 <DT><b>convolveFFT</b><DD> Convolve a real-valued input array with a kernel such that the
21812184 result is also real-valued. That is, the kernel is either provided
2182 as a real-valued array in the spatial domain, or as a
2183 complex-valued array in the Fourier domain, using the half-space format
2185 as a real-valued array in the spatial domain, or as a
2186 complex-valued array in the Fourier domain, using the half-space format
21842187 of the R2C Fourier transform (see below).
2185 <DT><b>convolveFFTMany</b><DD> Like <tt>convolveFFT</tt>, but you may provide many kernels at once
2186 (using an iterator pair specifying the kernel sequence).
2187 This has the advantage that the forward transform of the input array needs
2188 <DT><b>convolveFFTMany</b><DD> Like <tt>convolveFFT</tt>, but you may provide many kernels at once
2189 (using an iterator pair specifying the kernel sequence).
2190 This has the advantage that the forward transform of the input array needs
21882191 to be executed only once.
2189 <DT><b>convolveFFTComplex</b><DD> Convolve a complex-valued input array with a complex-valued kernel,
2190 resulting in a complex-valued output array. An additional flag is used to
2192 <DT><b>convolveFFTComplex</b><DD> Convolve a complex-valued input array with a complex-valued kernel,
2193 resulting in a complex-valued output array. An additional flag is used to
21912194 specify whether the kernel is defined in the spatial or frequency domain.
2192 <DT><b>convolveFFTComplexMany</b><DD> Like <tt>convolveFFTComplex</tt>, but you may provide many
2193 kernels at once (using an iterator pair specifying the kernel sequence).
2194 This has the advantage that the forward transform of the input array needs
2195 <DT><b>convolveFFTComplexMany</b><DD> Like <tt>convolveFFTComplex</tt>, but you may provide many
2196 kernels at once (using an iterator pair specifying the kernel sequence).
2197 This has the advantage that the forward transform of the input array needs
21952198 to be executed only once.
21962199 </DL>
2197
2200
21982201 The output arrays must have the same shape as the input arrays. In the "Many" variants of the
21992202 convolution functions, the kernels must all have the same shape.
2200
2203
22012204 The origin of the kernel is always assumed to be in the center of the kernel array (precisely,
2202 at the point <tt>floor(kernel.shape() / 2.0)</tt>, except when the half-space format is used, see below).
2203 The function \ref moveDCToUpperLeft() will be called internally to align the kernel with the transformed
2205 at the point <tt>floor(kernel.shape() / 2.0)</tt>, except when the half-space format is used, see below).
2206 The function \ref moveDCToUpperLeft() will be called internally to align the kernel with the transformed
22042207 input as appropriate.
2205
2208
22062209 If a real input is combined with a real kernel, the kernel is automatically assumed to be defined
2207 in the spatial domain. If a real input is combined with a complex kernel, the kernel is assumed
2208 to be defined in the Fourier domain in half-space format. If the input array is complex, a flag
2210 in the spatial domain. If a real input is combined with a complex kernel, the kernel is assumed
2211 to be defined in the Fourier domain in half-space format. If the input array is complex, a flag
22092212 <tt>fourierDomainKernel</tt> determines where the kernel is defined.
2210
2213
22112214 When the kernel is defined in the spatial domain, the convolution functions will automatically pad
22122215 (enlarge) the input array by at least the kernel radius in each direction. The newly added space is
2213 filled according to reflective boundary conditions in order to minimize border artifacts during
2214 convolution. It is thus ensured that convolution in the Fourier domain yields the same results as
2215 convolution in the spatial domain (e.g. when \ref separableConvolveMultiArray() is called with the
2216 filled according to reflective boundary conditions in order to minimize border artifacts during
2217 convolution. It is thus ensured that convolution in the Fourier domain yields the same results as
2218 convolution in the spatial domain (e.g. when \ref separableConvolveMultiArray() is called with the
22162219 same kernel). A little further padding may be added to make sure that the padded array shape
22172220 uses integers which have only small prime factors, because FFTW is then able to use the fastest
22182221 possible algorithms. Any padding is automatically removed from the result arrays before the function
22192222 returns.
2220
2223
22212224 When the kernel is defined in the frequency domain, it must be complex-valued, and its shape determines
22222225 the shape of the Fourier representation (i.e. the input is padded according to the shape of the kernel).
2223 If we are going to perform a complex-valued convolution, the kernel must be defined for the entire
2224 frequency domain, and its shape directly determines the size of the FFT.
2225
2226 If we are going to perform a complex-valued convolution, the kernel must be defined for the entire
2227 frequency domain, and its shape directly determines the size of the FFT.
2228
22262229 In contrast, a frequency domain kernel for a real-valued convolution must have symmetry properties
2227 that allow to drop half of the kernel coefficients, as in the
2228 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C transform</a>.
2229 That is, the kernel must have the <i>half-space format</i>, that is the shape returned by <tt>fftwCorrespondingShapeR2C(fourier_shape)</tt>, where <tt>fourier_shape</tt> is the desired
2230 logical shape of the frequency representation (and thus the size of the padded input). The origin
2231 of the kernel must be at the point
2232 <tt>(0, floor(fourier_shape[0] / 2.0), ..., floor(fourier_shape[N-1] / 2.0))</tt>
2230 that allow to drop half of the kernel coefficients, as in the
2231 <a href="http://www.fftw.org/doc/Multi_002dDimensional-DFTs-of-Real-Data.html">R2C transform</a>.
2232 That is, the kernel must have the <i>half-space format</i>, that is the shape returned by <tt>fftwCorrespondingShapeR2C(fourier_shape)</tt>, where <tt>fourier_shape</tt> is the desired
2233 logical shape of the frequency representation (and thus the size of the padded input). The origin
2234 of the kernel must be at the point
2235 <tt>(0, floor(fourier_shape[0] / 2.0), ..., floor(fourier_shape[N-1] / 2.0))</tt>
22332236 (i.e. as in a regular kernel except for the first dimension).
2234
2235 The <tt>Real</tt> type in the declarations can be <tt>double</tt>, <tt>float</tt>, and
2237
2238 The <tt>Real</tt> type in the declarations can be <tt>double</tt>, <tt>float</tt>, and
22362239 <tt>long double</tt>. Your program must always link against <tt>libfftw3</tt>. If you use
2237 <tt>float</tt> or <tt>long double</tt> arrays, you must <i>additionally</i> link against
2240 <tt>float</tt> or <tt>long double</tt> arrays, you must <i>additionally</i> link against
22382241 <tt>libfftw3f</tt> and <tt>libfftw3l</tt> respectively.
2239
2242
22402243 The Fourier transform functions internally create <a href="http://www.fftw.org/doc/Using-Plans.html">FFTW plans</a>
22412244 which control the algorithm details. The plans are created with the flag <tt>FFTW_ESTIMATE</tt>, i.e.
22422245 optimal settings are guessed or read from saved "wisdom" files. If you need more control over planning,
22432246 you can use the class \ref FFTWConvolvePlan.
2244
2247
22452248 See also \ref applyFourierFilter() for corresponding functionality on the basis of the
22462249 old image iterator interface.
2247
2250
22482251 <b> Declarations:</b>
22492252
22502253 Real-valued convolution with kernel in the spatial domain:
22512254 \code
22522255 namespace vigra {
22532256 template <unsigned int N, class Real, class C1, class C2, class C3>
2254 void
2255 convolveFFT(MultiArrayView<N, Real, C1> in,
2257 void
2258 convolveFFT(MultiArrayView<N, Real, C1> in,
22562259 MultiArrayView<N, Real, C2> kernel,
22572260 MultiArrayView<N, Real, C3> out);
22582261 }
22622265 \code
22632266 namespace vigra {
22642267 template <unsigned int N, class Real, class C1, class C2, class C3>
2265 void
2266 convolveFFT(MultiArrayView<N, Real, C1> in,
2268 void
2269 convolveFFT(MultiArrayView<N, Real, C1> in,
22672270 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
22682271 MultiArrayView<N, Real, C3> out);
22692272 }
22702273 \endcode
22712274
2272 Series of real-valued convolutions with kernels in the spatial or Fourier domain
2275 Series of real-valued convolutions with kernels in the spatial or Fourier domain
22732276 (the kernel and out sequences must have the same length):
22742277 \code
22752278 namespace vigra {
2276 template <unsigned int N, class Real, class C1,
2279 template <unsigned int N, class Real, class C1,
22772280 class KernelIterator, class OutIterator>
2278 void
2279 convolveFFTMany(MultiArrayView<N, Real, C1> in,
2281 void
2282 convolveFFTMany(MultiArrayView<N, Real, C1> in,
22802283 KernelIterator kernels, KernelIterator kernelsEnd,
22812284 OutIterator outs);
22822285 }
22952298 }
22962299 \endcode
22972300
2298 Series of complex-valued convolutions (parameter <tt>fourierDomainKernel</tt>
2299 determines if the kernels are defined in the spatial or Fourier domain,
2301 Series of complex-valued convolutions (parameter <tt>fourierDomainKernel</tt>
2302 determines if the kernels are defined in the spatial or Fourier domain,
23002303 the kernel and out sequences must have the same length):
23012304 \code
23022305 namespace vigra {
2303 template <unsigned int N, class Real, class C1,
2306 template <unsigned int N, class Real, class C1,
23042307 class KernelIterator, class OutIterator>
2305 void
2306 convolveFFTComplexMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
2308 void
2309 convolveFFTComplexMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
23072310 KernelIterator kernels, KernelIterator kernelsEnd,
23082311 OutIterator outs,
23092312 bool fourierDomainKernel);
23192322 // convolve real array with a Gaussian (sigma=1) defined in the spatial domain
23202323 // (implicitly uses padding by at least 4 pixels)
23212324 MultiArray<2, double> src(Shape2(w, h)), dest(Shape2(w,h));
2322
2325
23232326 MultiArray<2, double> spatial_kernel(Shape2(9, 9));
23242327 Gaussian<double> gauss(1.0);
2325
2328
23262329 for(int y=0; y<9; ++y)
23272330 for(int x=0; x<9; ++x)
23282331 spatial_kernel(x, y) = gauss(x-4.0)*gauss(y-4.0);
23292332
23302333 convolveFFT(src, spatial_kernel, dest);
2331
2334
23322335 // convolve real array with a Gaussian (sigma=1) defined in the Fourier domain
23332336 // (uses no padding, because the kernel size corresponds to the input size)
23342337 MultiArray<2, FFTWComplex<double> > fourier_kernel(fftwCorrespondingShapeR2C(src.shape()));
23352338 int y0 = h / 2;
2336
2339
23372340 for(int y=0; y<fourier_kernel.shape(1); ++y)
23382341 for(int x=0; x<fourier_kernel.shape(0); ++x)
23392342 fourier_kernel(x, y) = exp(-0.5*sq(x / double(w))) * exp(-0.5*sq((y-y0)/double(h)));
23442347 doxygen_overloaded_function(template <...> void convolveFFT)
23452348
23462349 template <unsigned int N, class Real, class C1, class C2, class C3>
2347 void
2348 convolveFFT(MultiArrayView<N, Real, C1> in,
2350 void
2351 convolveFFT(MultiArrayView<N, Real, C1> in,
23492352 MultiArrayView<N, Real, C2> kernel,
23502353 MultiArrayView<N, Real, C3> out)
23512354 {
23532356 }
23542357
23552358 template <unsigned int N, class Real, class C1, class C2, class C3>
2356 void
2357 convolveFFT(MultiArrayView<N, Real, C1> in,
2359 void
2360 convolveFFT(MultiArrayView<N, Real, C1> in,
23582361 MultiArrayView<N, FFTWComplex<Real>, C2> kernel,
23592362 MultiArrayView<N, Real, C3> out)
23602363 {
23832386 */
23842387 doxygen_overloaded_function(template <...> void convolveFFTMany)
23852388
2386 template <unsigned int N, class Real, class C1,
2389 template <unsigned int N, class Real, class C1,
23872390 class KernelIterator, class OutIterator>
2388 void
2389 convolveFFTMany(MultiArrayView<N, Real, C1> in,
2391 void
2392 convolveFFTMany(MultiArrayView<N, Real, C1> in,
23902393 KernelIterator kernels, KernelIterator kernelsEnd,
23912394 OutIterator outs)
23922395 {
24012404 */
24022405 doxygen_overloaded_function(template <...> void convolveFFTComplexMany)
24032406
2404 template <unsigned int N, class Real, class C1,
2407 template <unsigned int N, class Real, class C1,
24052408 class KernelIterator, class OutIterator>
2406 void
2407 convolveFFTComplexMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
2409 void
2410 convolveFFTComplexMany(MultiArrayView<N, FFTWComplex<Real>, C1> in,
24082411 KernelIterator kernels, KernelIterator kernelsEnd,
24092412 OutIterator outs,
24102413 bool fourierDomainKernel)
24132416 plan.initMany(in, kernels, kernelsEnd, outs, fourierDomainKernel);
24142417 plan.executeMany(in, kernels, kernelsEnd, outs);
24152418 }
2416
2419
24172420 /********************************************************/
24182421 /* */
24192422 /* correlateFFT */
24212424 /********************************************************/
24222425
24232426 /** \brief Correlate an array with a kernel by means of the Fourier transform.
2424
2425 This function correlates a real-valued input array with a real-valued kernel
2426 such that the result is also real-valued. Thanks to the correlation theorem of
2427 Fourier theory, a correlation in the spatial domain is equivalent to a multiplication
2427
2428 This function correlates a real-valued input array with a real-valued kernel
2429 such that the result is also real-valued. Thanks to the correlation theorem of
2430 Fourier theory, a correlation in the spatial domain is equivalent to a multiplication
24282431 with the complex conjugate in the frequency domain. Thus, for
2429 certain kernels (especially large, non-separable ones), it is advantageous to perform the
2432 certain kernels (especially large, non-separable ones), it is advantageous to perform the
24302433 correlation by first transforming both array and kernel to the frequency domain, multiplying
24312434 the frequency representations, and transforming the result back into the spatial domain.
2432
2435
24332436 The output arrays must have the same shape as the input arrays.
2434
2437
24352438 See also \ref convolveFFT() for corresponding functionality.
2436
2439
24372440 <b> Declarations:</b>
2438
2441
24392442 \code
24402443 namespace vigra {
24412444 template <unsigned int N, class Real, class C1, class C2, class C3>
24452448 MultiArrayView<N, Real, C3> out);
24462449 }
24472450 \endcode
2448
2451
24492452 <b> Usage:</b>
2450
2453
24512454 <b>\#include</b> \<vigra/multi_fft.hxx\><br>
24522455 Namespace: vigra
2453
2456
24542457 \code
24552458 // correlate real array with a template to find best matches
24562459 // (implicitly uses padding by at least 4 pixels)
24572460 MultiArray<2, double> src(Shape2(w, h)), dest(Shape2(w, h));
2458
2461
24592462 MultiArray<2, double> template(Shape2(9, 9));
2460 template = ...;
2463 template = ...;
24612464
24622465 correlateFFT(src, template, dest);
24632466 \endcode
30803080
30813081 } // namespace std
30823082
3083 #ifdef WITH_BOOST_GRAPH
3084 namespace boost {
3085 using vigra::boost_graph::out_edges;
3086 using vigra::boost_graph::out_degree;
3087 using vigra::boost_graph::source;
3088 using vigra::boost_graph::target;
3089 using vigra::boost_graph::in_edges;
3090 using vigra::boost_graph::in_degree;
3091 using vigra::boost_graph::adjacent_vertices;
3092 using vigra::boost_graph::vertices;
3093 using vigra::boost_graph::edges;
3094 using vigra::boost_graph::edge;
3095 using vigra::boost_graph::num_vertices;
3096 using vigra::boost_graph::num_edges;
3097 }
3098 #endif /* WITH_BOOST_GRAPH */
30833099
30843100
30853101 #endif /* VIGRA_MULTI_GRIDGRAPH_HXX */
570570 }
571571
572572 template <class U>
573 void internal_reset(U const & p)
573 void internal_reset(U const &)
574574 {
575575 vigra_fail("CoupledHandle<Multiband<T>>::internal_reset(): not implemented.");
576576 }
852852 }
853853
854854 template <class V>
855 void internal_reset(V const & p)
855 void internal_reset(V const &)
856856 {
857857 vigra_fail("CoupledHandle<ChunkedMemory<T>>::internal_reset(): not implemented.");
858858 }
108108 template< unsigned int DIM , class T_DATA, class T_HIST >
109109 void multiGaussianCoHistogram(
110110 const MultiArrayView<DIM, T_DATA > & imageA,
111 const MultiArrayView<DIM, T_DATA > & imageB,
111 const MultiArrayView<DIM, T_DATA > & /*imageB*/,
112112 const TinyVector<T_DATA,2> & minVals,
113113 const TinyVector<T_DATA,2> & maxVals,
114114 const TinyVector<int,2> & nBins,
145145
146146 const float fiA = binIndexA;
147147 const unsigned int biA = std::floor(fiA+0.5);
148 const float fiB = binIndexB;
149148 const unsigned int biB = std::floor(fiA+0.5);
150149 histCoord[DIM]=std::min(biA,static_cast<unsigned int>(nBins[0]-1));
151150 histCoord[DIM+1]=std::min(biB,static_cast<unsigned int>(nBins[1]-1));
209208 typedef MultiArray<DIM+1, U> OutType;
210209 typedef typename OutType::difference_type OutCoord;
211210
212 // FIXME: crashes on Python3
211 // FIXME: crashes on Python3
213212
214213 HistCoord histShape;
215214 std::copy(image.shape().begin(), image.shape().end(), histShape.begin());
462462 inline void
463463 readVolumeImpl(DestIterator d, Shape const & shape, std::ifstream & s, ArrayVector<T> & buffer, MetaInt<0>)
464464 {
465 s.read((char*)buffer.begin(), shape[0]*sizeof(T));
465 s.read(reinterpret_cast<char*>(buffer.begin()), shape[0]*sizeof(T));
466466
467467 DestIterator dend = d + shape[0];
468468 int k = 0;
4949
5050 /** \brief Iterate over a virtual array where each element contains its coordinate.
5151
52 MultiCoordinateIterator behaves like a read-only random access iterator.
52 MultiCoordinateIterator behaves like a read-only random access iterator.
5353 It moves accross the given region of interest in scan-order (with the first
54 index changing most rapidly), and dereferencing the iterator returns the
55 coordinate (i.e. multi-dimensional index) of the current array element.
56 The functionality is thus similar to a meshgrid in Matlab or numpy.
57
54 index changing most rapidly), and dereferencing the iterator returns the
55 coordinate (i.e. multi-dimensional index) of the current array element.
56 The functionality is thus similar to a meshgrid in Matlab or numpy.
57
5858 Internally, it is just a wrapper of a \ref CoupledScanOrderIterator that
59 has been created without any array and whose reference type is not a
59 has been created without any array and whose reference type is not a
6060 \ref CoupledHandle, but the coordinate itself.
61
62 The iterator supports all functions listed in the STL documentation for
61
62 The iterator supports all functions listed in the STL documentation for
6363 <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterators</a>.
6464
6565 <b>Usage:</b>
6666
6767 <b>\#include</b> \<vigra/multi_iterator.hxx\><br/>
6868 Namespace: vigra
69
69
7070 \code
7171 MultiCoordinateIterator<3> i(Shape3(3,2,1)), end = i.getEndIterator();
72
72
7373 for(; i != end; ++i)
7474 std::cout << *i << "\n";
75
75
7676 // Output:
7777 // (0, 0, 0)
7878 // (1, 0, 0)
101101 typedef typename handle_type::pointer pointer;
102102 typedef typename handle_type::const_pointer const_pointer;
103103
104 MultiCoordinateIterator()
104 MultiCoordinateIterator()
105105 : base_type(handle_type())
106106 {}
107107
108 explicit MultiCoordinateIterator(shape_type const & shape)
108 explicit MultiCoordinateIterator(shape_type const & shape)
109109 : base_type(handle_type(shape))
110110 {}
111111
112 explicit MultiCoordinateIterator(shape_type const & start, shape_type const & end)
112 explicit MultiCoordinateIterator(shape_type const & start, shape_type const & end)
113113 : base_type(handle_type(end))
114114 {
115115 this->restrictToSubarray(start, end);
116116 }
117117
118118 template<class DirectedTag>
119 explicit MultiCoordinateIterator(GridGraph<N, DirectedTag> const & g)
119 explicit MultiCoordinateIterator(GridGraph<N, DirectedTag> const & g)
120120 : base_type(handle_type(g.shape()))
121121 {}
122122
123123
124124 template<class DirectedTag>
125 explicit MultiCoordinateIterator(GridGraph<N, DirectedTag> const & g, const typename GridGraph<N, DirectedTag>::Node & node)
125 explicit MultiCoordinateIterator(GridGraph<N, DirectedTag> const & g, const typename GridGraph<N, DirectedTag>::Node & node)
126126 : base_type(handle_type(g.shape()))
127127 {
128128 if( isInside(g,node))
139139 {
140140 return this->template get<0>();
141141 }
142
142
143143 const_reference operator*() const
144144 {
145145 return this->template get<0>();
146146 }
147
147
148148 operator value_type() const
149149 {
150150 return *(*this);
154154 {
155155 return &this->template get<0>();
156156 }
157
157
158158 const_pointer operator->() const
159159 {
160160 return &this->template get<0>();
170170 base_type::operator++();
171171 return *this;
172172 }
173
173
174174 MultiCoordinateIterator operator++(int)
175175 {
176176 MultiCoordinateIterator res(*this);
242242 {
243243 return base_type::operator-(other);
244244 }
245
245
246246 protected:
247 MultiCoordinateIterator(base_type const & base)
247 MultiCoordinateIterator(base_type const & base)
248248 : base_type(base)
249249 {}
250250 };
251251
252252 /** \brief Sequential iterator for MultiArrayView.
253
254 This iterator provides STL-compatible random access iterator functionality for arbitrary
253
254 This iterator provides STL-compatible random access iterator functionality for arbitrary
255255 \ref MultiArrayView instances, regardless of their shapes and strides. The
256 class uses an implementation that minimizes speed penalties that could result from
257 non-trivial strides. The <i>scan-order</i> is defined such that dimensions are iterated
256 class uses an implementation that minimizes speed penalties that could result from
257 non-trivial strides. The <i>scan-order</i> is defined such that dimensions are iterated
258258 from front to back (first to last).
259
260 You normally construct instances of this class by calling \ref MultiArrayView::begin()
261 and \ref MultiArrayView::end().
262
263 The iterator supports all functions listed in the STL documentation for
259
260 You normally construct instances of this class by calling \ref MultiArrayView::begin()
261 and \ref MultiArrayView::end().
262
263 The iterator supports all functions listed in the STL documentation for
264264 <a href="http://www.sgi.com/tech/stl/RandomAccessIterator.html">Random Access Iterators</a>.
265
265
266266 <b>\#include</b> \<vigra/multi_iterator.hxx\><br/>
267267 Namespace: vigra
268268 */
286286 typedef POINTER pointer;
287287 typedef T const * const_pointer;
288288
289 StridedScanOrderIterator()
289 StridedScanOrderIterator()
290290 : base_type()
291291 {}
292292
293293 template <class S>
294 explicit StridedScanOrderIterator(MultiArrayView<N, T, S> const & view)
294 explicit StridedScanOrderIterator(MultiArrayView<N, T, S> const & view)
295295 : base_type(createCoupledIterator(view))
296296 {}
297297
298 StridedScanOrderIterator(POINTER p, shape_type const & shape, shape_type const & strides)
298 StridedScanOrderIterator(POINTER p, shape_type const & shape, shape_type const & strides)
299299 : base_type(createCoupledIterator(MultiArrayView<N, T, StridedArrayTag>(shape, strides, const_cast<T *>(p))))
300300 {}
301301
302 StridedScanOrderIterator(handle_type const & handle)
302 StridedScanOrderIterator(handle_type const & handle)
303303 : base_type(handle)
304304 {}
305305
307307 {
308308 return this->template get<1>();
309309 }
310
310
311311 const_reference operator*() const
312312 {
313313 return this->template get<1>();
317317 {
318318 return &this->template get<1>();
319319 }
320
320
321321 const_pointer operator->() const
322322 {
323323 return &this->template get<1>();
348348 base_type::operator++();
349349 return *this;
350350 }
351
351
352352 StridedScanOrderIterator operator++(int)
353353 {
354354 StridedScanOrderIterator res(*this);
420420 {
421421 return StridedScanOrderIterator(*this) -= coordOffset;
422422 }
423
423
424424 MultiArrayIndex index() const
425425 {
426426 return this->scanOrderIndex();
427427 }
428428
429 StridedScanOrderIterator &
429 StridedScanOrderIterator &
430430 restrictToSubarray(shape_type const & start, shape_type const & stop)
431431 {
432432 base_type::restrictToSubarray(start, stop);
433433 return *this;
434434 }
435
435
436436 protected:
437 StridedScanOrderIterator(base_type const & base)
437 StridedScanOrderIterator(base_type const & base)
438438 : base_type(base)
439439 {}
440440 };
462462
463463 <p>
464464 The Multidimensional Iterator concept allows navigation on arrays
465 of arbitrary dimension. It provides two modes of iteration:
465 of arbitrary dimension. It provides two modes of iteration:
466466 <em>direct traversal</em>, and <em>hierarchical traversal</em>.
467 In general, hierarchical traversal will be faster, while only
467 In general, hierarchical traversal will be faster, while only
468468 direct traversal allows for true random access in all dimensions.
469469 Via the <tt>dim<K>()</tt> function, operations applying to a particular
470470 dimension can be used in the direct traversal mode. In contrast,
584584 navigation functions referring to a particular dimension.<br>
585585 Example (assuming <tt>i, j</tt> are 3-dimensional):<br>
586586 \code
587 i.dim<0>()++; // increment dimension 0
588 i.dim<1>()++; // increment dimension 1
589 i.dim<2>()++; // increment dimension 2
590
587 i.dim<0>()++; // increment dimension 0
588 i.dim<1>()++; // increment dimension 1
589 i.dim<2>()++; // increment dimension 2
590
591591 j += MultiIterator::multi_difference_type(1,1,1); // same effect
592592 \endcode
593593 </td>
604604 Note that it is impossible to support an <tt>operator-</tt> between two iterators which returns
605605 a <tt>MultiIterator::multi_difference_type</tt> because it is impossible to decide to which
606606 dimension a difference applies. Consider for example, a 2-dimensional iterator <tt>i</tt>, and
607 let <tt>j = i + multi_difference_type(width, 0)</tt>, <tt>k = i + multi_difference_type(0,1)</tt>,
608 where <tt>width</tt> is the array's total width. In general, <tt>j</tt> and <tt>k</tt> point to
607 let <tt>j = i + multi_difference_type(width, 0)</tt>, <tt>k = i + multi_difference_type(0,1)</tt>,
608 where <tt>width</tt> is the array's total width. In general, <tt>j</tt> and <tt>k</tt> point to
609609 the same memory location, so that the two cases cannot easily be distinguished (it is possible,
610 but iterator performance will suffer significantly, as is experienced with
610 but iterator performance will suffer significantly, as is experienced with
611611 \ref vigra::ImageIterator where differencing is allowed).
612612 </p>
613613
686686 </tr>
687687 <tr>
688688 <td><tt>i.begin()</tt></td><td><tt>next_type</tt></td>
689 <td>create the hierarchical iterator pointing to the first element in the
689 <td>create the hierarchical iterator pointing to the first element in the
690690 next lower dimension.<br>
691691 <em>Note:</em> The result of this operation is undefined if the iterator
692692 doesn't point to element 0 in all dimensions below its current dimension.<br>
705705 }
706706 }
707707 }
708
708
709709 \endcode
710710 </td>
711711 </tr>
712712 <tr>
713713 <td><tt>i.end()</tt></td><td><tt>next_type</tt></td>
714 <td>create the hierarchical iterator pointing to the past-the-end location in the
714 <td>create the hierarchical iterator pointing to the past-the-end location in the
715715 next lower dimension.<br>
716716 <em>Note:</em> The result of this operation is undefined if the iterator
717717 doesn't point to element 0 in all dimensions below its current dimension.</td>
726726
727727 */
728728
729 /** \addtogroup MultiIteratorGroup
729 /** \addtogroup MultiIteratorGroup
730730 */
731731 //@{
732732
943943
944944 protected:
945945
946 difference_type
946 difference_type
947947 total_stride(typename multi_difference_type::const_iterator d) const
948948 {
949949 return d[level];
11241124
11251125 protected:
11261126
1127 difference_type
1127 difference_type
11281128 total_stride(typename multi_difference_type::const_iterator d) const
11291129 {
11301130 return d[level]*m_stride[level] + base_type::total_stride(d);
11371137 /* */
11381138 /********************************************************/
11391139
1140 /** \brief A multi-dimensional hierarchical iterator to be used with
1140 /** \brief A multi-dimensional hierarchical iterator to be used with
11411141 \ref vigra::MultiArrayView if it is not strided.
11421142
11431143 See \ref MultiIteratorPage for further documentation.
11461146 Namespace: vigra
11471147 */
11481148 template <unsigned int N, class T, class REFERENCE, class POINTER>
1149 class MultiIterator
1149 class MultiIterator
11501150 #ifndef DOXYGEN // doxygen doesn't understand this inheritance
11511151 : public MultiIterator<N-1, T, REFERENCE, POINTER>
11521152 #endif
11561156 /** the type of the parent in the inheritance hierarchy.
11571157 */
11581158 typedef MultiIterator<N-1, T, REFERENCE, POINTER> base_type;
1159
1159
11601160 /** the iterator's level in the dimension hierarchy
11611161 */
11621162 enum { level = N-1 };
11811181 */
11821182 typedef const value_type *const_pointer;
11831183
1184 /** multi difference type
1184 /** multi difference type
11851185 (used for offsetting along all axes simultaneously)
11861186 */
11871187 typedef typename MultiArrayShape<N>::type multi_difference_type;
1188
1188
11891189 /** difference type (used for offsetting)
11901190 */
11911191 #ifndef DOXYGEN
12091209 /** the iterator tag (image traverser)
12101210 */
12111211 typedef multi_dimensional_traverser_tag iterator_category;
1212
1212
12131213 /* use default copy constructor and assignment operator */
12141214
12151215 /** default constructor.
13721372 /** greater than.
13731373 */
13741374 bool operator> (const MultiIterator &rhs) const;
1375
1375
13761376 /** greater or equal.
13771377 */
13781378 bool operator>= (const MultiIterator &rhs) const;
13791379 #endif
13801380
1381 /** access the array element at the given offset in
1381 /** access the array element at the given offset in
13821382 the current dimension.
13831383 */
13841384 reference operator[] (difference_type n) const
13931393 return this->m_ptr [total_stride(d.begin())];
13941394 }
13951395
1396 /** Return the (N-1)-dimensional multi-iterator that points to
1397 the first (N-1)-dimensional subarray of the
1396 /** Return the (N-1)-dimensional multi-iterator that points to
1397 the first (N-1)-dimensional subarray of the
13981398 N-dimensional array this iterator is referring to.
13991399 The result is only valid if this iterator refers to location
14001400 0 in <em>all</em> dimensions below its current dimension N,
14161416 return *this;
14171417 }
14181418
1419 /** Return the (N-1)-dimensional multi-iterator that points beyond
1420 the last (N-1)-dimensional subarray of the
1419 /** Return the (N-1)-dimensional multi-iterator that points beyond
1420 the last (N-1)-dimensional subarray of the
14211421 N-dimensional array this iterator is referring to.
14221422 The result is only valid if this iterator refers to location
14231423 0 in <em>all</em> dimensions below its current dimension N,
14441444 {
14451445 // go down the current column starting at the location of 'outer'
14461446 }
1447 \endcode
1447 \endcode
14481448 */
14491449 iterator iteratorForDimension(unsigned int d) const
14501450 {
14541454 }
14551455 /** Return the multi-iterator that operates on dimension K in order
14561456 to manipulate this dimension directly. Usage:
1457
1457
14581458 \code
1459
1459
14601460 MultiIterator<3, int> i3 = ...;
1461
1461
14621462 i3.template dim<2>()++; // increment outer dimension
14631463 i3.template dim<0>()++; // increment inner dimension
14641464 \endcode
1465
1465
14661466 For convenience, the same functionality is also available
14671467 as <tt>dim0()</tt>, <tt>dim1()</tt> etc. up to <tt>dim4()</tt>:
1468
1468
14691469 \code
1470
1470
14711471 MultiIterator<3, int> i3 = ...;
1472
1472
14731473 i3.dim2()++; // increment outer dimension
14741474 i3.dim0()++; // increment inner dimension
1475 \endcode
1475 \endcode
14761476 */
14771477 template <unsigned int K>
14781478 MultiIterator<K+1, T, REFERENCE, POINTER> &
14941494
14951495 protected:
14961496
1497 difference_type
1497 difference_type
14981498 total_stride(typename multi_difference_type::const_iterator d) const
14991499 {
15001500 return d[level]*this->m_stride[level] + base_type::total_stride(d);
17061706
17071707 protected:
17081708
1709 difference_type
1709 difference_type
17101710 total_stride(typename multi_difference_type::const_iterator d) const
17111711 {
17121712 return d[level] * m_stride;
18871887
18881888 protected:
18891889
1890 difference_type
1890 difference_type
18911891 total_stride(typename multi_difference_type::const_iterator d) const
18921892 {
18931893 return d[level]*m_stride[level] + base_type::total_stride(d);
19001900 /* */
19011901 /********************************************************/
19021902
1903 /** \brief A multi-dimensional hierarchical iterator to be used with
1903 /** \brief A multi-dimensional hierarchical iterator to be used with
19041904 \ref vigra::MultiArrayView if it is not strided.
19051905
19061906 See \ref MultiIteratorPage for further documentation.
19091909 Namespace: vigra
19101910 */
19111911 template <unsigned int N, class T, class REFERENCE, class POINTER>
1912 class StridedMultiIterator
1912 class StridedMultiIterator
19131913 #ifndef DOXYGEN // doxygen doesn't understand this inheritance
19141914 : public StridedMultiIterator<N-1, T, REFERENCE, POINTER>
19151915 #endif
19191919 /** the type of the parent in the inheritance hierarchy.
19201920 */
19211921 typedef StridedMultiIterator<N-1, T, REFERENCE, POINTER> base_type;
1922
1922
19231923 /** the iterator's level in the dimension hierarchy
19241924 */
19251925 enum { level = N-1 };
19441944 */
19451945 typedef const value_type *const_pointer;
19461946
1947 /** multi difference type
1947 /** multi difference type
19481948 (used for offsetting along all axes simultaneously)
19491949 */
19501950 typedef typename MultiArrayShape<N>::type multi_difference_type;
19581958 #else
19591959 typedef MultiArrayIndex difference_type;
19601960 #endif
1961
1961
19621962 /** the StridedMultiIterator for the next lower dimension.
19631963 */
19641964 typedef base_type next_type;
19711971 /** the iterator tag (image traverser)
19721972 */
19731973 typedef multi_dimensional_traverser_tag iterator_category;
1974
1974
19751975 /* use default copy constructor and assignment operator */
19761976
19771977 /** default constructor.
21342134 /** greater than.
21352135 */
21362136 bool operator> (const StridedMultiIterator &rhs) const;
2137
2137
21382138 /** greater or equal.
21392139 */
21402140 bool operator>= (const StridedMultiIterator &rhs) const;
21412141 #endif
21422142
2143 /** access the array element at the given offset in
2143 /** access the array element at the given offset in
21442144 the current dimension.
21452145 */
21462146 reference operator[] (difference_type n) const
21552155 return this->m_ptr [total_stride(d.begin())];
21562156 }
21572157
2158 /** Return the (N-1)-dimensional multi-iterator that points to
2159 the first (N-1)-dimensional subarray of the
2158 /** Return the (N-1)-dimensional multi-iterator that points to
2159 the first (N-1)-dimensional subarray of the
21602160 N-dimensional array this iterator is referring to.
21612161 The result is only valid if this iterator refers to location
21622162 0 in <em>all</em> dimensions below its current dimension N,
21782178 return *this;
21792179 }
21802180
2181 /** Return the (N-1)-dimensional multi-iterator that points beyond
2182 the last (N-1)-dimensional subarray of the
2181 /** Return the (N-1)-dimensional multi-iterator that points beyond
2182 the last (N-1)-dimensional subarray of the
21832183 N-dimensional array this iterator is referring to.
21842184 The result is only valid if this iterator refers to location
21852185 0 in <em>all</em> dimensions below its current dimension N,
22062206 {
22072207 // go down the current column starting at the location of 'outer'
22082208 }
2209 \endcode
2209 \endcode
22102210 */
22112211 iterator iteratorForDimension(unsigned int d) const
22122212 {
22162216 }
22172217 /** Return the multi-iterator that operates on dimension K in order
22182218 to manipulate this dimension directly. Usage:
2219
2219
22202220 \code
2221
2221
22222222 StridedMultiIterator<3, int> i3 = ...;
2223
2223
22242224 i3.template dim<2>()++; // increment outer dimension
22252225 i3.template dim<0>()++; // increment inner dimension
22262226 \endcode
2227
2227
22282228 For convenience, the same functionality is also available
22292229 as <tt>dim0()</tt>, <tt>dim1()</tt> etc. up to <tt>dim4()</tt>:
2230
2230
22312231 \code
2232
2232
22332233 StridedMultiIterator<3, int> i3 = ...;
2234
2234
22352235 i3.dim2()++; // increment outer dimension
22362236 i3.dim0()++; // increment inner dimension
2237 \endcode
2237 \endcode
22382238 */
22392239 template <unsigned int K>
22402240 StridedMultiIterator<K+1, T, REFERENCE, POINTER> &
22562256
22572257 protected:
22582258
2259 difference_type
2259 difference_type
22602260 total_stride(typename multi_difference_type::const_iterator d) const
22612261 {
22622262 return d[level]*this->m_stride[level] + base_type::total_stride(d);
22702270
22712271 namespace std {
22722272
2273 // output the current coordinate of the iterator
2274 // (note: this also works when the iterator is an end-iterator)
22732275 template <unsigned int N, class T, class REFERENCE, class POINTER>
22742276 ostream & operator<<(ostream & o, vigra::StridedScanOrderIterator<N, T, REFERENCE, POINTER> const & i)
22752277 {
2276 o << *i;
2278 o << i.point();
22772279 return o;
22782280 }
22792281
8585 return equal(u_data, v_data, diff);
8686 }
8787 template <class Equal, class Data, class Shape>
88 bool callEqualImpl(Equal& equal, const Data& u_data, const Data& v_data, const Shape& diff, VigraFalseType)
88 bool callEqualImpl(Equal& equal, const Data& u_data, const Data& v_data, const Shape&, VigraFalseType)
8989 {
9090 return equal(u_data, v_data);
9191 }
5151 template<class G>
5252 struct NodeAtBorder{
5353 template<class NODE_ITER>
54 static bool atBorder(const NODE_ITER & node ){
54 static bool atBorder(const NODE_ITER &){
5555 return false;
5656 }
5757 };
4343
4444 namespace vigra {
4545
46 /** \defgroup MultiMathModule vigra::multi_math
47
48 Namespace <tt>vigra::multi_math</tt> holds VIGRA's support for efficient arithmetic and algebraic functions on multi-dimensional arrays (that is, \ref MultiArrayView and its subclasses). All <tt>multi_math</tt> functions operate element-wise. If you need matrix multiplication, use \ref LinearAlgebraModule instead.
49
50 In order to avoid overload ambiguities, multi-array arithmetic must be explicitly activated by
51 \code
52 using namespace vigra::multi_math;
53 \endcode
54 (this should not be done globally, but only in the scope where the functionality is actually used).
55
56 You can then use the standard operators in the expected way:
57 \code
58 MultiArray<2, float> i(Shape2(100, 100)), j(Shape2(100, 100));
59
60 MultiArray<2, float> h = i + 4.0 * j;
61 h += (i.transpose() - j) / 2.0;
62 \endcode
63 etc. (supported operators are <tt>+ - * / ! ~ % && || == != &lt; &lt;= &gt; &gt;= &lt;&lt; &gt;&gt; & | ^ = += -= *= /=</tt>, with both scalar and array arguments).
64
65 Algebraic functions are available as well:
66 \code
67 h = exp(-(sq(i) + sq(j)));
68 h *= atan2(-i, j);
69 \endcode
70 The following functions are implemented: <tt>abs, erf, even, odd, sign, signi, round, roundi, sqrt, sqrti, sq,
71 norm, squaredNorm, gamma, loggamma, exp, log, log10, sin, sin_pi, cos, cos_pi, asin, acos, tan, atan,
72 floor, ceil, conj, real, imag, arg, atan2, pow, fmod, min, max</tt>,
73 provided the array's element type supports the respective function.
74
75 Supported element types currently include the built-in numeric types, \ref TinyVector, \ref RGBValue,
76 <tt>std::complex</tt>, and \ref FFTWComplex.
77
78 In addition, <tt>multi_math</tt> supports a number of functions that reduce arrays to scalars:
79 \code
80 double s = sum<double>(i); // compute the sum of the elements, using 'double' as accumulator type
81 double p = product<double>(abs(i)); // compute the product of the elements' absolute values
82
83 bool a = any(i < 0.0); // check if any element of i is negative
84 bool b = all(i > 0.0); // check if all elements of i are positive
85 \endcode
86
87 Expressions are expanded so that no temporary arrays have to be created. To optimize cache locality,
88 loops are executed in the stride ordering of the left-hand-side array.
89
90 <b>\#include</b> \<vigra/multi_math.hxx\>
91
92 Namespace: vigra::multi_math
93 */
46 // namespace documentation is in multi_array.hxx
9447 namespace multi_math {
9548
9649 template <class ARG>
9750 struct MultiMathOperand
9851 {
9952 typedef typename ARG::result_type result_type;
100
53
10154 static const int ndim = ARG::ndim;
102
55
10356 MultiMathOperand(ARG const & a)
10457 : arg_(a)
10558 {}
106
59
10760 // Check if all arrays involved in the expression have compatible shapes
10861 // (including transparent expansion of singleton axes).
109 // 's' is the shape of the LHS array. If 's' is zero (i.e. the LHS is
62 // 's' is the shape of the LHS array. If 's' is zero (i.e. the LHS is
11063 // not yet initialized), it is set to the maximal RHS shape.
11164 //
11265 template <class SHAPE>
11467 {
11568 return arg_.checkShape(s);
11669 }
117
70
11871 // increment the pointer of all RHS arrays along the given 'axis'
11972 void inc(unsigned int axis) const
12073 {
12174 arg_.inc(axis);
12275 }
123
76
12477 // reset the pointer of all RHS arrays along the given 'axis'
12578 void reset(unsigned int axis) const
12679 {
12780 arg_.reset(axis);
12881 }
129
82
13083 // get the value of the expression at the current pointer location
13184 result_type operator*() const
13285 {
13386 return *arg_;
13487 }
135
88
13689 // get the value of the expression at an offset of the current pointer location
13790 template <class SHAPE>
13891 result_type operator[](SHAPE const & s) const
13992 {
14093 return arg_[s];
14194 }
142
95
14396 ARG arg_;
14497 };
14598
14699 template <unsigned int N, class T, class C>
147100 struct MultiMathOperand<MultiArrayView<N, T, C> >
148101 {
149 typedef MultiMathOperand AllowOverload;
102 typedef MultiMathOperand AllowOverload;
150103 typedef typename MultiArrayShape<N>::type Shape;
151104
152105 typedef T result_type;
153
106
154107 static const int ndim = (int)N;
155
108
156109 MultiMathOperand(MultiArrayView<N, T, C> const & a)
157110 : p_(a.data()),
158111 shape_(a.shape()),
163116 if(shape_[k] == 1)
164117 strides_[k] = 0;
165118 }
166
119
167120 bool checkShape(Shape & s) const
168121 {
169122 // support:
186139 }
187140 return true;
188141 }
189
142
190143 T const & operator[](Shape const & s) const
191144 {
192145 return p_[dot(s, strides_)];
193146 }
194
147
195148 void inc(unsigned int axis) const
196149 {
197150 p_ += strides_[axis];
198151 }
199
152
200153 void reset(unsigned int axis) const
201154 {
202155 p_ -= shape_[axis]*strides_[axis];
203156 }
204
157
205158 result_type operator*() const
206159 {
207160 return *p_;
208161 }
209
162
210163 mutable T const * p_;
211164 Shape shape_, strides_;
212165 };
216169 : public MultiMathOperand<MultiArrayView<N, T, UnstridedArrayTag> >
217170 {
218171 typedef MultiMathOperand AllowOverload;
219
172
220173 MultiMathOperand(MultiArray<N, T, A> const & a)
221174 : MultiMathOperand<MultiArrayView<N, T, UnstridedArrayTag> >(a)
222175 {}
227180 {
228181 typedef MultiMathOperand<T> AllowOverload;
229182 typedef T result_type;
230
183
231184 static const int ndim = 0;
232
185
233186 MultiMathScalarOperand(T const & v)
234187 : v_(v)
235188 {}
236
189
237190 template <class SHAPE>
238191 bool checkShape(SHAPE const &) const
239192 {
240193 return true;
241194 }
242
195
243196 template <class SHAPE>
244197 T const & operator[](SHAPE const &) const
245198 {
246199 return v_;
247200 }
248
201
249202 void inc(unsigned int /* axis */) const
250203 {}
251
204
252205 void reset(unsigned int /* axis */) const
253206 {}
254
207
255208 T const & operator*() const
256209 {
257210 return v_;
258211 }
259
212
260213 T v_;
261214 };
262215
303256 struct MultiMathUnaryOperator
304257 {
305258 typedef typename F::template Result<typename O::result_type>::type result_type;
306
259
307260 static const int ndim = O::ndim;
308
261
309262 MultiMathUnaryOperator(O const & o)
310263 : o_(o)
311264 {}
312
265
313266 template <class SHAPE>
314267 bool checkShape(SHAPE & s) const
315268 {
316269 return o_.checkShape(s);
317270 }
318
271
319272 //
320273 void inc(unsigned int axis) const
321274 {
322275 o_.inc(axis);
323276 }
324
277
325278 void reset(unsigned int axis) const
326279 {
327280 o_.reset(axis);
328281 }
329
282
330283 template <class POINT>
331284 result_type operator[](POINT const & p) const
332285 {
333286 return f_(o_[p]);
334287 }
335
288
336289 result_type operator*() const
337290 {
338291 return f_(*o_);
339292 }
340
293
341294 O o_;
342295 F f_;
343296 };
416369 VIGRA_MULTIMATH_UNARY_OPERATOR(Sqrti, vigra::sqrti, sqrti, T)
417370 VIGRA_MULTIMATH_UNARY_OPERATOR(Sq, vigra::sq, sq, typename NumericTraits<T>::Promote)
418371 VIGRA_MULTIMATH_UNARY_OPERATOR(Norm, vigra::norm, norm, typename NormTraits<T>::NormType)
419 VIGRA_MULTIMATH_UNARY_OPERATOR(SquaredNorm, vigra::squaredNorm, squaredNorm,
372 VIGRA_MULTIMATH_UNARY_OPERATOR(SquaredNorm, vigra::squaredNorm, squaredNorm,
420373 typename NormTraits<T>::SquaredNormType)
421374 VIGRA_MULTIMATH_UNARY_OPERATOR(Sin_pi, vigra::sin_pi, sin_pi, VIGRA_REALPROMOTE)
422375 VIGRA_MULTIMATH_UNARY_OPERATOR(Cos_pi, vigra::cos_pi, cos_pi, VIGRA_REALPROMOTE)
456409 {
457410 typedef typename F::template Result<typename O1::result_type,
458411 typename O2::result_type>::type result_type;
459
412
460413 static const int ndim = O1::ndim > O2::ndim ? O1::ndim : O2::ndim;
461
414
462415 MultiMathBinaryOperator(O1 const & o1, O2 const & o2)
463416 : o1_(o1),
464417 o2_(o2)
465418 {}
466
419
467420 template <class SHAPE>
468421 bool checkShape(SHAPE & s) const
469422 {
470423 return o1_.checkShape(s) && o2_.checkShape(s);
471424 }
472
425
473426 template <class POINT>
474427 result_type operator[](POINT const & p) const
475428 {
476429 return f_(o1_[p], o2_[p]);
477430 }
478
431
479432 void inc(unsigned int axis) const
480433 {
481434 o1_.inc(axis);
482435 o2_.inc(axis);
483436 }
484
437
485438 void reset(unsigned int axis) const
486439 {
487440 o1_.reset(axis);
488441 o2_.reset(axis);
489442 }
490
443
491444 result_type operator*() const
492445 {
493446 return f_(*o1_, *o2_);
494447 }
495
448
496449 O1 o1_;
497450 O2 o2_;
498451 F f_;
501454
502455 // In the sequel, the nested type 'MultiMathOperand<T>::AllowOverload'
503456 // ensures that template functions only participate in overload
504 // resolution when this type is defined, i.e. when T is a number
457 // resolution when this type is defined, i.e. when T is a number
505458 // or array type. It thus prevents 'ambiguous overload' errors.
506459 //
507460 #define VIGRA_MULTIMATH_BINARY_OPERATOR(NAME, FCT, OPNAME, SEP, RESTYPE) \
675628
676629 // We pass 'strideOrder' to the recursion in order to make sure
677630 // that the inner loop iterates over the output's major axis.
678 // Of course, this does not help when the RHS arrays are ordered
631 // Of course, this does not help when the RHS arrays are ordered
679632 // differently -- maybe it is better to find the most common order
680633 // among all arguments (both RHS and LHS)?
681634 //
683636 struct MultiMathExec
684637 {
685638 enum { LEVEL = N-1 };
686
639
687640 template <class T, class Shape, class Expression>
688 static void exec(T * data, Shape const & shape, Shape const & strides,
641 static void exec(T * data, Shape const & shape, Shape const & strides,
689642 Shape const & strideOrder, Expression const & e)
690643 {
691644 MultiArrayIndex axis = strideOrder[LEVEL];
702655 struct MultiMathExec<1, Assign>
703656 {
704657 enum { LEVEL = 0 };
705
658
706659 template <class T, class Shape, class Expression>
707 static void exec(T * data, Shape const & shape, Shape const & strides,
660 static void exec(T * data, Shape const & shape, Shape const & strides,
708661 Shape const & strideOrder, Expression const & e)
709662 {
710663 MultiArrayIndex axis = strideOrder[LEVEL];
766719 struct MultiMathReduce
767720 {
768721 enum { LEVEL = N-1 };
769
722
770723 template <class T, class Shape, class Expression>
771724 static void exec(T & t, Shape const & shape, Expression const & e)
772725 {
782735 struct MultiMathReduce<1, Assign>
783736 {
784737 enum { LEVEL = 0 };
785
738
786739 template <class T, class Shape, class Expression>
787740 static void exec(T & t, Shape const & shape, Expression const & e)
788741 {
817770
818771 template <class U, class T>
819772 U
820 sum(MultiMathOperand<T> const & v, U res = NumericTraits<U>::zero())
821 {
773 sum(MultiMathOperand<T> const & v, U res = NumericTraits<U>::zero())
774 {
822775 static const int ndim = MultiMathOperand<T>::ndim;
823776 typename MultiArrayShape<ndim>::type shape;
824777 v.checkShape(shape);
828781
829782 template <class U, unsigned int N, class T, class S>
830783 U
831 sum(MultiArrayView<N, T, S> const & v, U res = NumericTraits<U>::zero())
832 {
784 sum(MultiArrayView<N, T, S> const & v, U res = NumericTraits<U>::zero())
785 {
833786 return v.template sum<U>() + res;
834787 }
835788
836789 template <class U, class T>
837790 U
838 product(MultiMathOperand<T> const & v, U res = NumericTraits<U>::one())
839 {
791 product(MultiMathOperand<T> const & v, U res = NumericTraits<U>::one())
792 {
840793 static const int ndim = MultiMathOperand<T>::ndim;
841794 typename MultiArrayShape<ndim>::type shape;
842795 v.checkShape(shape);
846799
847800 template <class U, unsigned int N, class T, class S>
848801 U
849 product(MultiArrayView<N, T, S> const & v, U res = NumericTraits<U>::one())
850 {
802 product(MultiArrayView<N, T, S> const & v, U res = NumericTraits<U>::one())
803 {
851804 return v.template product<U>() * res;
852805 }
853806
854807 template <class T>
855808 bool
856 all(MultiMathOperand<T> const & v)
857 {
809 all(MultiMathOperand<T> const & v)
810 {
858811 static const int ndim = MultiMathOperand<T>::ndim;
859812 typename MultiArrayShape<ndim>::type shape;
860813 v.checkShape(shape);
865818
866819 template <class T>
867820 bool
868 any(MultiMathOperand<T> const & v)
869 {
821 any(MultiMathOperand<T> const & v)
822 {
870823 static const int ndim = MultiMathOperand<T>::ndim;
871824 typename MultiArrayShape<ndim>::type shape;
872825 v.checkShape(shape);
123123 template <class SrcIterator, class SrcShape, class SrcAccessor,
124124 class DestIterator, class DestAccessor>
125125 static void
126 exec( SrcIterator s, SrcShape const & shape, SrcAccessor src,
127 DestIterator d, DestAccessor dest, double radius, bool dilation)
126 exec( SrcIterator /*s*/, SrcShape const & /*shape*/, SrcAccessor /*src*/,
127 DestIterator /*d*/, DestAccessor /*dest*/, double /*radius*/, bool /*dilation*/)
128128 {
129129 vigra_fail("multiBinaryMorphology(): Internal error (this function should never be called).");
130130 }
614614 copyMultiArray(MultiArrayView<N, T1, S1> const & source,
615615 MultiArrayView<N, T2, S2> dest)
616616 {
617 for(int k=0; k<N; ++k)
617 for(unsigned k=0; k<N; ++k)
618618 vigra_precondition(source.shape(k) == dest.shape(k) || source.shape(k) == 1 || 1 == dest.shape(k),
619619 "copyMultiArray(): shape mismatch between input and output.");
620620 if(source.shape() == dest.shape())
5050 void
5151 internalResizeMultiArrayOneDimension(
5252 SrcIterator si, Shape const & sshape, SrcAccessor src,
53 DestIterator di, Shape const & dshape, DestAccessor dest,
53 DestIterator di, Shape const & dshape, DestAccessor dest,
5454 Kernel const & spline, unsigned int d)
5555 {
5656 enum { N = 1 + SrcIterator::level };
6262
6363 SNavigator snav( si, sshape, d );
6464 DNavigator dnav( di, dshape, d );
65
65
6666 int ssize = sshape[d];
6767 int dsize = dshape[d];
6868
7474 Rational<int> offset(0);
7575 resampling_detail::MapTargetToSourceCoordinate mapCoordinate(ratio, offset);
7676 int period = lcm(ratio.numerator(), ratio.denominator());
77
77
7878 ArrayVector<double> const & prefilterCoeffs = spline.prefilterCoefficients();
7979 ArrayVector<Kernel1D<double> > kernels(period);
8080 createResamplingKernels(spline, mapCoordinate, kernels);
8383 ArrayVector<TmpType> tmp( ssize );
8484 typename ArrayVector<TmpType>::iterator t = tmp.begin(), tend = tmp.end();
8585 typename AccessorTraits<TmpType>::default_accessor ta;
86
86
8787 for( ; snav.hasMore(); snav++, dnav++ )
8888 {
8989 // first copy source to temp for maximum cache efficiency
102102
103103 } // namespace detail
104104
105 /** \addtogroup GeometricTransformations Geometric Transformations
105 /** \addtogroup GeometricTransformations
106106 */
107107 //@{
108108
121121 \code
122122 namespace vigra {
123123 template <unsigned int N, class T1, class S1,
124 class T2, class S2,
124 class T2, class S2,
125125 class Kernel = BSpline<3, double> >
126126 void
127127 resizeMultiArraySplineInterpolation(MultiArrayView<N, T1, S1> const & source,
224224 doxygen_overloaded_function(template <...> void resizeMultiArraySplineInterpolation)
225225
226226 template <class SrcIterator, class Shape, class SrcAccessor,
227 class DestIterator, class DestAccessor,
227 class DestIterator, class DestAccessor,
228228 class Kernel>
229229 void
230230 resizeMultiArraySplineInterpolation(
231231 SrcIterator si, Shape const & sshape, SrcAccessor src,
232 DestIterator di, Shape const & dshape, DestAccessor dest,
232 DestIterator di, Shape const & dshape, DestAccessor dest,
233233 Kernel const & spline)
234234 {
235235 enum { N = 1 + SrcIterator::level };
236236 typedef typename NumericTraits<typename DestAccessor::value_type>::RealPromote TmpType;
237237 typedef typename AccessorTraits<TmpType>::default_accessor TmpAccessor;
238
238
239239 if(N==1)
240240 {
241 detail::internalResizeMultiArrayOneDimension(si, sshape, src,
241 detail::internalResizeMultiArrayOneDimension(si, sshape, src,
242242 di, dshape, dest, spline, 0);
243243 }
244244 else
248248 tmpShape[d] = dshape[d];
249249 MultiArray<N, TmpType> tmp(tmpShape);
250250 TmpAccessor ta;
251
252 detail::internalResizeMultiArrayOneDimension(si, sshape, src,
251
252 detail::internalResizeMultiArrayOneDimension(si, sshape, src,
253253 tmp.traverser_begin(), tmpShape, ta, spline, d);
254254 d = 1;
255255 for(; d<N-1; ++d)
256256 {
257257 tmpShape[d] = dshape[d];
258258 MultiArray<N, TmpType> dtmp(tmpShape);
259
260 detail::internalResizeMultiArrayOneDimension(tmp.traverser_begin(), tmp.shape(), ta,
259
260 detail::internalResizeMultiArrayOneDimension(tmp.traverser_begin(), tmp.shape(), ta,
261261 dtmp.traverser_begin(), tmpShape, ta, spline, d);
262262 dtmp.swap(tmp);
263263 }
264 detail::internalResizeMultiArrayOneDimension(tmp.traverser_begin(), tmp.shape(), ta,
264 detail::internalResizeMultiArrayOneDimension(tmp.traverser_begin(), tmp.shape(), ta,
265265 di, dshape, dest, spline, d);
266266 }
267267 }
277277 }
278278
279279 template <class SrcIterator, class Shape, class SrcAccessor,
280 class DestIterator, class DestAccessor,
280 class DestIterator, class DestAccessor,
281281 class Kernel>
282282 inline void
283283 resizeMultiArraySplineInterpolation(triple<SrcIterator, Shape, SrcAccessor> src,
299299 }
300300
301301 template <unsigned int N, class T1, class S1,
302 class T2, class S2,
302 class T2, class S2,
303303 class Kernel>
304304 inline void
305305 resizeMultiArraySplineInterpolation(MultiArrayView<N, T1, S1> const & source,
4343
4444 namespace vigra {
4545
46 /** \addtogroup MultiIteratorGroup
46 /** \addtogroup RangesAndPoints
4747 */
4848 //@{
4949
5454 /********************************************************/
5555
5656 template <class T>
57 struct Singleband // the resulting MultiArray has no explicit channel axis
57 struct Singleband // the resulting MultiArray has no explicit channel axis
5858 // (i.e. the number of channels is implicitly one)
5959 {
6060 typedef T value_type;
236236
237237 } // namespace detail
238238
239 /** Traits class for the difference type of all MultiIterator, MultiArrayView, and
239 /** Metafunction to obtain the difference type of all MultiIterator, MultiArrayView, and
240240 MultiArray variants.
241
242 <b>Usage:</b>
243
244 This metafunction is mainly used in functions where the array dimension <tt>N</tt> is
245 provided as a template parameter, and we need a shape object of the corresponding length.
246 Then, a typedef like this is typically placed at the beginning of the function:
247
248 \code
249 typedef typename MultiArrayShape<N>::type Shape;
250
251 Shape shape(1); // all ones of dimension N
252 \endcode
253
254 The following typedefs are provided for convenience:
255
256 \code
257 typedef MultiArrayShape<1>::type Shape1;
258 typedef MultiArrayShape<2>::type Shape2;
259 typedef MultiArrayShape<3>::type Shape3;
260 typedef MultiArrayShape<4>::type Shape4;
261 typedef MultiArrayShape<5>::type Shape5;
262 \endcode
241263 */
242264 template <unsigned int N>
243265 class MultiArrayShape
249271 typedef TinyVector<MultiArrayIndex, N> type;
250272 };
251273
252 typedef MultiArrayShape<1>::type Shape1; ///< shape type for MultiArray<1, T>
253 typedef MultiArrayShape<2>::type Shape2; ///< shape type for MultiArray<2, T>
254 typedef MultiArrayShape<3>::type Shape3; ///< shape type for MultiArray<3, T>
255 typedef MultiArrayShape<4>::type Shape4; ///< shape type for MultiArray<4, T>
256 typedef MultiArrayShape<5>::type Shape5; ///< shape type for MultiArray<5, T>
274 typedef MultiArrayShape<1>::type Shape1;
275 typedef MultiArrayShape<2>::type Shape2;
276 typedef MultiArrayShape<3>::type Shape3;
277 typedef MultiArrayShape<4>::type Shape4;
278 typedef MultiArrayShape<5>::type Shape5;
257279
258280 namespace detail
259281 {
484506 /* */
485507 /********************************************************/
486508
487 /* transforms a coordinate object with negative indices into the corresponding
509 /* transforms a coordinate object with negative indices into the corresponding
488510 'shape - abs(index)'.
489511 */
490512 template <int M>
518540 /* */
519541 /********************************************************/
520542
521 // a border type is a compact bit-wise encoding of the fact that a
543 // a border type is a compact bit-wise encoding of the fact that a
522544 // given coordinate is at the border of the ROI. Each border corresponds
523545 // to one bit in the encoding, e.g. the left, right, top, bottom borders
524 // of a 2D image are represented by bits 0 to 3 respectively.
546 // of a 2D image are represented by bits 0 to 3 respectively.
525547 // If a bit is set, the point in question is at the corresponding border.
526548 // A code of all zeros therefore means that the point is in the interior
527549 // of the ROI
529551 struct BorderTypeImpl
530552 {
531553 typedef TinyVectorView<MultiArrayIndex, N> shape_type;
532
554
533555 static unsigned int exec(shape_type const & point, shape_type const & shape)
534556 {
535557 unsigned int res = BorderTypeImpl<N, DIMENSION-1>::exec(point, shape);
546568 {
547569 typedef TinyVectorView<MultiArrayIndex, N> shape_type;
548570 static const unsigned int DIMENSION = 0;
549
571
550572 static unsigned int exec(shape_type const & point, shape_type const & shape)
551573 {
552574 unsigned int res = 0;
565587 /********************************************************/
566588
567589 // Create the offsets to all direct neighbors, starting from the given Level (=dimension)
568 // and append them to the given array. The algorithm is designed so that the offsets are
590 // and append them to the given array. The algorithm is designed so that the offsets are
569591 // sorted by ascending strides. This has two important consequences:
570592 // * The first half of the array contains the causal neighbors (negative strides),
571593 // the second half the anti-causal ones (positive strides), where 'causal' refers
572594 // to all scan-order predecessors of the center pixel, and 'anticausal' to its successors.
573595 // * For any neighbor k, its opposite (=point-reflected) neighbor is located at index
574596 // 'N-1-k', where N is the total number of neighbors.
575 // The function 'exists' returns an array of flags that contains 'true' when the corresponding
576 // neighbor is inside the ROI for the given borderType, 'false' otherwise.
597 // The function 'exists' returns an array of flags that contains 'true' when the corresponding
598 // neighbor is inside the ROI for the given borderType, 'false' otherwise.
577599 template <unsigned int Level>
578600 struct MakeDirectArrayNeighborhood
579601 {
581603 static void offsets(Array & a)
582604 {
583605 typedef typename Array::value_type Shape;
584
606
585607 Shape point;
586608 point[Level] = -1;
587609 a.push_back(point);
589611 point[Level] = 1;
590612 a.push_back(point);
591613 }
592
614
593615 template <class Array>
594616 static void exists(Array & a, unsigned int borderType)
595617 {
606628 static void offsets(Array & a)
607629 {
608630 typedef typename Array::value_type Shape;
609
631
610632 Shape point;
611633 point[0] = -1;
612634 a.push_back(point);
613635 point[0] = 1;
614636 a.push_back(point);
615637 }
616
638
617639 template <class Array>
618640 static void exists(Array & a, unsigned int borderType)
619641 {
636658 point[Level] = 1;
637659 MakeIndirectArrayNeighborhood<Level-1>::offsets(a, point, false);
638660 }
639
661
640662 template <class Array>
641663 static void exists(Array & a, unsigned int borderType, bool isCenter = true)
642664 {
643665 if((borderType & (1 << 2*Level)) == 0)
644666 MakeIndirectArrayNeighborhood<Level-1>::exists(a, borderType, false);
645 else
667 else
646668 MakeIndirectArrayNeighborhood<Level-1>::markOutside(a);
647669
648670 MakeIndirectArrayNeighborhood<Level-1>::exists(a, borderType, isCenter);
680702 point[0] = 1;
681703 a.push_back(point);
682704 }
683
705
684706 template <class Array>
685707 static void exists(Array & a, unsigned int borderType, bool isCenter = true)
686708 {
695717 template <class Array>
696718 static void markOutside(Array & a)
697719 {
698 // Push 'false' three times, for each possible offset at level 0, whenever the point was
720 // Push 'false' three times, for each possible offset at level 0, whenever the point was
699721 // outside the ROI in one of the higher levels.
700722 a.push_back(false);
701723 a.push_back(false);
703725 }
704726 };
705727
706 // Create the list of neighbor offsets for the given neighborhood type
728 // Create the list of neighbor offsets for the given neighborhood type
707729 // and dimension (the dimension is implicitly defined by the Shape type)
708730 // an return it in 'neighborOffsets'. Moreover, create a list of flags
709731 // for each BorderType that is 'true' when the corresponding neighbor exists
710732 // in this border situation and return the result in 'neighborExists'.
711733 template <class Shape>
712734 void
713 makeArrayNeighborhood(ArrayVector<Shape> & neighborOffsets,
735 makeArrayNeighborhood(ArrayVector<Shape> & neighborOffsets,
714736 ArrayVector<ArrayVector<bool> > & neighborExists,
715737 NeighborhoodType neighborhoodType = DirectNeighborhood)
716738 {
717739 enum { N = Shape::static_size };
718
740
719741 neighborOffsets.clear();
720742 if(neighborhoodType == DirectNeighborhood)
721743 {
726748 Shape point; // represents the center
727749 MakeIndirectArrayNeighborhood<N-1>::offsets(neighborOffsets, point);
728750 }
729
751
730752 unsigned int borderTypeCount = 1 << 2*N;
731753 neighborExists.resize(borderTypeCount);
732754
9292 }
9393
9494 template <int N2>
95 void exec(argument_type const & v, result_type & r, MetaInt<N2>) const
95 void exec(argument_type const &, result_type &, MetaInt<N2>) const
9696 {
9797 vigra_fail("tensorTraceMultiArray(): Sorry, can only handle dimensions up to 3.");
9898 }
127127 }
128128
129129 template <int N2>
130 void exec(argument_type const & v, result_type & r, MetaInt<N2>) const
130 void exec(argument_type const &, result_type &, MetaInt<N2>) const
131131 {
132132 vigra_fail("tensorEigenvaluesMultiArray(): Sorry, can only handle dimensions up to 3.");
133133 }
167167 }
168168
169169 template <int N2>
170 void exec(argument_type const & v, result_type & r, MetaInt<N2>) const
170 void exec(argument_type const &, result_type &, MetaInt<N2>) const
171171 {
172172 vigra_fail("tensorDeterminantMultiArray(): Sorry, can only handle dimensions up to 3.");
173173 }
4949
5050 namespace vigra {
5151
52 /** \addtogroup SeededRegionGrowing
52 /** \addtogroup Superpixels
5353 */
5454 //@{
5555 namespace lemon_graph {
7575 return g.id(*n);
7676 }
7777
78 static index_type invalidIndex(Graph const & g)
78 static index_type invalidIndex(Graph const &)
7979 {
8080 return std::numeric_limits<index_type>::max();
8181 }
9090 typedef UInt16 index_type;
9191
9292 template <class NodeIter, class ArcIter>
93 static index_type get(Graph const & g, NodeIter const &, ArcIter const & a)
93 static index_type get(Graph const &, NodeIter const &, ArcIter const & a)
9494 {
9595 return a.neighborIndex();
9696 }
100100 {
101101 return g.oppositeIndex(a.neighborIndex());
102102 }
103 static index_type invalidIndex(Graph const & g)
103 static index_type invalidIndex(Graph const &)
104104 {
105105 return std::numeric_limits<index_type>::max();
106106 }
137137 template <class Graph, class T1Map, class T2Map, class T3Map>
138138 typename T2Map::value_type
139139 unionFindWatersheds(Graph const & g,
140 T1Map const & data,
140 T1Map const &,
141141 T2Map const & lowestNeighborIndex,
142142 T3Map & labels)
143143 {
218218 return labelGraphWithBackground(g, minima, seeds, MarkerType(0), std::equal_to<MarkerType>());
219219 }
220220
221 #ifdef __GNUC__
222 #pragma GCC diagnostic push
223 #pragma GCC diagnostic ignored "-Wsign-compare"
224 #endif
221225
222226 template <class Graph, class T1Map, class T2Map>
223227 typename T2Map::value_type
322326 return maxRegionLabel;
323327 }
324328
329 #ifdef __GNUC__
330 #pragma GCC diagnostic pop
331 #endif
332
325333 } // namespace graph_detail
326334
327335 template <class Graph, class T1Map, class T2Map>
4040 /************************************************************************/
4141 /* The ONLM filter is described in: */
4242 /* */
43 /* P. Coupé, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot. */
43 /* P. Coupe, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot. */
4444 /* An Optimized Blockwise Non Local Means Denoising Filter */
4545 /* for 3D Magnetic Resonance Images */
4646 /* . IEEE Transactions on Medical Imaging, 27(4):425-441, */
151151 return (m > meanRatio_ && m < (1.0 / meanRatio_) && v > varRatio_ && v < (1.0 / varRatio_));
152152 }
153153
154 ValueType distanceToWeight(const PixelType & meanA, const PixelType & varA, const ValueType distance){
154 ValueType distanceToWeight(const PixelType & /*meanA*/, const PixelType & /*varA*/, const ValueType distance){
155155 return exp(-distance /sigmaSquared_);
156156 }
157157
262262
263263 }
264264
265 bool usePixel(const PixelType & meanA, const PixelType & varA)const{
265 bool usePixel(const PixelType & /*meanA*/, const PixelType & varA)const{
266266 return sum(varA)>epsilon_;
267267 }
268268
278278 return (m < meanDist_ && v > varRatio_ && v < (1.0 / varRatio_));
279279 }
280280
281 ValueType distanceToWeight(const PixelType & meanA, const PixelType & varA, const ValueType distance){
281 ValueType distanceToWeight(const PixelType & /*meanA*/, const PixelType & /*varA*/, const ValueType distance){
282282 return exp(-distance /sigmaSquared_);
283283 }
284284
549549 const Coordinate & xyz
550550 ){
551551 Coordinate nxyz(SkipInitialization);
552 const int searchRadius = param_.searchRadius_;
553552 std::fill(average_.begin(),average_.end(),RealPromotePixelType(0.0));
554553 RealPromoteScalarType totalweight = 0.0;
555554
935934 if(param.iterations_>1){
936935
937936 vigra::MultiArray<DIM,PIXEL_TYPE_OUT> tmp(outImage.shape());
938 for(size_t i=0;i<param.iterations_-1;++i){
937 for(size_t i=0;i<static_cast<size_t>(param.iterations_-1);++i){
939938 tmp=outImage;
940939 detail_non_local_means::nonLocalMean1Run<DIM,PIXEL_TYPE_OUT,PIXEL_TYPE_OUT,SMOOTH_POLICY>(tmp,smoothPolicy,param,outImage);
941940 }
12591259 }
12601260
12611261 this->m_stride /= sizeof(value_type);
1262 // make sure that singleton axes have non-zero stride
1263 for(int k=0; k<actual_dimension; ++k)
1264 {
1265 if(this->m_stride[k] == 0)
1266 {
1267 vigra_precondition(this->m_shape[k] == 1,
1268 "NumpyArray::setupArrayView(): only singleton axes may have zero stride.");
1269 this->m_stride[k] = 1;
1270 }
1271 }
1272
12621273 this->m_ptr = reinterpret_cast<pointer>(PyArray_DATA(pyArray()));
12631274 vigra_precondition(this->checkInnerStride(Stride()),
12641275 "NumpyArray<..., UnstridedArrayTag>::setupArrayView(): First dimension of given array is not unstrided (should never happen).");
8989
9090 converter::registration const * reg = converter::registry::query(type_id<ArrayType>());
9191
92 // register the to_python_converter only once
93 // FIXME: I'm not sure if this is correct.
92 // register the converters only once
9493 if(!reg || !reg->rvalue_chain)
9594 {
9695 to_python_converter<ArrayType, NumpyArrayConverter>();
97 }
98 converter::registry::insert(&convertible, &construct, type_id<ArrayType>());
96 converter::registry::insert(&convertible, &construct, type_id<ArrayType>());
97 }
9998 }
10099
101100 template <unsigned int N, class T, class Stride>
807806 template <class Functor>
808807 inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
809808 void>::type
810 def(char const* python_name, Functor const & f)
811 {
812 static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
809 def(char const*, Functor const &)
810 {
811 static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
813812 "def(): use multidef() to export multiple overloads.");
814813 }
815814
816815 template <class Functor, class Args>
817816 inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
818817 void>::type
819 def(char const* python_name, Functor const & f, Args const& args)
818 def(char const*, Functor const &, Args const& )
820819 {
821820 static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
822821 "def(): use multidef() to export multiple overloads.");
825824 template <class Functor>
826825 inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
827826 void>::type
828 def(char const* python_name, Functor const & f, const char * help)
827 def(char const*, Functor const &, const char *)
829828 {
830829 static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
831830 "def(): use multidef() to export multiple overloads.");
834833 template <class Functor, class Args>
835834 inline typename std::enable_if<std::is_base_of<PythonMultidefFunctor, Functor>::value,
836835 void>::type
837 def(char const* python_name, Functor const & f, Args const& args, const char * help)
836 def(char const*, Functor const &, Args const&, const char *)
838837 {
839838 static_assert(!std::is_base_of<PythonMultidefFunctor, Functor>::value,
840839 "def(): use multidef() to export multiple overloads.");
7171 if(order == "")
7272 order = defaultOrder();
7373 python_ptr arraytype = getArrayTypeObject();
74 python_ptr func(pythonFromData("defaultAxistags"));
75 python_ptr d(pythonFromData(ndim));
76 python_ptr o(pythonFromData(order));
77 python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), o.get(), NULL),
74 python_ptr func(pythonFromData("defaultAxistags"));
75 python_ptr d(pythonFromData(ndim));
76 python_ptr o(pythonFromData(order));
77 python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), o.get(), NULL),
7878 python_ptr::keep_count);
7979 if(axistags)
8080 return axistags;
8686 python_ptr emptyAxistags(int ndim)
8787 {
8888 python_ptr arraytype = getArrayTypeObject();
89 python_ptr func(pythonFromData("_empty_axistags"));
90 python_ptr d(pythonFromData(ndim));
91 python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), NULL),
89 python_ptr func(pythonFromData("_empty_axistags"));
90 python_ptr d(pythonFromData(ndim));
91 python_ptr axistags(PyObject_CallMethodObjArgs(arraytype, func.get(), d.get(), NULL),
9292 python_ptr::keep_count);
9393 if(axistags)
9494 return axistags;
102102 python_ptr object, const char * name,
103103 AxisInfo::AxisType type, bool ignoreErrors)
104104 {
105 python_ptr func(pythonFromData(name));
106 python_ptr t(pythonFromData((long)type));
107 python_ptr permutation(PyObject_CallMethodObjArgs(object, func.get(), t.get(), NULL),
105 python_ptr func(pythonFromData(name));
106 python_ptr t(pythonFromData((long)type));
107 python_ptr permutation(PyObject_CallMethodObjArgs(object, func.get(), t.get(), NULL),
108108 python_ptr::keep_count);
109109 if(!permutation && ignoreErrors)
110110 {
127127 {
128128 python_ptr i(PySequence_GetItem(permutation, k), python_ptr::keep_count);
129129 #if PY_MAJOR_VERSION < 3
130 if(!PyInt_Check(i))
130 if(!PyInt_Check(i))
131131 #else
132 if (!PyLong_Check(i))
132 if (!PyLong_Check(i))
133133 #endif
134 {
134 {
135135 if(ignoreErrors)
136136 return;
137137 std::string message = std::string(name) + "() did not return a sequence of int.";
139139 pythonToCppException(false);
140140 }
141141 #if PY_MAJOR_VERSION < 3
142 res[k] = PyInt_AsLong(i);
142 res[k] = PyInt_AsLong(i);
143143 #else
144 res[k] = PyLong_AsLong(i);
144 res[k] = PyLong_AsLong(i);
145145 #endif
146 }
146 }
147147 res.swap(permute);
148148 }
149149
193193
194194 if(createCopy)
195195 {
196 python_ptr func(pythonFromData("__copy__"));
197 axistags = python_ptr(PyObject_CallMethodObjArgs(tags, func.get(), NULL),
196 python_ptr func(pythonFromData("__copy__"));
197 axistags = python_ptr(PyObject_CallMethodObjArgs(tags, func.get(), NULL),
198198 python_ptr::keep_count);
199199 }
200200 else
209209 return;
210210 if(createCopy)
211211 {
212 python_ptr func(pythonFromData("__copy__"));
213 axistags = python_ptr(PyObject_CallMethodObjArgs(other.axistags, func.get(), NULL),
212 python_ptr func(pythonFromData("__copy__"));
213 axistags = python_ptr(PyObject_CallMethodObjArgs(other.axistags, func.get(), NULL),
214214 python_ptr::keep_count);
215215 }
216216 else
263263 {
264264 if(!axistags)
265265 return;
266 python_ptr d(pythonFromData(description));
267 python_ptr func(pythonFromData("setChannelDescription"));
268 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), d.get(), NULL),
266 python_ptr d(pythonFromData(description));
267 python_ptr func(pythonFromData("setChannelDescription"));
268 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), d.get(), NULL),
269269 python_ptr::keep_count);
270270 pythonToCppException(res);
271271 }
274274 {
275275 if(!axistags)
276276 return 0.0;
277 python_ptr func(pythonFromData("resolution"));
278 python_ptr i(pythonFromData(index));
279 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), NULL),
277 python_ptr func(pythonFromData("resolution"));
278 python_ptr i(pythonFromData(index));
279 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), NULL),
280280 python_ptr::keep_count);
281281 pythonToCppException(res);
282282 if(!PyFloat_Check(res))
291291 {
292292 if(!axistags)
293293 return;
294 python_ptr func(pythonFromData("setResolution"));
295 python_ptr i(pythonFromData(index));
296 python_ptr r(PyFloat_FromDouble(resolution), python_ptr::keep_count);
294 python_ptr func(pythonFromData("setResolution"));
295 python_ptr i(pythonFromData(index));
296 python_ptr r(PyFloat_FromDouble(resolution), python_ptr::keep_count);
297297 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), r.get(), NULL),
298298 python_ptr::keep_count);
299299 pythonToCppException(res);
303303 {
304304 if(!axistags)
305305 return;
306 python_ptr func(pythonFromData("scaleResolution"));
307 python_ptr i(pythonFromData(index));
308 python_ptr f(PyFloat_FromDouble(factor), python_ptr::keep_count);
306 python_ptr func(pythonFromData("scaleResolution"));
307 python_ptr i(pythonFromData(index));
308 python_ptr f(PyFloat_FromDouble(factor), python_ptr::keep_count);
309309 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), f.get(), NULL),
310310 python_ptr::keep_count);
311311 pythonToCppException(res);
316316 if(!axistags)
317317 return;
318318 python_ptr func(sign == 1
319 ? pythonFromData("toFrequencyDomain")
320 : pythonFromData("fromFrequencyDomain"));
319 ? pythonFromData("toFrequencyDomain")
320 : pythonFromData("fromFrequencyDomain"));
321321 python_ptr i(pythonFromData(index));
322322 python_ptr s(pythonFromData(size));
323 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), s.get(), NULL),
323 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), i.get(), s.get(), NULL),
324324 python_ptr::keep_count);
325325 pythonToCppException(res);
326326 }
371371 return;
372372 python_ptr func(pythonFromData("dropChannelAxis"));
373373 python_ptr res(PyObject_CallMethodObjArgs(axistags, func.get(), NULL),
374 python_ptr::keep_count);
374 python_ptr::keep_count);
375375 pythonToCppException(res);
376376 }
377377
3737
3838 #ifndef NPY_NO_DEPRECATED_API
3939 # define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
40 #endif
40 #endif
4141
4242 #include "numerictraits.hxx"
4343 #include "multi_array.hxx"
5656 template<class ValueType>
5757 struct NumpyArrayValuetypeTraits
5858 {
59 static bool isValuetypeCompatible(PyArrayObject const * obj)
59 static bool isValuetypeCompatible(PyArrayObject const *)
6060 {
6161 return ERROR_NumpyArrayValuetypeTraits_not_specialized_for_<ValueType>();
6262 }
203203 {
204204 return isShapeCompatible(obj) && isValuetypeCompatible(obj);
205205 }
206
207 // Construct a tagged shape from a 'shape - axistags' pair (called in
206
207 // Construct a tagged shape from a 'shape - axistags' pair (called in
208208 // NumpyArray::taggedShape()).
209209 template <class U>
210210 static TaggedShape taggedShape(TinyVector<U, N> const & shape, PyAxisTags axistags)
233233 vigra_precondition(tagged_shape.size() == N,
234234 "reshapeIfEmpty(): tagged_shape has wrong size.");
235235 }
236
236
237237 // This function is used to synchronize the axis re-ordering of 'data'
238238 // with that of 'array'. For example, when we want to apply Gaussian smoothing
239239 // with a different scale for each axis, 'data' would contains those scales,
244244 {
245245 vigra_precondition((int)data.size() == N,
246246 "NumpyArray::permuteLikewise(): size mismatch.");
247
247
248248 ArrayVector<npy_intp> permute;
249 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
249 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
250250 AxisInfo::AllAxes, true);
251251
252252 if(permute.size() != 0)
254254 applyPermutation(permute.begin(), permute.end(), data.begin(), res.begin());
255255 }
256256 }
257
257
258258 // This function is called in NumpyArray::setupArrayView() to determine the
259259 // desired axis re-ordering.
260260 template <class U>
261261 static void permutationToSetupOrder(python_ptr array, ArrayVector<U> & permute)
262262 {
263 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
263 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
264264 AxisInfo::AllAxes, true);
265265
266266 if(permute.size() == 0)
279279 T *data, TinyVector<U, N> const & stride)
280280 {
281281 TinyVector<npy_intp, N> npyStride(stride * sizeof(T));
282 return constructNumpyArrayFromData(shape, npyStride.begin(),
282 return constructNumpyArrayFromData(shape, npyStride.begin(),
283283 ValuetypeTraits::typeCode, data);
284284 }
285285 };
300300 long channelIndex = pythonGetAttr(obj, "channelIndex", ndim);
301301 long majorIndex = pythonGetAttr(obj, "innerNonchannelIndex", ndim);
302302 npy_intp * strides = PyArray_STRIDES(array);
303
303
304304 if(channelIndex < ndim)
305305 {
306306 // When we have a channel axis, it will become the innermost dimension
312312 // axis will be the innermost dimension
313313 return (ndim == N && strides[majorIndex] == sizeof(T));
314314 }
315 else
315 else
316316 {
317317 // When we have no axistags, the first axis will be the innermost dimension
318318 return (ndim == N && strides[0] == sizeof(T));
339339 PyObject * obj = (PyObject *)array;
340340 int ndim = PyArray_NDIM(array);
341341 long channelIndex = pythonGetAttr(obj, "channelIndex", ndim);
342
343 // If we have no channel axis (because either we don't have axistags,
342
343 // If we have no channel axis (because either we don't have axistags,
344344 // or the tags do not contain a channel axis), ndim must match.
345345 if(channelIndex == ndim)
346346 return ndim == N;
347
347
348348 // Otherwise, the channel axis must be a singleton axis that we can drop.
349349 return ndim == N+1 && PyArray_DIM(array, channelIndex) == 1;
350350 }
363363 template <class U>
364364 static TaggedShape taggedShape(TinyVector<U, N> const & shape, std::string const & order = "")
365365 {
366 return TaggedShape(shape,
366 return TaggedShape(shape,
367367 PyAxisTags(detail::defaultAxistags(shape.size()+1, order))).setChannelCount(1);
368368 }
369369
382382 "reshapeIfEmpty(): tagged_shape has wrong size.");
383383 }
384384 }
385
385
386386 template <class ARRAY>
387387 static void permuteLikewise(python_ptr array, ARRAY const & data, ARRAY & res)
388388 {
389389 vigra_precondition((int)data.size() == N,
390390 "NumpyArray::permuteLikewise(): size mismatch.");
391
391
392392 ArrayVector<npy_intp> permute;
393 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
393 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
394394 AxisInfo::NonChannel, true);
395395
396396 if(permute.size() == 0)
398398 permute.resize(N);
399399 linearSequence(permute.begin(), permute.end());
400400 }
401
401
402402 applyPermutation(permute.begin(), permute.end(), data.begin(), res.begin());
403403 }
404
404
405405 template <class U>
406406 static void permutationToSetupOrder(python_ptr array, ArrayVector<U> & permute)
407407 {
408 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
408 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
409409 AxisInfo::AllAxes, true);
410410 if(permute.size() == 0)
411411 {
436436 long channelIndex = pythonGetAttr(obj, "channelIndex", ndim);
437437 long majorIndex = pythonGetAttr(obj, "innerNonchannelIndex", ndim);
438438 npy_intp * strides = PyArray_STRIDES(array);
439
439
440440 // If we have no axistags, ndim must match, and axis 0 must be unstrided.
441 if(majorIndex == ndim)
441 if(majorIndex == ndim)
442442 return N == ndim && strides[0] == sizeof(T);
443
444 // If we have axistags, but no channel axis, ndim must match,
443
444 // If we have axistags, but no channel axis, ndim must match,
445445 // and the major non-channel axis must be unstrided.
446 if(channelIndex == ndim)
446 if(channelIndex == ndim)
447447 return N == ndim && strides[majorIndex] == sizeof(T);
448
448
449449 // Otherwise, the channel axis must be a singleton axis that we can drop,
450450 // and the major non-channel axis must be unstrided.
451 return ndim == N+1 && PyArray_DIM(array, channelIndex) == 1 &&
451 return ndim == N+1 && PyArray_DIM(array, channelIndex) == 1 &&
452452 strides[majorIndex] == sizeof(T);
453453 }
454454
473473 int ndim = PyArray_NDIM(array);
474474 long channelIndex = pythonGetAttr(obj, "channelIndex", ndim);
475475 long majorIndex = pythonGetAttr(obj, "innerNonchannelIndex", ndim);
476
476
477477 if(channelIndex < ndim)
478478 {
479479 // When we have a channel axis, ndim must match.
505505 template <class U>
506506 static TaggedShape taggedShape(TinyVector<U, N> const & shape, std::string const & order = "")
507507 {
508 return TaggedShape(shape,
508 return TaggedShape(shape,
509509 PyAxisTags(detail::defaultAxistags(shape.size(), order))).setChannelIndexLast();
510510 }
511511
530530 static void permuteLikewise(python_ptr array, ARRAY const & data, ARRAY & res)
531531 {
532532 ArrayVector<npy_intp> permute;
533
533
534534 if((int)data.size() == N)
535535 {
536536 vigra_precondition(PyArray_NDIM((PyArrayObject*)array.get()) == N,
537537 "NumpyArray::permuteLikewise(): input array has no channel axis.");
538538
539 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
539 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
540540 AxisInfo::AllAxes, true);
541541
542542 if(permute.size() == 0)
548548 {
549549 // rotate channel axis to last position
550550 int channelIndex = permute[0];
551 for(int k=1; k<N; ++k)
551 for(unsigned k=1; k<N; ++k)
552552 permute[k-1] = permute[k];
553553 permute[N-1] = channelIndex;
554554 }
558558 vigra_precondition((int)data.size() == N-1,
559559 "NumpyArray::permuteLikewise(): size mismatch.");
560560
561 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
561 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
562562 AxisInfo::NonChannel, true);
563563
564564 if(permute.size() == 0)
567567 linearSequence(permute.begin(), permute.end());
568568 }
569569 }
570
570
571571 applyPermutation(permute.begin(), permute.end(), data.begin(), res.begin());
572572 }
573
573
574574 template <class U>
575575 static void permutationToSetupOrder(python_ptr array, ArrayVector<U> & permute)
576576 {
577 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
577 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
578578 AxisInfo::AllAxes, true);
579579
580580 if(permute.size() == 0)
586586 {
587587 // if we have a channel axis, rotate it to last position
588588 int channelIndex = permute[0];
589 for(int k=1; k<N; ++k)
589 for(decltype(permute.size()) k=1; k<N; ++k)
590590 permute[k-1] = permute[k];
591591 permute[N-1] = channelIndex;
592592 }
659659 static bool isShapeCompatible(PyArrayObject * array) /* array must not be NULL */
660660 {
661661 PyObject * obj = (PyObject *)array;
662
662
663663 // We need an extra channel axis.
664664 if(PyArray_NDIM(array) != N+1)
665665 return false;
666
666
667667 // When there are no axistags, we assume that the last axis represents the channels.
668668 long channelIndex = pythonGetAttr(obj, "channelIndex", N);
669669 npy_intp * strides = PyArray_STRIDES(array);
670
671 return PyArray_DIM(array, channelIndex) == M && strides[channelIndex] == sizeof(T);
670
671 // find the non-channel axis with smallest stride
672 long majorIndex = pythonGetAttr(obj, "innerNonchannelIndex", N+1);
673 if(majorIndex >= N+1)
674 {
675 npy_intp smallest = NumericTraits<npy_intp>::max();
676 for(unsigned int k=0; k<N+1; ++k)
677 {
678 if(k == channelIndex)
679 continue;
680 if(strides[k] < smallest)
681 {
682 smallest = strides[k];
683 majorIndex = k;
684 }
685 }
686 }
687
688 return PyArray_DIM(array, channelIndex) == M &&
689 strides[channelIndex] == sizeof(T) &&
690 strides[majorIndex] % (M*sizeof(T)) == 0;
672691 }
673692
674693 static bool isPropertyCompatible(PyArrayObject * obj) /* obj must not be NULL */
685704 template <class U>
686705 static TaggedShape taggedShape(TinyVector<U, N> const & shape, std::string const & order = "")
687706 {
688 return TaggedShape(shape,
707 return TaggedShape(shape,
689708 PyAxisTags(detail::defaultAxistags(shape.size()+1, order))).setChannelCount(M);
690709 }
691710
701720 {
702721 vigra_precondition((int)data.size() == N,
703722 "NumpyArray::permuteLikewise(): size mismatch.");
704
723
705724 ArrayVector<npy_intp> permute;
706 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
725 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
707726 AxisInfo::NonChannel, true);
708727
709728 if(permute.size() == 0)
711730 permute.resize(N);
712731 linearSequence(permute.begin(), permute.end());
713732 }
714
733
715734 applyPermutation(permute.begin(), permute.end(), data.begin(), res.begin());
716735 }
717
736
718737 template <class U>
719738 static void permutationToSetupOrder(python_ptr array, ArrayVector<U> & permute)
720739 {
721 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
740 detail::getAxisPermutationImpl(permute, array, "permutationToNormalOrder",
722741 AxisInfo::AllAxes, true);
723742 if(permute.size() == 0)
724743 {
730749 permute.erase(permute.begin());
731750 }
732751 }
733
752
734753 template <class U>
735754 static python_ptr unsafeConstructorFromData(TinyVector<U, N> const & shape,
736755 value_type *data, TinyVector<U, N> const & stride)
745764 std::bind2nd(std::multiplies<npy_intp>(), sizeof(value_type)));
746765 npyStride[N] = sizeof(T);
747766
748 return constructNumpyArrayFromData(npyShape, npyStride.begin(),
767 return constructNumpyArrayFromData(npyShape, npyStride.begin(),
749768 ValuetypeTraits::typeCode, data);
750769 }
751770 };
764783 {
765784 PyObject * obj = (PyObject *)array;
766785 int ndim = PyArray_NDIM(array);
767
768 // We need an extra channel axis.
786
787 // We need an extra channel axis.
769788 if(ndim != N+1)
770789 return false;
771
790
772791 long channelIndex = pythonGetAttr(obj, "channelIndex", ndim);
773792 long majorIndex = pythonGetAttr(obj, "innerNonchannelIndex", ndim);
774793 npy_intp * strides = PyArray_STRIDES(array);
775
794
776795 if(majorIndex < ndim)
777796 {
778797 // We have axistags, but no channel axis => cannot be a TinyVector image
779798 if(channelIndex == ndim)
780799 return false;
781
800
782801 // We have an explicit channel axis => shapes and strides must match
783 return PyArray_DIM(array, channelIndex) == M &&
802 return PyArray_DIM(array, channelIndex) == M &&
784803 strides[channelIndex] == sizeof(T) &&
785804 strides[majorIndex] == sizeof(TinyVector<T, M>);
786
787
805
806
788807 }
789808 else
790809 {
791810 // we have no axistags => we assume that the channel axis is last
792 return PyArray_DIM(array, N) == M &&
811 return PyArray_DIM(array, N) == M &&
793812 strides[N] == sizeof(T) &&
794813 strides[0] == sizeof(TinyVector<T, M>);
795814 }
362362 }
363363 }
364364
365 ResultType operator()(int x, int y, VectorType const &) const
365 ResultType operator()(int /*x*/, int /*y*/, VectorType const &) const
366366 {
367367 // isotropic filtering
368368 return weights_(radius_, radius_);
0 #ifndef VIGRA_PERMUTATION_HXX
1 #define VIGRA_PERMUTATION_HXX
2
3 #include "config.hxx"
4 #include "error.hxx"
5 #include "array_vector.hxx"
6 #include "tinyvector.hxx"
7
8 namespace vigra {
9
10 template <unsigned int N>
11 class Permutation : public TinyVector<unsigned int, N>
12 {
13 public:
14 typedef TinyVector<unsigned int, N> base_type;
15 typedef typename base_type::size_type size_type;
16 typedef typename base_type::value_type value_type;
17 typedef typename base_type::iterator iterator;
18 typedef typename base_type::const_iterator const_iterator;
19 typedef typename base_type::reference reference;
20 typedef typename base_type::const_reference const_reference;
21 typedef typename base_type::pointer pointer;
22 typedef typename base_type::const_pointer const_pointer;
23 typedef int integral_type;
24
25 Permutation() : base_type() {}
26
27 Permutation(const Permutation<N-1> & other, const size_type index)
28 : base_type()
29 {
30 vigra_precondition(
31 index < N,
32 "Permutation::Permutation(): Invalid index");
33 for (size_type n = 0; n < N; n++)
34 {
35 if (n < index)
36 {
37 (*this)[n] = other[n];
38 }
39 else if (n == index)
40 {
41 (*this)[n] = N - 1;
42 }
43 else
44 {
45 (*this)[n] = other[n-1];
46 }
47 }
48 if ((N - 1 - index) % 2 == 0)
49 {
50 sign_ = (other.sign() == 1);
51 }
52 else
53 {
54 sign_ = (other.sign() == -1);
55 }
56 }
57
58 integral_type sign() const
59 {
60 if (sign_)
61 {
62 return +1;
63 }
64 else
65 {
66 return -1;
67 }
68 }
69
70 private:
71 bool sign_;
72
73 };
74
75 template <>
76 class Permutation<1> : public TinyVector<unsigned int, 1>
77 {
78 public:
79 typedef TinyVector<unsigned int, 1> base_type;
80 typedef base_type::size_type size_type;
81 typedef base_type::value_type value_type;
82 typedef base_type::iterator iterator;
83 typedef base_type::const_iterator const_iterator;
84 typedef base_type::reference reference;
85 typedef base_type::const_reference const_reference;
86 typedef base_type::pointer pointer;
87 typedef base_type::const_pointer const_pointer;
88 typedef int integral_type;
89
90 Permutation() : base_type()
91 {
92 (*this)[0] = 0;
93 (*this).sign_ = true;
94 }
95
96 integral_type sign() const
97 {
98 if (sign_)
99 {
100 return +1;
101 }
102 else
103 {
104 return -1;
105 }
106 }
107
108 private:
109 bool sign_;
110 };
111
/** \brief Container of all N! permutations of 0, ..., N-1, generated
    recursively in "plain changes" (Steinhaus-Johnson-Trotter) order:
    each permutation of N-1 elements is expanded by inserting the new
    largest value N-1 at every possible position. The sweep direction of
    the insertion position alternates with the sign of the smaller
    permutation, so consecutive permutations differ by one transposition.
*/
template <unsigned int N>
class PlainChangesPermutations : public ArrayVector<Permutation<N> >
{
  public:
    typedef ArrayVector<Permutation<N> > base_type;
    typedef typename base_type::size_type size_type;
    typedef typename base_type::value_type value_type;
    typedef typename base_type::iterator iterator;
    typedef typename base_type::const_iterator const_iterator;
    typedef typename base_type::reference reference;
    typedef typename base_type::const_reference const_reference;
    typedef typename base_type::pointer pointer;
    typedef typename base_type::const_pointer const_pointer;

    /** Build the full list by expanding all permutations of N-1 elements
        (recursion terminates at the PlainChangesPermutations<1>
        specialization).
    */
    PlainChangesPermutations() : base_type()
    {
        PlainChangesPermutations<N-1> permutations;
        for (auto permutation : permutations)
        {
            if (permutation.sign() == -1)
            {
                // odd permutation: sweep the insertion position upwards
                for (unsigned int n = 0; n < N; n++)
                {
                    this->push_back(Permutation<N>(permutation, n));
                }
            }
            else
            {
                // even permutation: sweep the insertion position downwards
                for (unsigned int n = N; n > 0; n--)
                {
                    this->push_back(Permutation<N>(permutation, n - 1));
                }
            }
        }
    }
};
148
149 template <>
150 class PlainChangesPermutations<1> : public ArrayVector<Permutation<1> >
151 {
152 public:
153 typedef ArrayVector<Permutation<1> > base_type;
154 typedef base_type::size_type size_type;
155 typedef base_type::value_type value_type;
156 typedef base_type::iterator iterator;
157 typedef base_type::const_iterator const_iterator;
158 typedef base_type::reference reference;
159 typedef base_type::const_reference const_reference;
160 typedef base_type::pointer pointer;
161 typedef base_type::const_pointer const_pointer;
162
163 PlainChangesPermutations() : base_type()
164 {
165 this->push_back(Permutation<1>());
166 }
167 };
168
169 } /* namespace vigra */
170
171 #endif
0 #ifndef VIGRA_POLYTOPE_HXX
1 #define VIGRA_POLYTOPE_HXX
2
3 #ifndef WITH_LEMON
4 #error "Should only be included with flag \"WITH_LEMON\""
5 #endif
6
7 #include <set>
8 #include <lemon/list_graph.h>
9 #include <lemon/maps.h>
10
11 #include "config.hxx"
12 #include "error.hxx"
13 #include "tinyvector.hxx"
14 #include "array_vector.hxx"
15 #include "linear_algebra.hxx"
16 #include "numerictraits.hxx"
17 #include "permutation.hxx"
18
19 namespace vigra {
20
21 /** \brief Represent an n-dimensional polytope.
22
23 \tparam N Dimension the polytope.
24 \tparam T Type of the vector components of the polytope vertices.
25 */
26 template <unsigned int N, class T>
27 class Polytope
28 {
29 public:
30
31 enum Dimension {dimension = N};
32 enum node_enum {INVALID, FACET, VERTEX};
33
34 template <node_enum NodeType>
35 struct node_type_iterator;
36
37 typedef T coordinate_type;
38 typedef typename NumericTraits<T>::RealPromote real_type;
39 typedef TinyVector<T, N> point_type;
40 typedef TinyVectorView<T, N> point_view_type;
41 typedef typename point_type::difference_type difference_type;
42 typedef typename lemon::ListDigraph graph_type;
43 typedef typename graph_type::Node node_type;
44 typedef typename graph_type::Arc arc_type;
45 typedef typename graph_type::NodeIt node_iterator;
46 typedef typename graph_type::OutArcIt out_arc_iterator;
47 typedef typename graph_type::InArcIt in_arc_iterator;
48 typedef node_type_iterator<FACET> facet_iterator;
49 typedef node_type_iterator<VERTEX> vertex_iterator;
50
        /** Default constructor creates an empty polytope: no vertices and
            no facets. All lemon node maps are bound to the freshly created
            (empty) graph.
        */
        Polytope()
        : graph_()
        , type_map_(graph_)
        , vec_map_(graph_)
        , aligns_map_(graph_)
        {}
59
        /** Copy constructor.
            The node maps must first be bound to this object's own (empty)
            graph; the deep copy of nodes, arcs and map contents is then
            delegated to operator=.
        */
        Polytope(const Polytope<N, T> & other)
        : graph_()
        , type_map_(graph_)
        , vec_map_(graph_)
        , aligns_map_(graph_)
        {
            *this = other;
        }
70
        /** Copy from another polytope: deep-copies the graph structure and
            all attached node maps (node type, embedded vector, facet
            adjacency).
            NOTE(review): returns void, unlike a canonical operator=, so
            assignments cannot be chained. Also, graph_ is not cleared
            before copying; assigning to a non-empty polytope may leave
            stale nodes behind — confirm lemon::digraphCopy semantics.
        */
        virtual void operator=(const Polytope<N, T> & other)
        {
            lemon::digraphCopy(other.graph_, graph_);
            lemon::mapCopy(other.graph_, other.type_map_, type_map_);
            lemon::mapCopy(other.graph_, other.vec_map_, vec_map_);
            lemon::mapCopy(other.graph_, other.aligns_map_, aligns_map_);
        }
80
81 virtual bool contains(const point_view_type & p) const = 0;
82
83 virtual real_type nVolume() const = 0;
84
85 virtual real_type nSurface() const = 0;
86
87 /** Check if the facet aligns with other facets at each of its ridges.
88 */
89 virtual bool closed(const node_type n) const
90 {
91 vigra_assert(
92 type_map_[n] == FACET,
93 "Polytope::closed(): Node needs do be a facet");
94 return std::find(
95 aligns_map_[n].begin(),
96 aligns_map_[n].end(),
97 lemon::INVALID) == aligns_map_[n].end();
98 }
99
100 /** Check if the polytope has a closed surface
101 */
102 virtual bool closed() const
103 {
104 for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
105 {
106 if (!(this->closed(n)))
107 {
108 return false;
109 }
110 }
111 return true;
112 }
113
114
115 /** Add a vertex to the polytope.
116 */
117 virtual node_type addVertex(const point_view_type & p)
118 {
119 node_type ret = graph_.addNode();
120 type_map_[ret] = VERTEX;
121 vec_map_[ret] = p;
122 for (int i = 0; i < N; i++)
123 {
124 aligns_map_[ret][i] = lemon::INVALID;
125 }
126 return ret;
127 }
128
        /** Erase a facet.
            Before the node is removed from the graph, the facet is
            unlinked from all its neighbors: any slot in a neighbor's
            alignment list that referred to u is reset to lemon::INVALID,
            keeping aligns_map_ symmetric.
        */
        virtual void eraseFacet(const node_type u)
        {
            vigra_assert(
                type_map_[u] == FACET,
                "Polytope::eraseFacet(): Node needs to be a facet");
            for (auto neighbor : aligns_map_[u])
            {
                if (neighbor != lemon::INVALID)
                {
                    // locate the back-reference to u in the neighbor's list
                    auto it = std::find(
                            aligns_map_[neighbor].begin(),
                            aligns_map_[neighbor].end(),
                            u);
                    vigra_assert(
                            it != aligns_map_[neighbor].end(),
                            "Polytope::eraseFacet(): Inconsistent aligns map");
                    *it = lemon::INVALID;
                }
            }
            graph_.erase(u);
        }
152
153 /** Get the connected elements in the graph that represents the polytope.
154 If a facet node is inserted, all of its vertices will be returned, if
155 a vertex node is inserted, all facets having this vertex will be
156 returned.
157 */
158 virtual std::set<node_type> getConnected(const node_type u) const
159 {
160 std::set<node_type> ret;
161 if (type_map_[u] == FACET)
162 {
163 for (out_arc_iterator a(graph_, u); a != lemon::INVALID; ++a)
164 {
165 ret.insert(graph_.target(a));
166 }
167 }
168 else
169 {
170 for (in_arc_iterator a(graph_, u); a != lemon::INVALID; ++a)
171 {
172 ret.insert(graph_.source(a));
173 }
174 }
175 return ret;
176 }
177
178 // TODO remove
179 virtual ArrayVector<point_view_type> getVertices(const node_type u) const
180 {
181 vigra_assert(
182 type_map_[u] == FACET,
183 "Polytope::getVertices(): Node must be a facet");
184 ArrayVector<point_view_type> ret;
185 for (out_arc_iterator a(graph_, u); a != lemon::INVALID; ++a)
186 {
187 ret.push_back(vec_map_[graph_.target(a)]);
188 }
189 return ret;
190 }
191
192 /** Get all facets whose normal has a positive scalar product with the
193 vector to the given vertex.
194 */
195 virtual ArrayVector<node_type> litFacets(const point_view_type & p) const
196 {
197 ArrayVector<node_type> ret;
198 for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
199 {
200 if (distance(n, p) > 0)
201 {
202 ret.push_back(n);
203 }
204 }
205 return ret;
206 }
207
    /** Remove all vertices that are not part of the polytope mesh.
        A vertex is unreferenced when no facet arc points to it (its in-arc
        iterator is immediately INVALID). Erasure is deferred to a second
        pass so the running vertex iterator is not invalidated mid-scan.
    */
    virtual void tidyUp()
    {
        std::set<node_type> to_erase;
        for (vertex_iterator v(graph_, type_map_); v != lemon::INVALID; ++v)
        {
            vigra_assert(
                type_map_[v] == VERTEX,
                "Polytope::tidyUp(): vertex not a vertex");
            in_arc_iterator a(graph_, v);
            if (a == lemon::INVALID)
            {
                to_erase.insert(v);
            }
        }
        for (node_type v : to_erase)
        {
            graph_.erase(v);
        }
    }
229
230 /** Get the distance between a facet and a vertex */
231 virtual real_type distance(const node_type u, const point_view_type & p) const
232 {
233 vigra_assert(
234 type_map_[u] == FACET,
235 "Polytope::distance(): Node must be a facet");
236 out_arc_iterator a(graph_, u);
237 vigra_assert(
238 a != lemon::INVALID,
239 "Polytope::distance(): Invalid facet");
240
241 return dot(p - vec_map_[graph_.target(a)], vec_map_[u]);
242 }
243
244 /** Label all elements in the array which are inside the polytope.
245 */
246 virtual unsigned int fill(
247 MultiArrayView<N, unsigned int> & array,
248 const unsigned int label,
249 const point_view_type offset,
250 const point_view_type scale) const
251 {
252 typedef MultiArrayView<N, unsigned int> array_type;
253
254 unsigned int ret = 0;
255 typename array_type::iterator it = array.begin();
256 for (it = array.begin(); it != array.end(); it++)
257 {
258 const typename array_type::difference_type coord = it.template get<0>();
259 point_type vec;
260 for (unsigned int i = 0; i < vec.size(); i++)
261 {
262 vec[i] = coord[i]*scale[i] + offset[i];
263 }
264 if (this->contains(vec))
265 {
266 ret++;
267 *it = label;
268 }
269 }
270 return ret;
271 }
272
273 /** Label all elements in the array which are inside the polytope.
274 */
275 virtual unsigned int fill(
276 MultiArrayView<N, unsigned int> & array,
277 const unsigned int label,
278 const point_view_type offset) const
279 {
280 vigra_assert(
281 closed(),
282 "Polytope::fill(): Polytope not closed.");
283 typedef MultiArrayView<N, unsigned int> array_type;
284
285 unsigned int ret = 0;
286 typename array_type::iterator it = array.begin();
287 for (it = array.begin(); it != array.end(); it++)
288 {
289 const typename array_type::difference_type coord = it.template get<0>();
290 point_type vec;
291 for (unsigned int i = 0; i < vec.size(); i++)
292 {
293 vec[i] = coord[i] + offset[i];
294 }
295 if (this->contains(vec))
296 {
297 ret++;
298 *it = label;
299 }
300 }
301 return ret;
302 }
303
304 protected:
305
306 virtual bool isConnected(
307 const node_type vertex,
308 const node_type facet) const
309 {
310 vigra_assert(
311 type_map_[vertex] == VERTEX,
312 "Polytope::isConnected(): First node must be a vertex");
313 vigra_assert(
314 type_map_[facet] == FACET,
315 "Polytope::isConnected(): Second node must be a facet");
316 for (out_arc_iterator a(graph_, facet); a != lemon::INVALID; ++a)
317 {
318 if (graph_.target(a) == vertex)
319 {
320 return true;
321 }
322 }
323 return false;
324 }
325
    /** Find the facet adjacent to facet u across the ridge obtained by
        dropping u's vertex at position 'index': the neighbor is the other
        facet sharing all of u's remaining vertices. Returns lemon::INVALID
        when no such facet exists (open ridge).
    */
    virtual node_type findNeighbor(
        const node_type u,
        const difference_type index) const
    {
        vigra_assert(
            type_map_[u] == FACET,
            "Polytope::findNeighbor(): Node must be a facet");
        vigra_assert(
            index < dimension,
            "Polytope::findNeighbor(): Invalid index");
        vigra_assert(
            countOutArcs(graph_, u) == dimension,
            "Polytope::findNeighbor(): Bad facet");
        // Iterate over u's vertices, leaving out the one at 'index'.
        out_skip_iterator a(graph_, u, index);
        const node_type first_vertex = graph_.target(a);
        // Candidates are the other facets incident to the first remaining
        // vertex; accept the one that contains every remaining vertex.
        for (node_type candidate : getConnected(first_vertex))
        {
            if (candidate != u)
            {
                out_skip_iterator b(a);
                do
                {
                    ++b;
                    if (b == lemon::INVALID)
                    {
                        // Exhausted all remaining vertices: all shared.
                        return candidate;
                    }
                } while (isConnected(graph_.target(b), candidate));
            }
        }
        return lemon::INVALID;
    }
358
359 void assignNeighbors(const node_type u)
360 {
361 vigra_assert(
362 type_map_[u] == FACET,
363 "Polytope::assignNeighbors(): Node must be facet");
364 for (int i = 0; i < dimension; i++)
365 {
366 aligns_map_[u][i] = this->findNeighbor(u, i);
367 }
368 }
369
370 void updateInvalidNeighbors(const node_type u)
371 {
372 vigra_assert(
373 type_map_[u] == FACET,
374 "Polytope::assignNeighbors(): Node must be facet");
375 for (int i = 0; i < dimension; i++)
376 {
377 if (aligns_map_[u][i] == lemon::INVALID)
378 {
379 aligns_map_[u][i] = this->findNeighbor(u, i);
380 }
381 }
382 }
383
    /** Return the vertices of the first open ridge of facet u: for the
        first neighbor slot that is still lemon::INVALID, collect the
        dimension-1 vertices of u excluding the slot's vertex. Returns an
        empty vector when the facet has no open ridge.
    */
    ArrayVector<node_type> openEdge(const node_type u)
    {
        vigra_assert(
            type_map_[u] == FACET,
            "Polytope::openEdge(): Node must be facet");
        vigra_assert(
            lemon::countOutArcs(graph_, u) == dimension,
            "Polytope::openEdge(): Got invalid facet");
        ArrayVector<node_type> ret;
        for (int i = 0; i < dimension; i++)
        {
            if (aligns_map_[u][i] == lemon::INVALID)
            {
                // Collect all vertices except the i-th.
                for (out_skip_iterator a(graph_, u, i); a != lemon::INVALID; ++a)
                {
                    ret.push_back(graph_.target(a));
                }
                return ret;
            }
        }
        return ret;
    }
406
407 public:
408
409 template <node_enum NodeType>
410 struct node_type_iterator : public node_type
411 {
412 node_type_iterator()
413 {}
414
415 explicit node_type_iterator(
416 const graph_type & graph,
417 const typename graph_type::NodeMap<node_enum> & type_map)
418 : graph_(graph)
419 , type_map_(type_map)
420 {
421 graph_.first(static_cast<node_type &>(*this));
422 while (*this != lemon::INVALID && type_map_[*this] != NodeType)
423 {
424 graph_.next(*this);
425 }
426 }
427
428 node_type_iterator<NodeType> & operator++()
429 {
430 while (*this != lemon::INVALID)
431 {
432 graph_.next(*this);
433 if (type_map_[*this] == NodeType)
434 {
435 return *this;
436 }
437 }
438 return *this;
439 }
440
441 bool operator==(lemon::Invalid i) const
442 {
443 return (static_cast<node_type>(*this) == i);
444 }
445
446 bool operator!=(lemon::Invalid i) const
447 {
448 return (static_cast<node_type>(*this) != i);
449 }
450
451 const graph_type & graph_;
452 const typename graph_type::NodeMap<node_enum> & type_map_;
453 };
454
    /** Out-arc iterator over a node's arcs that leaves out the arc at
        position 'skip'. index() reports the current position counted among
        the non-skipped arcs only.
    */
    struct out_skip_iterator : public arc_type
    {
        out_skip_iterator()
        {}

        explicit out_skip_iterator(
            const graph_type & graph,
            const node_type & node,
            const difference_type skip)
        : graph_(graph)
        , skip_(skip)
        , index_(0)
        {
            graph_.firstOut(*this, node);
            if (skip_ == 0)
            {
                // The very first arc is the skipped one: step over it.
                graph_.nextOut(*this);
            }
        }

        out_skip_iterator & operator++()
        {
            ++index_;
            graph_.nextOut(*this);
            if (index_ == skip_)
            {
                // Reached the skipped position: step over it.
                graph_.nextOut(*this);
            }
            return *this;
        }

        bool operator==(lemon::Invalid i) const
        {
            return (static_cast<arc_type>(*this) == i);
        }

        bool operator!=(lemon::Invalid i) const
        {
            return (static_cast<arc_type>(*this) != i);
        }

        /** Current position among the non-skipped arcs. */
        difference_type index() const
        {
            return index_;
        }

        const graph_type & graph_;      // graph being iterated
        const difference_type skip_;    // arc position to leave out
        difference_type index_;         // logical position (skip excluded)
    };
505
    // Bipartite incidence structure: arcs always point facet -> vertex.
    graph_type graph_;
    // Marks each node as FACET or VERTEX.
    typename graph_type::NodeMap<node_enum> type_map_;
    // Per node: vertex coordinates for VERTEX nodes, the facet normal for
    // FACET nodes (written by assignNormal in subclasses).
    typename graph_type::NodeMap<point_type> vec_map_;
    // Per facet: its up-to-N neighboring facets (lemon::INVALID = open).
    typename graph_type::NodeMap<TinyVector<node_type, N> > aligns_map_;
510 };
511
/** \brief Specialization of the polytope to polytopes which form a star
    domain, i.e. possess a center from which every interior point can be
    reached along a straight line inside the polytope.
*/
template <unsigned int N, class T>
class StarPolytope : public Polytope<N, T>
{
  public:

    typedef Polytope<N, T> base_type;
    typedef typename base_type::coordinate_type coordinate_type;
    typedef typename base_type::real_type real_type;
    typedef typename base_type::point_type point_type;
    typedef typename base_type::point_view_type point_view_type;
    typedef typename base_type::difference_type difference_type;
    typedef typename base_type::graph_type graph_type;
    typedef typename base_type::node_type node_type;
    typedef typename base_type::arc_type arc_type;
    typedef typename base_type::node_iterator node_iterator;
    typedef typename base_type::in_arc_iterator in_arc_iterator;
    typedef typename base_type::out_arc_iterator out_arc_iterator;
    typedef typename base_type::out_skip_iterator out_skip_iterator;
    typedef typename base_type::facet_iterator facet_iterator;
    typedef typename base_type::vertex_iterator vertex_iterator;

    using base_type::dimension;
    using base_type::graph_;
    using base_type::vec_map_;
    using base_type::type_map_;
    using base_type::aligns_map_;
    using base_type::INVALID;
    using base_type::FACET;
    using base_type::VERTEX;

  public:

    /** Constructor creates an empty StarPolytope with its center at the
        origin.
    */
    StarPolytope()
    : base_type()
    , center_(point_type())
    {}

    /** Constructor creates an empty StarPolytope with the given center.
    */
    StarPolytope(const point_view_type & center)
    : base_type()
    , center_(center)
    {}

    /** Constructor for the 2-dimensional case taking three vertices and the
        center.
    */
    StarPolytope(
        const point_view_type & a,
        const point_view_type & b,
        const point_view_type & c,
        const point_view_type & center)
    : base_type()
    , center_(center)
    {
        vigra_precondition(
            dimension == 2,
            "StarPolytope::StarPolytope(): Signature only for use in 2D");
        node_type na = this->addVertex(a);
        node_type nb = this->addVertex(b);
        node_type nc = this->addVertex(c);
        // Each facet (edge) is spanned by two of the three vertices.
        this->addFacet(nb, nc);
        this->addFacet(na, nc);
        this->addFacet(na, nb);
    }

    /** Constructor for the 3-dimensional case taking four vertices and the
        center.
    */
    StarPolytope(
        const point_view_type & a,
        const point_view_type & b,
        const point_view_type & c,
        const point_view_type & d,
        const point_view_type & center)
    : base_type()
    , center_(center)
    {
        vigra_precondition(
            dimension == 3,
            "StarPolytope::StarPolytope(): Signature only for use in 3D");
        node_type na = this->addVertex(a);
        node_type nb = this->addVertex(b);
        node_type nc = this->addVertex(c);
        node_type nd = this->addVertex(d);
        // Each facet (triangle) leaves out exactly one vertex.
        this->addFacet(nb, nc, nd);
        this->addFacet(na, nc, nd);
        this->addFacet(na, nb, nd);
        this->addFacet(na, nb, nc);
    }

    /** Get the center of the star domain.
    */
    virtual point_type getCenter() const
    {
        return center_;
    }

    /** Compute and store the normal of the given facet node in vec_map_.
        The normal is built as a generalized cross product of the facet's
        spanning vectors (Leibniz expansion over all permutations) and
        oriented to point away from the center.
    */
    virtual void assignNormal(const node_type & u)
    {
        vigra_assert(
            type_map_[u] == FACET,
            "StarPolytope::assignNormal(): Node needs to be a facet node");
        MultiArray<2, real_type> mat(dimension-1, dimension);
        out_arc_iterator a(graph_, u);
        point_view_type vertex = vec_map_[graph_.target(a)];
        ++a;
        // Rows of mat span the facet: differences to the first vertex.
        for (int i = 0; a != lemon::INVALID; ++a, ++i)
        {
            const point_type vec = vec_map_[graph_.target(a)] - vertex;
            std::copy(vec.begin(), vec.end(), rowVector(mat, i).begin());
        }
        point_view_type normal = vec_map_[u];
        for (int i = 0; i < dimension; i++)
        {
            normal[i] = 0;
        }
        // Leibniz-style determinant expansion: each permutation contributes
        // one signed product of matrix entries to one normal component.
        for (auto permutation : permutations_)
        {
            coordinate_type val = 1;
            for (int i = 0; i < dimension - 1; i++)
            {
                val *= mat(i, permutation[i]);
            }
            val *= permutation.sign();
            normal[permutation[dimension - 1]] += val;
        }
        // Orient the normal outward (away from the star center).
        if (dot(normal, vertex - center_) < 0)
        {
            normal *= -1;
        }
    }

    /** Add a facet to a 2-dimensional polytope.
    */
    virtual node_type addFacet(const node_type & a, const node_type & b)
    {
        vigra_precondition(
            dimension == 2,
            "StarPolytope::addFacet(): Signature only for use in 2D");
        node_type ret = graph_.addNode();
        type_map_[ret] = FACET;
        graph_.addArc(ret, a);
        graph_.addArc(ret, b);
        vigra_assert(
            lemon::countOutArcs(graph_, ret) == dimension,
            "StarPolytope::addFacet(): Invalid facet created");
        this->assignNormal(ret);
        this->assignNeighbors(ret);
        // The new facet may complete neighborhoods of adjacent facets.
        for (auto facet : aligns_map_[ret])
        {
            if (facet != lemon::INVALID)
            {
                vigra_assert(
                    type_map_[facet] == FACET,
                    "StarPolytope::addFacet(): Node must be facet");
                this->updateInvalidNeighbors(facet);
            }
        }
        return ret;
    }

    /** Add a facet to a 3-dimensional polytope.
    */
    virtual node_type addFacet(
        const node_type & a,
        const node_type & b,
        const node_type & c)
    {
        vigra_precondition(
            dimension == 3,
            "StarPolytope::addFacet(): Signature only for use in 3D");
        node_type ret = graph_.addNode();
        type_map_[ret] = FACET;
        graph_.addArc(ret, a);
        graph_.addArc(ret, b);
        graph_.addArc(ret, c);
        vigra_assert(
            lemon::countOutArcs(graph_, ret) == dimension,
            "StarPolytope::addFacet(): Invalid facet created");
        this->assignNormal(ret);
        this->assignNeighbors(ret);
        // The new facet may complete neighborhoods of adjacent facets.
        for (auto facet : aligns_map_[ret])
        {
            if (facet != lemon::INVALID)
            {
                vigra_assert(
                    type_map_[facet] == FACET,
                    "StarPolytope::addFacet(): Node must be facet");
                this->updateInvalidNeighbors(facet);
            }
        }
        return ret;
    }

    /** Close the polytope, which must consist of exactly dimension+1
        vertices (a simplex): the center is set to the vertex centroid and
        the dimension+1 facets are created, facet i leaving out vertex i.
    */
    virtual void close()
    {
        vigra_precondition(
            lemon::countNodes(graph_) == dimension + 1,
            "StarPolytope::close(): Can only close for dim+1 vertices");
        // Set center of polytope
        {
            vertex_iterator v(graph_, type_map_);
            center_ = vec_map_[v];
            for (++v; v != lemon::INVALID; ++v)
            {
                center_ += vec_map_[v];
            }
            center_ /= static_cast<real_type>(dimension + 1);
        }
        // Create facets
        for (int i = 0; i < dimension + 1; i++)
        {
            node_type facet = graph_.addNode();
            type_map_[facet] = FACET;
            vertex_iterator v(graph_, type_map_);
            for (int j = 0; j < dimension; ++j, ++v)
            {
                if (i == j)
                {
                    // Skip the vertex opposite to this facet.
                    ++v;
                }
                graph_.addArc(facet, v);
            }
            vigra_assert(
                lemon::countOutArcs(graph_, facet) == dimension,
                "StarPolytope::close(): Invalid facet created");
            this->assignNormal(facet);
            this->assignNeighbors(facet);
            for (auto neighbor : aligns_map_[facet])
            {
                if (neighbor != lemon::INVALID)
                {
                    vigra_assert(
                        type_map_[neighbor] == FACET,
                        "StarPolytope::close(): Node must be facet");
                    this->updateInvalidNeighbors(neighbor);
                }
            }
        }
    }

    /** Check whether p lies inside the simplex spanned by facet n together
        with the center: for every vertex j of that simplex, p must be on
        the same side of the opposite face as j itself (compared via the
        signs of two determinants), up to a small epsilon.
    */
    virtual bool contains(const node_type & n, const point_view_type & p) const
    {
        vigra_assert(
            type_map_[n] == FACET,
            "StarPolytope::contains(): Node needs do be a facet");
        ArrayVector<point_view_type> vertices = this->getVertices(n);
        vertices.push_back(center_);
        MultiArray<2, coordinate_type> jp_mat(dimension, dimension);
        MultiArray<2, coordinate_type> jj_mat(dimension, dimension);
        for (int j = 0; j < dimension + 1; j++)
        {
            // Build both matrices from all simplex vertices except the j-th:
            // rows of jp_mat are differences to p, rows of jj_mat are
            // differences to vertex j.
            for (int i = 0, ii = 0; i < dimension; i++, ii++)
            {
                if (i == j)
                {
                    ii++;
                }
                {
                    const point_type vec = vertices[ii] - p;
                    std::copy(vec.begin(), vec.end(), rowVector(jp_mat, i).begin());
                }
                {
                    const point_type vec = vertices[ii] - vertices[j];
                    std::copy(vec.begin(), vec.end(), rowVector(jj_mat, i).begin());
                }
            }
            const coordinate_type jj_det = linalg::determinant(jj_mat);
            const coordinate_type jp_det = linalg::determinant(jp_mat);
            const coordinate_type eps = std::numeric_limits<T>::epsilon() * 2;
            // Differing determinant signs: p and vertex j lie on opposite
            // sides of the face spanned by the remaining vertices.
            if (((jj_det > 0) != (jp_det > 0)) && abs(jp_det) > eps)
            {
                return false;
            }
        }
        return true;
    }

    /** Check if a point is inside the polytope, i.e. inside any of the
        simplices formed by a facet and the center.
    */
    virtual bool contains(const point_view_type & p) const
    {
        for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
        {
            if (contains(n, p))
            {
                return true;
            }
        }
        return false;
    }

    /** Volume of the simplex spanned by facet n and the center:
        |det| / dimension!.
    */
    virtual real_type nVolume(const node_type & n) const
    {
        vigra_assert(
            type_map_[n] == FACET,
            "StarPolytope::nVolume(): Node needs do be a facet");
        MultiArray<2, coordinate_type> mat(dimension, dimension);
        real_type fac = 1;
        out_arc_iterator a(graph_, n);
        for (int i = 0; i < dimension; ++i, ++a)
        {
            fac *= (i+1);  // accumulates dimension!
            const point_type vec = vec_map_[graph_.target(a)] - center_;
            std::copy(vec.begin(), vec.end(), rowVector(mat, i).begin());
        }
        return abs(linalg::determinant(mat) / fac);
    }

    /** Calculate the volume of the polytope as the sum over all
        facet-center simplices.
    */
    virtual real_type nVolume() const
    {
        real_type ret = 0;
        for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
        {
            ret += this->nVolume(n);
        }
        return ret;
    }

    /** (dimension-1)-dimensional measure of facet n, computed from the
        determinant of the facet's spanning vectors combined with its
        normal, normalized by the normal's magnitude and (dimension-1)!.
    */
    virtual real_type nSurface(const node_type & n) const
    {
        vigra_assert(
            type_map_[n] == FACET,
            // NOTE(review): message says nVolume -- copy-paste; this is nSurface.
            "StarPolytope::nVolume(): Node needs do be a facet");
        MultiArray<2, coordinate_type> mat(dimension, dimension);
        real_type factor = vec_map_[n].magnitude();
        out_arc_iterator a(graph_, n);
        const point_view_type vec = vec_map_[graph_.target(a)];
        ++a;
        for (int i = 1; i < dimension; ++i, ++a)
        {
            factor *= i;  // accumulates (dimension-1)!
            const point_type tmp = vec_map_[graph_.target(a)] - vec;
            std::copy(tmp.begin(), tmp.end(), rowVector(mat, i).begin());
        }
        // The first matrix row holds the facet normal itself.
        const point_type tmp = vec_map_[n];
        std::copy(tmp.begin(), tmp.end(), rowVector(mat, 0).begin());
        return abs(linalg::determinant(mat)) / factor;
    }

    /** Calculate the surface of the polytope as the sum over all facet
        measures.
    */
    virtual real_type nSurface() const
    {
        real_type ret = 0;
        for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
        {
            ret += this->nSurface(n);
        }
        return ret;
    }

  protected:

    // All permutations of {0..N-1} with signs; used in assignNormal.
    PlainChangesPermutations<N> permutations_;
    // Center of the star domain.
    point_type center_;
};
877
878 /** Specialization of the StarPolytope to polytopes which have a convex domain.
879 */
880 template <unsigned int N, class T>
881 class ConvexPolytope : public StarPolytope<N, T>
882 {
883 public:
884
885 typedef StarPolytope<N, T> base_type;
886 typedef typename base_type::coordinate_type coordinate_type;
887 typedef typename base_type::real_type real_type;
888 typedef typename base_type::point_type point_type;
889 typedef typename base_type::point_view_type point_view_type;
890 typedef typename base_type::difference_type difference_type;
891 typedef typename base_type::graph_type graph_type;
892 typedef typename base_type::node_type node_type;
893 typedef typename base_type::arc_type arc_type;
894 typedef typename base_type::node_iterator node_iterator;
895 typedef typename base_type::in_arc_iterator in_arc_iterator;
896 typedef typename base_type::out_arc_iterator out_arc_iterator;
897 typedef typename base_type::out_skip_iterator out_skip_iterator;
898 typedef typename base_type::facet_iterator facet_iterator;
899 typedef typename base_type::vertex_iterator vertex_iterator;
900
901 using base_type::dimension;
902 using base_type::graph_;
903 using base_type::vec_map_;
904 using base_type::type_map_;
905 using base_type::aligns_map_;
906 using base_type::INVALID;
907 using base_type::FACET;
908 using base_type::VERTEX;
909
910 public:
911
912 ConvexPolytope()
913 : base_type()
914 {}
915
916 ConvexPolytope(const point_view_type & center)
917 : base_type(center)
918 {}
919
920 ConvexPolytope(
921 const point_view_type & a,
922 const point_view_type & b,
923 const point_view_type & c)
924 : base_type(a, b, c, (a + b + c) / 3)
925 {}
926
927 ConvexPolytope(
928 const point_view_type & a,
929 const point_view_type & b,
930 const point_view_type & c,
931 const point_view_type & d)
932 : base_type(a, b, c, d, (a + b + c + d) / 4)
933 {}
934
935 protected:
936
937 virtual void closeFacet(
938 const node_type & vertex,
939 const node_type & facet)
940 {
941 vigra_assert(
942 type_map_[vertex] == VERTEX,
943 "ConvexPolytope::closeFacet(): Vertex needs to be a vertex node");
944 vigra_assert(
945 type_map_[facet] == FACET,
946 "ConvexPolytope::closeFacet(): Facet needs to be a facet node");
947 vigra_assert(
948 (this->getConnected(facet)).count(vertex) == 0,
949 "ConvexPolytope::closeFacet(): Cannot close facet with vertex");
950
951 while (!(this->closed(facet)))
952 {
953 ArrayVector<node_type> vertices = this->openEdge(facet);
954 vigra_assert(
955 vertices.size() == (dimension - 1),
956 "StarPolytope::closeFacet(): Invalid facet");
957 node_type new_facet = graph_.addNode();
958 type_map_[new_facet] = FACET;
959 graph_.addArc(new_facet, vertex);
960 for (auto n : vertices)
961 {
962 graph_.addArc(new_facet, n);
963 }
964 vigra_assert(
965 lemon::countOutArcs(graph_, new_facet) == dimension,
966 "ConvexPolytope::closeFacet(): Invalid facet created");
967 this->assignNormal(new_facet);
968 this->assignNeighbors(new_facet);
969 for (auto neighbor : aligns_map_[new_facet])
970 {
971 if (neighbor != lemon::INVALID)
972 {
973 vigra_assert(
974 type_map_[facet] == FACET,
975 "StarPolytope::addFacet(): Node must be facet");
976 this->updateInvalidNeighbors(neighbor);
977 }
978 }
979 }
980 }
981
982 public:
983
984 virtual bool contains(const node_type & n, const point_view_type & p) const
985 {
986 vigra_assert(
987 type_map_[n] == FACET,
988 "ConvexPolytope::contains(): Node needs do be a facet");
989 const out_arc_iterator a(graph_, n);
990 const point_view_type vertex = vec_map_[graph_.target(a)];
991 const point_view_type normal = vec_map_[n];
992 const real_type scalar = dot(p - vertex, normal);
993 return (scalar < std::numeric_limits<T>::epsilon() * 2);
994 }
995
996 virtual bool contains(const point_view_type & p) const
997 {
998 for (facet_iterator n(graph_, type_map_); n != lemon::INVALID; ++n)
999 {
1000 if (!contains(n, p))
1001 {
1002 return false;
1003 }
1004 }
1005 return true;
1006 }
1007
1008 /** Expand the polytope to the given point if it's outside of the current
1009 polytope, such that the new polytope is still convex.
1010 */
1011 virtual void addExtremeVertex(const point_view_type & p)
1012 {
1013 vigra_assert(
1014 this->closed(),
1015 "ConvexPolytope::addExtremeVertex(): Polytope needs to be closed");
1016 ArrayVector<node_type> lit_facets = this->litFacets(p);
1017 if (lit_facets.size() > 0)
1018 {
1019 std::set<node_type> open_facets;
1020 for (node_type lit_facet : lit_facets)
1021 {
1022 for (auto con : aligns_map_[lit_facet])
1023 {
1024 if (con != lemon::INVALID)
1025 {
1026 vigra_assert(
1027 type_map_[con] == FACET,
1028 "ConvexPolytope::addExtremeVertex(): "
1029 "facet not a facet");
1030 open_facets.insert(con);
1031 }
1032 }
1033 open_facets.erase(lit_facet);
1034 this->eraseFacet(lit_facet);
1035 }
1036 this->tidyUp();
1037 node_type new_vertex = this->addVertex(p);
1038 for (auto open_facet : open_facets)
1039 {
1040 this->closeFacet(new_vertex, open_facet);
1041 }
1042 }
1043 }
1044 };
1045
1046 } /* namespace vigra */
1047
1048 #endif /* VIGRA_POLYTOPE_HXX */
5252 signal(SIGSEGV, &vigra_print_backtrace); // catch the desired signal
5353
5454 run_buggy_code();
55 }
55 }
5656 */
5757
5858 #include <execinfo.h>
5959 #include <stdio.h>
6060 #include <stdlib.h>
61
61
6262
6363 static char * program_name;
6464
429429 indices_(maxSize_+1, -1),
430430 priorities_(maxSize_+1)
431431 {
432 for(int i = 0; i <= maxSize_; i++)
432 for(unsigned i = 0; i <= maxSize_; i++)
433433 indices_[i] = -1;
434434 }
435435
551551
552552 void bubbleDown(int k) {
553553 int j;
554 while(2*k <= currentSize_) {
554 while(static_cast<unsigned>(2*k) <= currentSize_) {
555555 j = 2*k;
556 if(j < currentSize_ && _gt(priorities_[heap_[j]] , priorities_[heap_[j+1]]) )
556 if(static_cast<unsigned>(j) < currentSize_ && _gt(priorities_[heap_[j]] , priorities_[heap_[j+1]]) )
557557 j++;
558558 if( _leqt(priorities_[heap_[k]] , priorities_[heap_[j]]))
559559 break;
8585
8686 }
8787
88 vigra_assert(linearSolve(A, b, res),
88 bool solvable = linearSolve(A, b, res);
89 // silence unused variable warning in release mode
90 static_cast<void>(solvable);
91 vigra_assert(solvable,
8992 "projectiveMatrix2DFromCorrespondingPoints(): singular solution matrix.");
9093
9194 linalg::TemporaryMatrix<double> projectiveMat(3,3);
7777 struct NodeHolder : GRAPH::Node
7878 {
7979 typedef typename GRAPH::Node Node;
80 NodeHolder(const lemon::Invalid & iv = lemon::INVALID)
80 NodeHolder(const lemon::Invalid & /*iv*/ = lemon::INVALID)
8181 : Node(lemon::INVALID),
8282 graph_(NULL)
8383 {}
105105 {
106106
107107 typedef typename GRAPH::Edge Edge;
108 EdgeHolder(const lemon::Invalid & iv = lemon::INVALID)
108 EdgeHolder(const lemon::Invalid & /*iv*/ = lemon::INVALID)
109109 : Edge(lemon::INVALID),
110110 graph_(NULL)
111111 {}
138138 template<class GRAPH>
139139 struct ArcHolder: GRAPH::Arc {
140140 typedef typename GRAPH::Arc Arc;
141 ArcHolder(const lemon::Invalid & iv = lemon::INVALID)
141 ArcHolder(const lemon::Invalid & /*iv*/ = lemon::INVALID)
142142 : Arc(lemon::INVALID),
143143 graph_(NULL)
144144 {}
530530 return NumpyArray<AD,int>::ArrayTraits::taggedShape(IntrinsicGraphShape<Graph>::intrinsicArcMapShape(graph),"e");
531531 }
532532
533 static AxisInfo axistagsNodeMap(const Graph & graph){
533 static AxisInfo axistagsNodeMap(const Graph & /*graph*/){
534534 return AxisInfo("n");
535535 }
536 static AxisInfo axistagsEdgeMap(const Graph & graph){
536 static AxisInfo axistagsEdgeMap(const Graph & /*graph*/){
537537 return AxisInfo("e");
538538 }
539 static AxisTags axistagsArcMap(const Graph & graph){
539 static AxisTags axistagsArcMap(const Graph & /*graph*/){
540540 return AxisInfo("e");
541541 }
542542 };
560560 static TaggedShape taggedArcMapShape(const Graph & graph){ \
561561 return NumpyArray<AD,int>::ArrayTraits::taggedShape(IntrinsicGraphShape<Graph>::intrinsicArcMapShape(graph),ta); \
562562 } \
563 static AxisInfo axistagsNodeMap(const Graph & graph){ \
563 static AxisInfo axistagsNodeMap(const Graph & /*graph*/){ \
564564 return AxisInfo(tn); \
565565 } \
566 static AxisInfo axistagsEdgeMap(const Graph & graph){ \
566 static AxisInfo axistagsEdgeMap(const Graph & /*graph*/){ \
567567 return AxisInfo(te); \
568568 } \
569 static AxisTags axistagsArcMap(const Graph & graph){ \
569 static AxisTags axistagsArcMap(const Graph & /*graph*/){ \
570570 return AxisInfo(ta); \
571571 } \
572572 };
234234 inline python_ptr pythonFromData(char const * str)
235235 {
236236 #if PY_MAJOR_VERSION < 3
237 return python_ptr(PyString_FromString(str), python_ptr::new_nonzero_reference);
237 return python_ptr(PyString_FromString(str), python_ptr::new_nonzero_reference);
238238 #else
239 return python_ptr(PyUnicode_FromString(str), python_ptr::new_nonzero_reference);
239 return python_ptr(PyUnicode_FromString(str), python_ptr::new_nonzero_reference);
240240 #endif
241241 }
242242
243243 inline python_ptr pythonFromData(std::string const & str)
244244 {
245 return pythonFromData(str.c_str());
245 return pythonFromData(str.c_str());
246246 }
247247
248248 #define VIGRA_PYTHON_FROM_DATA(type, fct, cast_type) \
338338 return data && PyString_Check(data)
339339 ? std::string(PyString_AsString(data))
340340 #else
341 python_ptr ascii(PyUnicode_AsASCIIString(data), python_ptr::keep_count);
341 python_ptr ascii(PyUnicode_AsASCIIString(data), python_ptr::keep_count);
342342 return data && PyBytes_Check(ascii)
343343 ? std::string(PyBytes_AsString(ascii))
344344 #endif
351351 return data && PyString_Check(data)
352352 ? std::string(PyString_AsString(data))
353353 #else
354 python_ptr ascii(PyUnicode_AsASCIIString(data), python_ptr::keep_count);
355 return data && PyBytes_Check(ascii)
356 ? std::string(PyBytes_AsString(ascii))
354 python_ptr ascii(PyUnicode_AsASCIIString(data), python_ptr::keep_count);
355 return data && PyBytes_Check(ascii)
356 ? std::string(PyBytes_AsString(ascii))
357357 #endif
358358 : defaultVal;
359359 }
996996 {
997997 public:
998998 template<class Nde>
999 bool operator()(Nde & cur, int level, Nde parent, bool infm)
999 bool operator()(Nde & cur, int /*level*/, Nde parent, bool /*infm*/)
10001000 {
10011001 if(parent.hasData_)
10021002 cur.status() = std::min(parent.status(), cur.status());
10441044 }
10451045
10461046 template<class Nde>
1047 bool operator()(Nde & cur, int level, Nde parent, bool infm)
1047 bool operator()(Nde & cur, int /*level*/, Nde parent, bool /*infm*/)
10481048 {
10491049 graphviz << "node" << cur.index() << " [style=\"filled\"][label = \" #Feats: "<< cur.columns_size() << "\\n";
10501050 graphviz << " status: " << cur.status() << "\\n";
11031103 /** Allocate enough memory
11041104 */
11051105 template<class RF, class PR>
1106 void visit_at_beginning(RF const & rf, PR const & pr)
1106 void visit_at_beginning(RF const & rf, PR const & /*pr*/)
11071107 {
11081108 Int32 const class_count = rf.ext_param_.class_count_;
11091109 Int32 const column_count = rf.ext_param_.column_count_+1;
11281128 * \sa FieldProxy
11291129 */
11301130 template<class RF, class PR, class SM, class ST>
1131 void after_tree_ip_impl(RF& rf, PR & pr, SM & sm, ST & st, int index)
1131 void after_tree_ip_impl(RF& rf, PR & pr, SM & sm, ST & /*st*/, int index)
11321132 {
11331133 typedef MultiArrayShape<2>::type Shp_t;
11341134 Int32 column_count = rf.ext_param_.column_count_ +1;
12281228 /** Normalise variable importance after the number of trees is known.
12291229 */
12301230 template<class RF, class PR>
1231 void visit_at_end(RF & rf, PR & pr)
1231 void visit_at_end(RF & rf, PR & /*pr*/)
12321232 {
12331233 NormalizeStatus nrm(rf.tree_count());
12341234 clustering.iterate(nrm);
7373 };
7474
7575 /* \brief chooses between default type and type supplied
76 *
76 *
7777 * This is an internal class and you shouldn't really care about it.
7878 * Just pass on used in RandomForest.learn()
7979 * Usage:
8080 *\code
81 * // example: use container type supplied by user or ArrayVector if
81 * // example: use container type supplied by user or ArrayVector if
8282 * // rf_default() was specified as argument;
8383 * template<class Container_t>
8484 * void do_some_foo(Container_t in)
8585 * {
8686 * typedef ArrayVector<int> Default_Container_t;
8787 * Default_Container_t default_value;
88 * Value_Chooser<Container_t, Default_Container_t>
88 * Value_Chooser<Container_t, Default_Container_t>
8989 * choose(in, default_value);
9090 *
91 * // if the user didn't care and the in was of type
91 * // if the user didn't care and the in was of type
9292 * // RF_DEFAULT then default_value is used.
9393 * do_some_more_foo(choose.value());
9494 * }
102102 typedef T type;
103103 static T & choose(T & t, C &)
104104 {
105 return t;
105 return t;
106106 }
107107 };
108108
111111 {
112112 public:
113113 typedef C type;
114
114
115115 static C & choose(detail::RF_DEFAULT &, C & c)
116116 {
117 return c;
117 return c;
118118 }
119119 };
120120
147147 RF_ALL};
148148
149149
150 /** \addtogroup MachineLearning
150 /** \addtogroup MachineLearning
151151 **/
152152 //@{
153153
193193 int mtry_;
194194 int (*mtry_func_)(int) ;
195195
196 bool predict_weighted_;
196 bool predict_weighted_;
197197 int tree_count_;
198198 int min_split_node_size_;
199199 bool prepare_online_learning_;
206206 {
207207 return 12;
208208 }
209
209
210210
211211 bool operator==(RandomForestOptions & rhs) const
212212 {
213213 bool result = true;
214 #define COMPARE(field) result = result && (this->field == rhs.field);
214 #define COMPARE(field) result = result && (this->field == rhs.field);
215215 COMPARE(training_set_proportion_);
216216 COMPARE(training_set_size_);
217217 COMPARE(training_set_calc_switch_);
234234 void unserialize(Iter const & begin, Iter const & end)
235235 {
236236 Iter iter = begin;
237 vigra_precondition(static_cast<int>(end - begin) == serialized_size(),
237 vigra_precondition(static_cast<int>(end - begin) == serialized_size(),
238238 "RandomForestOptions::unserialize():"
239239 "wrong number of parameters");
240240 #define PULL(item_, type_) item_ = type_(*iter); ++iter;
256256 void serialize(Iter const & begin, Iter const & end) const
257257 {
258258 Iter iter = begin;
259 vigra_precondition(static_cast<int>(end - begin) == serialized_size(),
259 vigra_precondition(static_cast<int>(end - begin) == serialized_size(),
260260 "RandomForestOptions::serialize():"
261261 "wrong number of parameters");
262262 #define PUSH(item_) *iter = double(item_); ++iter;
288288 PUSH(predict_weighted_);
289289 #undef PUSH
290290 }
291
291
292292 void make_from_map(map_type & in) // -> const: .operator[] -> .find
293293 {
294 #define PULL(item_, type_) item_ = type_(in[#item_][0]);
295 #define PULLBOOL(item_, type_) item_ = type_(in[#item_][0] > 0);
294 #define PULL(item_, type_) item_ = type_(in[#item_][0]);
295 #define PULLBOOL(item_, type_) item_ = type_(in[#item_][0] > 0);
296296 PULL(training_set_proportion_,double);
297297 PULL(training_set_size_, int);
298298 PULL(mtry_, int);
301301 PULLBOOL(sample_with_replacement_, bool);
302302 PULLBOOL(prepare_online_learning_, bool);
303303 PULLBOOL(predict_weighted_, bool);
304
304
305305 PULL(training_set_calc_switch_, (RF_OptionTag)(int));
306306
307307 PULL(stratification_method_, (RF_OptionTag)(int));
308308 PULL(mtry_switch_, (RF_OptionTag)(int));
309
309
310310 /*don't pull*/
311311 //PULL(mtry_func_!=0, int);
312312 //PULL(training_set_func,int);
325325 PUSH(sample_with_replacement_, bool);
326326 PUSH(prepare_online_learning_, bool);
327327 PUSH(predict_weighted_, bool);
328
328
329329 PUSH(training_set_calc_switch_, RF_OptionTag);
330330 PUSH(stratification_method_, RF_OptionTag);
331331 PUSH(mtry_switch_, RF_OptionTag);
332
332
333333 PUSHFUNC(mtry_func_, int);
334334 PUSHFUNC(training_set_func_,int);
335335 #undef PUSH
354354 mtry_(0),
355355 mtry_func_(0),
356356 predict_weighted_(false),
357 tree_count_(256),
357 tree_count_(255),
358358 min_split_node_size_(1),
359359 prepare_online_learning_(false)
360360 {}
399399 return *this;
400400 }
401401
402 /**\brief specify the fraction of the total number of samples
403 * used per tree for learning.
402 /**\brief specify the fraction of the total number of samples
403 * used per tree for learning.
404404 *
405405 * This value should be in [0.0 1.0] if sampling without
406406 * replacement has been specified.
415415 }
416416
417417 /**\brief directly specify the number of samples per tree
418 *
419 * This value should not be higher than the total number of
420 * samples if sampling without replacement has been specified.
418421 */
419422 RandomForestOptions & samples_per_tree(int in)
420423 {
435438 training_set_calc_switch_ = RF_FUNCTION;
436439 return *this;
437440 }
438
441
439442 /**\brief weight each tree with number of samples in that node
440443 */
441444 RandomForestOptions & predict_weighted()
448451 *
449452 * Use one of the built in mappings to calculate mtry from the number
450453 * of columns in the input feature data.
451 * \param in possible values: RF_LOG, RF_SQRT or RF_ALL
452 * <br> default: RF_SQRT.
454 * \param in possible values:
455 * - RF_LOG (the number of features considered for each split is \f$ 1+\lfloor \log(n_f)/\log(2) \rfloor \f$ as in Breiman's original paper),
456 * - RF_SQRT (default, the number of features considered for each split is \f$ \lfloor \sqrt{n_f} + 0.5 \rfloor \f$)
457 * - RF_ALL (all features are considered for each split)
453458 */
454459 RandomForestOptions & features_per_node(RF_OptionTag in)
455460 {
491496 *
492497 * <br> Default: 255.
493498 */
494 RandomForestOptions & tree_count(int in)
499 RandomForestOptions & tree_count(unsigned int in)
495500 {
496501 tree_count_ = in;
497502 return *this;
513518 };
514519
515520
516 /** \brief problem types
521 /* \brief problem types
517522 */
518523 enum Problem_t{REGRESSION, CLASSIFICATION, CHECKLATER};
519524
551556 int actual_msample_; // number if in-bag samples per tree
552557
553558 Problem_t problem_type_; // classification or regression
554
559
555560 int used_; // this ProblemSpec is valid
556561 ArrayVector<double> class_weights_; // if classes have different importance
557562 int is_weighted_; // class_weights_ are used
558563 double precision_; // termination criterion for regression loss
559 int response_size_;
560
561 template<class T>
564 int response_size_;
565
566 template<class T>
562567 void to_classlabel(int index, T & out) const
563568 {
564569 out = T(classes[index]);
565570 }
566 template<class T>
571 template<class T>
567572 int to_classIndex(T index) const
568573 {
569574 return std::find(classes.begin(), classes.end(), index) - classes.begin();
571576
572577 #define EQUALS(field) field(rhs.field)
573578 ProblemSpec(ProblemSpec const & rhs)
574 :
579 :
575580 EQUALS(column_count_),
576581 EQUALS(class_count_),
577582 EQUALS(row_count_),
586591 {
587592 std::back_insert_iterator<ArrayVector<Label_t> >
588593 iter(classes);
589 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
594 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
590595 }
591596 #undef EQUALS
592597 #define EQUALS(field) field(rhs.field)
593598 template<class T>
594599 ProblemSpec(ProblemSpec<T> const & rhs)
595 :
600 :
596601 EQUALS(column_count_),
597602 EQUALS(class_count_),
598603 EQUALS(row_count_),
607612 {
608613 std::back_insert_iterator<ArrayVector<Label_t> >
609614 iter(classes);
610 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
615 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
611616 }
612617 #undef EQUALS
613618
627632 class_weights_.clear();
628633 std::back_insert_iterator<ArrayVector<double> >
629634 iter2(class_weights_);
630 std::copy(rhs.class_weights_.begin(), rhs.class_weights_.end(), iter2);
635 std::copy(rhs.class_weights_.begin(), rhs.class_weights_.end(), iter2);
631636 classes.clear();
632637 std::back_insert_iterator<ArrayVector<Label_t> >
633638 iter(classes);
634 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
639 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
635640 return *this;
636641 }
637642
651656 class_weights_.clear();
652657 std::back_insert_iterator<ArrayVector<double> >
653658 iter2(class_weights_);
654 std::copy(rhs.class_weights_.begin(), rhs.class_weights_.end(), iter2);
659 std::copy(rhs.class_weights_.begin(), rhs.class_weights_.end(), iter2);
655660 classes.clear();
656661 std::back_insert_iterator<ArrayVector<Label_t> >
657662 iter(classes);
658 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
663 std::copy(rhs.classes.begin(), rhs.classes.end(), iter);
659664 return *this;
660665 }
661666 #undef EQUALS
697702 void unserialize(Iter const & begin, Iter const & end)
698703 {
699704 Iter iter = begin;
700 vigra_precondition(end - begin >= 10,
705 vigra_precondition(end - begin >= 10,
701706 "ProblemSpec::unserialize():"
702707 "wrong number of parameters");
703708 #define PULL(item_, type_) item_ = type_(*iter); ++iter;
704709 PULL(column_count_,int);
705710 PULL(class_count_, int);
706711
707 vigra_precondition(end - begin >= 10 + class_count_,
712 vigra_precondition(end - begin >= 10 + class_count_,
708713 "ProblemSpec::unserialize(): 1");
709714 PULL(row_count_, int);
710715 PULL(actual_mtry_,int);
716721 PULL(response_size_, int);
717722 if(is_weighted_)
718723 {
719 vigra_precondition(end - begin == 10 + 2*class_count_,
724 vigra_precondition(end - begin == 10 + 2*class_count_,
720725 "ProblemSpec::unserialize(): 2");
721726 class_weights_.insert(class_weights_.end(),
722 iter,
727 iter,
723728 iter + class_count_);
724 iter += class_count_;
729 iter += class_count_;
725730 }
726731 classes.insert(classes.end(), iter, end);
727732 #undef PULL
732737 void serialize(Iter const & begin, Iter const & end) const
733738 {
734739 Iter iter = begin;
735 vigra_precondition(end - begin == serialized_size(),
740 vigra_precondition(end - begin == serialized_size(),
736741 "RandomForestOptions::serialize():"
737742 "wrong number of parameters");
738743 #define PUSH(item_) *iter = double(item_); ++iter;
751756 std::copy(class_weights_.begin(),
752757 class_weights_.end(),
753758 iter);
754 iter += class_count_;
759 iter += class_count_;
755760 }
756761 std::copy(classes.begin(),
757762 classes.end(),
761766
762767 void make_from_map(map_type & in) // -> const: .operator[] -> .find
763768 {
764 #define PULL(item_, type_) item_ = type_(in[#item_][0]);
769 #define PULL(item_, type_) item_ = type_(in[#item_][0]);
765770 PULL(column_count_,int);
766771 PULL(class_count_, int);
767772 PULL(row_count_, int);
791796 in["class_weights_"] = class_weights_;
792797 #undef PUSH
793798 }
794
799
795800 /**\brief set default values (-> values not set)
796801 */
797802 ProblemSpec()
815820 }
816821
817822 /**\brief supply with class labels -
818 *
823 *
819824 * the preprocessor will not calculate the labels needed in this case.
820825 */
821826 template<class C_Iter>
831836
832837 /** \brief supply with class weights -
833838 *
834 * this is the only case where you would really have to
839 * this is the only case where you would really have to
835840 * create a ProblemSpec object.
836841 */
837842 template<class W_Iter>
847852
848853 void clear()
849854 {
850 used_ = false;
855 used_ = false;
851856 classes.clear();
852857 class_weights_.clear();
853858 column_count_ = 0 ;
899904 template<class WeightIter, class T, class C>
900905 bool after_prediction(WeightIter, int /* k */, MultiArrayView<2, T, C> /* prob */, double /* totalCt */)
901906 {
902 return false;
907 return false;
903908 }
904909 };
905910
4040 #include <numeric>
4141 #include "vigra/multi_array.hxx"
4242 #include "vigra/mathutil.hxx"
43 #include "vigra/metaprogramming.hxx"
4344 #include "vigra/array_vector.hxx"
4445 #include "vigra/sized_int.hxx"
4546 #include "vigra/matrix.hxx"
215216 template<class Visitor_t>
216217 void traverse_mem_order(Visitor_t visitor) const
217218 {
218 TreeInt index = 2;
219 Int32 ii = 0;
219 UInt32 index = 2;
220220 while(index < topology_.size())
221221 {
222222 if(isLeafNode(topology_[index]))
233233 }
234234
235235 template<class Visitor_t>
236 void traverse_post_order(Visitor_t visitor, TreeInt start = 2) const
236 void traverse_post_order(Visitor_t visitor, TreeInt /*start*/ = 2) const
237237 {
238238 typedef TinyVector<double, 2> Entry;
239239 std::vector<Entry > stack;
450450 //copy the newly created node form the split functor to the
451451 //decision tree.
452452 NodeBase node(split.createNode(), topology_, parameters_ );
453 ignore_argument(node);
453454 }
454455 if(garbaged_child!=-1)
455456 {
178178 SB::set_external_parameters(prob, tree_count, is_weighted);
179179 }
180180 template<class WeightIter, class T, class C>
181 bool after_prediction(WeightIter iter, int k, MultiArrayView<2, T, C> const & prob, double totalCt)
181 bool after_prediction(WeightIter, int k, MultiArrayView<2, T, C> const & prob, double)
182182 {
183183 if(k == SB::tree_count_ -1)
184184 {
303303 }
304304
305305 template<class WeightIter, class T, class C>
306 bool after_prediction(WeightIter iter, int k,
307 MultiArrayView<2, T, C> const &prob, double totalCt)
306 bool after_prediction(WeightIter, int k,
307 MultiArrayView<2, T, C> const &prob, double)
308308 {
309309 if(k == SB::tree_count_ -1)
310310 {
384384 }
385385
386386 template<class WeightIter, class T, class C>
387 bool after_prediction(WeightIter iter, int k, MultiArrayView<2, T, C> prob, double totalCt)
387 bool after_prediction(WeightIter, int k, MultiArrayView<2, T, C> prob, double)
388388 {
389389 if(k == SB::tree_count_ -1)
390390 {
442442
443443 template<class T>
444444 void set_external_parameters(ProblemSpec<T> const &,
445 int tree_count = 0, bool /* is_weighted_ */= false)
445 int /*tree_count*/ = 0, bool /* is_weighted_ */= false)
446446 {}
447447
448448 template<class Region>
6565 {
6666 public:
6767 template<class Iter>
68 static void exec(Iter begin, Iter end)
68 static void exec(Iter /*begin*/, Iter /*end*/)
6969 {}
7070 };
7171
146146 **/
147147
148148 template<class T, class C, class T2, class C2, class Region, class Random>
149 int findBestSplit(MultiArrayView<2, T, C> features,
150 MultiArrayView<2, T2, C2> labels,
151 Region region,
152 ArrayVector<Region> childs,
153 Random randint)
149 int findBestSplit(MultiArrayView<2, T, C> /*features*/,
150 MultiArrayView<2, T2, C2> /*labels*/,
151 Region /*region*/,
152 ArrayVector<Region> /*childs*/,
153 Random /*randint*/)
154154 {
155155 #ifndef __clang__
156156 // FIXME: This compile-time checking trick does not work for clang.
546546 }
547547
548548 template<class Iter, class Resp_t>
549 double init (Iter begin, Iter end, Resp_t resp)
549 double init (Iter /*begin*/, Iter /*end*/, Resp_t resp)
550550 {
551551 reset();
552552 std::copy(resp.begin(), resp.end(), counts_.begin());
644644
645645
646646 template<class Iter, class Resp_t>
647 double init (Iter begin, Iter end, Resp_t resp)
647 double init (Iter begin, Iter end, Resp_t /*resp*/)
648648 {
649649 reset();
650650 return this->increment(begin, end);
962962 struct Correction
963963 {
964964 template<class Region, class LabelT>
965 static void exec(Region & in, LabelT & labels)
965 static void exec(Region & /*in*/, LabelT & /*labels*/)
966966 {}
967967 };
968968
4141 #include <iostream>
4242 #include <iomanip>
4343
44 #include <vigra/metaprogramming.hxx>
4445 #include <vigra/multi_pointoperators.hxx>
4546 #include <vigra/timing.hxx>
4647
4849 {
4950 namespace rf
5051 {
51 /** \addtogroup MachineLearning Machine Learning
52 **/
53 //@{
54
55 /**
56 This namespace contains all classes and methods related to extracting information during
57 learning of the random forest. All Visitors share the same interface defined in
58 visitors::VisitorBase. The member methods are invoked at certain points of the main code in
52 /** \brief Visitors to extract information during training of \ref vigra::RandomForest version 2.
53
54 \ingroup MachineLearning
55
56 This namespace contains all classes and methods related to extracting information during
57 learning of the random forest. All Visitors share the same interface defined in
58 visitors::VisitorBase. The member methods are invoked at certain points of the main code in
5959 the order they were supplied.
60
61 For the Random Forest the Visitor concept is implemented as a statically linked list
62 (Using templates). Each Visitor object is encapsulated in a detail::VisitorNode object. The
60
61 For the Random Forest the Visitor concept is implemented as a statically linked list
62 (Using templates). Each Visitor object is encapsulated in a detail::VisitorNode object. The
6363 VisitorNode object calls the Next Visitor after one of its visit() methods have terminated.
64
64
6565 To simplify usage create_visitor() factory methods are supplied.
6666 Use the create_visitor() method to supply visitor objects to the RandomForest::learn() method.
6767 It is possible to supply more than one visitor. They will then be invoked in serial order.
6868
6969 The calculated information are stored as public data members of the class. - see documentation
7070 of the individual visitors
71
72 While creating a new visitor the new class should therefore publicly inherit from this class
71
72 While creating a new visitor the new class should therefore publicly inherit from this class
7373 (i.e.: see visitors::OOB_Error).
7474
7575 \code
7676
7777 typedef xxx feature_t \\ replace xxx with whichever type
78 typedef yyy label_t \\ meme chose.
78 typedef yyy label_t \\ meme chose.
7979 MultiArrayView<2, feature_t> f = get_some_features();
8080 MultiArrayView<2, label_t> l = get_some_labels();
8181 RandomForest<> rf()
82
82
8383 //calculate OOB Error
8484 visitors::OOB_Error oob_v;
8585 //calculate Variable Importance
8787
8888 double oob_error = rf.learn(f, l, visitors::create_visitor(oob_v, varimp_v);
8989 //the data can be found in the attributes of oob_v and varimp_v now
90
90
9191 \endcode
9292 */
9393 namespace visitors
9494 {
95
96
97 /** Base Class from which all Visitors derive. Can be used as a template to create new
95
96
97 /** Base Class from which all Visitors derive. Can be used as a template to create new
9898 * Visitors.
9999 */
100100 class VisitorBase
101101 {
102102 public:
103 bool active_;
103 bool active_;
104104 bool is_active()
105105 {
106106 return active_;
123123 {
124124 active_ = true;
125125 }
126
126
127127 /** do something after the the Split has decided how to process the Region
128128 * (Stack entry)
129129 *
138138 * \sa RF_Traits::StackEntry_t
139139 */
140140 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
141 void visit_after_split( Tree & tree,
141 void visit_after_split( Tree & tree,
142142 Split & split,
143143 Region & parent,
144144 Region & leftChild,
145145 Region & rightChild,
146146 Feature_t & features,
147147 Label_t & labels)
148 {}
149
148 {
149 ignore_argument(tree,split,parent,leftChild,rightChild,features,labels);
150 }
151
150152 /** do something after each tree has been learned
151153 *
152154 * \param rf reference to the random forest object that called this
158160 */
159161 template<class RF, class PR, class SM, class ST>
160162 void visit_after_tree(RF & rf, PR & pr, SM & sm, ST & st, int index)
161 {}
162
163 {
164 ignore_argument(rf,pr,sm,st,index);
165 }
166
163167 /** do something after all trees have been learned
164168 *
165169 * \param rf reference to the random forest object that called this
168172 */
169173 template<class RF, class PR>
170174 void visit_at_end(RF const & rf, PR const & pr)
171 {}
172
173 /** do something before learning starts
175 {
176 ignore_argument(rf,pr);
177 }
178
179 /** do something before learning starts
174180 *
175181 * \param rf reference to the random forest object that called this
176182 * visitor
178184 */
179185 template<class RF, class PR>
180186 void visit_at_beginning(RF const & rf, PR const & pr)
181 {}
182 /** do some thing while traversing tree after it has been learned
187 {
188 ignore_argument(rf,pr);
189 }
190 /** do some thing while traversing tree after it has been learned
183191 * (external nodes)
184192 *
185193 * \param tr reference to the tree object that called this visitor
188196 * \param features feature matrix
189197 * \sa NodeTags;
190198 *
191 * you can create the node by using a switch on node_tag and using the
192 * corresponding Node objects. Or - if you do not care about the type
199 * you can create the node by using a switch on node_tag and using the
200 * corresponding Node objects. Or - if you do not care about the type
193201 * use the NodeBase class.
194202 */
195203 template<class TR, class IntT, class TopT,class Feat>
196204 void visit_external_node(TR & tr, IntT index, TopT node_t, Feat & features)
197 {}
198
205 {
206 ignore_argument(tr,index,node_t,features);
207 }
208
199209 /** do something when visiting a internal node after it has been learned
200210 *
201211 * \sa visit_external_node
204214 void visit_internal_node(TR & /* tr */, IntT /* index */, TopT /* node_t */, Feat & /* features */)
205215 {}
206216
207 /** return a double value. The value of the first
217 /** return a double value. The value of the first
208218 * visitor encountered that has a return value is returned with the
209219 * RandomForest::learn() method - or -1.0 if no return value visitor
210 * existed. This functionality basically only exists so that the
211 * OOB - visitor can return the oob error rate like in the old version
220 * existed. This functionality basically only exists so that the
221 * OOB - visitor can return the oob error rate like in the old version
212222 * of the random forest.
213223 */
214224 double return_val()
243253 class VisitorNode
244254 {
245255 public:
246
256
247257 StopVisiting stop_;
248258 Next next_;
249 Visitor & visitor_;
250 VisitorNode(Visitor & visitor, Next & next)
251 :
259 Visitor & visitor_;
260 VisitorNode(Visitor & visitor, Next & next)
261 :
252262 next_(next), visitor_(visitor)
253263 {}
254264
255 VisitorNode(Visitor & visitor)
256 :
265 VisitorNode(Visitor & visitor)
266 :
257267 next_(stop_), visitor_(visitor)
258268 {}
259269
260270 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
261 void visit_after_split( Tree & tree,
271 void visit_after_split( Tree & tree,
262272 Split & split,
263273 Region & parent,
264274 Region & leftChild,
267277 Label_t & labels)
268278 {
269279 if(visitor_.is_active())
270 visitor_.visit_after_split(tree, split,
280 visitor_.visit_after_split(tree, split,
271281 parent, leftChild, rightChild,
272282 features, labels);
273283 next_.visit_after_split(tree, split, parent, leftChild, rightChild,
296306 visitor_.visit_at_end(rf, pr);
297307 next_.visit_at_end(rf, pr);
298308 }
299
309
300310 template<class TR, class IntT, class TopT,class Feat>
301311 void visit_external_node(TR & tr, IntT & index, TopT & node_t,Feat & features)
302312 {
371381 /** factory method to to be used with RandomForest::learn()
372382 */
373383 template<class A, class B, class C, class D>
374 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
384 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
375385 detail::VisitorNode<D> > > >
376386 create_visitor(A & a, B & b, C & c, D & d)
377387 {
390400 /** factory method to to be used with RandomForest::learn()
391401 */
392402 template<class A, class B, class C, class D, class E>
393 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
403 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
394404 detail::VisitorNode<D, detail::VisitorNode<E> > > > >
395 create_visitor(A & a, B & b, C & c,
405 create_visitor(A & a, B & b, C & c,
396406 D & d, E & e)
397407 {
398408 typedef detail::VisitorNode<E> _4_t;
413423 */
414424 template<class A, class B, class C, class D, class E,
415425 class F>
416 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
426 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
417427 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F> > > > > >
418 create_visitor(A & a, B & b, C & c,
428 create_visitor(A & a, B & b, C & c,
419429 D & d, E & e, F & f)
420430 {
421431 typedef detail::VisitorNode<F> _5_t;
438448 */
439449 template<class A, class B, class C, class D, class E,
440450 class F, class G>
441 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
442 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
451 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
452 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
443453 detail::VisitorNode<G> > > > > > >
444 create_visitor(A & a, B & b, C & c,
454 create_visitor(A & a, B & b, C & c,
445455 D & d, E & e, F & f, G & g)
446456 {
447457 typedef detail::VisitorNode<G> _6_t;
466476 */
467477 template<class A, class B, class C, class D, class E,
468478 class F, class G, class H>
469 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
470 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
479 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
480 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
471481 detail::VisitorNode<G, detail::VisitorNode<H> > > > > > > >
472 create_visitor(A & a, B & b, C & c,
473 D & d, E & e, F & f,
482 create_visitor(A & a, B & b, C & c,
483 D & d, E & e, F & f,
474484 G & g, H & h)
475485 {
476486 typedef detail::VisitorNode<H> _7_t;
497507 */
498508 template<class A, class B, class C, class D, class E,
499509 class F, class G, class H, class I>
500 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
501 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
510 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
511 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
502512 detail::VisitorNode<G, detail::VisitorNode<H, detail::VisitorNode<I> > > > > > > > >
503 create_visitor(A & a, B & b, C & c,
504 D & d, E & e, F & f,
513 create_visitor(A & a, B & b, C & c,
514 D & d, E & e, F & f,
505515 G & g, H & h, I & i)
506516 {
507517 typedef detail::VisitorNode<I> _8_t;
529539 */
530540 template<class A, class B, class C, class D, class E,
531541 class F, class G, class H, class I, class J>
532 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
533 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
542 detail::VisitorNode<A, detail::VisitorNode<B, detail::VisitorNode<C,
543 detail::VisitorNode<D, detail::VisitorNode<E, detail::VisitorNode<F,
534544 detail::VisitorNode<G, detail::VisitorNode<H, detail::VisitorNode<I,
535545 detail::VisitorNode<J> > > > > > > > > >
536 create_visitor(A & a, B & b, C & c,
537 D & d, E & e, F & f,
546 create_visitor(A & a, B & b, C & c,
547 D & d, E & e, F & f,
538548 G & g, H & h, I & i,
539549 J & j)
540550 {
636646 {
637647 tree_id++;
638648 }
639
649
640650 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
641 void visit_after_split( Tree & tree,
651 void visit_after_split( Tree & tree,
642652 Split & split,
643653 Region & parent,
644654 Region & leftChild,
748758 }
749759 }
750760 /** do something when visiting a extern node during getToLeaf
751 *
761 *
752762 * Store the new index!
753763 */
754764 };
759769
760770
761771 /** Visitor that calculates the oob error of each individual randomized
762 * decision tree.
772 * decision tree.
763773 *
764774 * After training a tree, all those samples that are OOB for this particular tree
765 * are put down the tree and the error estimated.
766 * the per tree oob error is the average of the individual error estimates.
775 * are put down the tree and the error estimated.
776 * the per tree oob error is the average of the individual error estimates.
767777 * (oobError = average error of one randomized tree)
768 * Note: This is Not the OOB - Error estimate suggested by Breiman (See OOB_Error
778 * Note: This is Not the OOB - Error estimate suggested by Breiman (See OOB_Error
769779 * visitor)
770780 */
771781 class OOB_PerTreeError:public VisitorBase
792802
793803 /** does the basic calculation per tree*/
794804 template<class RF, class PR, class SM, class ST>
795 void visit_after_tree(RF & rf, PR & pr, SM & sm, ST & st, int index)
805 void visit_after_tree(RF & rf, PR & pr, SM & sm, ST &, int index)
796806 {
797807 //do the first time called.
798808 if(int(oobCount.size()) != rf.ext_param_.row_count_)
808818 {
809819 ++oobCount[l];
810820 if( rf.tree(index)
811 .predictLabel(rowVector(pr.features(), l))
821 .predictLabel(rowVector(pr.features(), l))
812822 != pr.response()(l,0))
813823 {
814824 ++oobErrorCount[l];
821831 /** Does the normalisation
822832 */
823833 template<class RF, class PR>
824 void visit_at_end(RF & rf, PR & pr)
834 void visit_at_end(RF & rf, PR &)
825835 {
826836 // do some normalisation
827837 for(int l=0; l < static_cast<int>(rf.ext_param_.row_count_); ++l)
831841 oobError += double(oobErrorCount[l]) / oobCount[l];
832842 ++totalOobCount;
833843 }
834 }
844 }
835845 oobError/=totalOobCount;
836846 }
837
847
838848 };
839849
840850 /** Visitor that calculates the oob error of the ensemble
841 * This rate should be used to estimate the crossvalidation
851 *
852 * This rate serves as a quick estimate for the crossvalidation
842853 * error rate.
843 * Here each sample is put down those trees, for which this sample
844 * is OOB i.e. if sample #1 is OOB for trees 1, 3 and 5 we calculate
845 * the output using the ensemble consisting only of trees 1 3 and 5.
854 * Here, each sample is put down the trees for which this sample
855 * is OOB, i.e., if sample #1 is OOB for trees 1, 3 and 5, we calculate
856 * the output using the ensemble consisting only of trees 1 3 and 5.
846857 *
847 * Using normal bagged sampling each sample is OOB for approx. 33% of trees
848 * The error rate obtained as such therefore corresponds to crossvalidation
858 * Using normal bagged sampling each sample is OOB for approx. 33% of trees.
859 * The error rate obtained as such therefore corresponds to a crossvalidation
849860 * rate obtained using a ensemble containing 33% of the trees.
850861 */
851862 class OOB_Error : public VisitorBase
856867 MultiArray<2,double> tmp_prob;
857868 public:
858869
859 MultiArray<2, double> prob_oob;
870 MultiArray<2, double> prob_oob;
860871 /** Ensemble oob error rate
861872 */
862873 double oob_breiman;
863874
864875 MultiArray<2, double> oobCount;
865 ArrayVector< int> indices;
876 ArrayVector< int> indices;
866877 OOB_Error() : VisitorBase(), oob_breiman(0.0) {}
867878 #ifdef HasHDF5
868879 void save(std::string filen, std::string pathn)
870881 if(*(pathn.end()-1) != '/')
871882 pathn += "/";
872883 const char* filename = filen.c_str();
873 MultiArray<2, double> temp(Shp(1,1), 0.0);
884 MultiArray<2, double> temp(Shp(1,1), 0.0);
874885 temp[0] = oob_breiman;
875886 writeHDF5(filename, (pathn + "breiman_error").c_str(), temp);
876887 }
879890 // value >=0 if sample was oob, 0 means fail 1, correct
880891
881892 template<class RF, class PR>
882 void visit_at_beginning(RF & rf, PR & pr)
893 void visit_at_beginning(RF & rf, PR &)
883894 {
884895 class_count = rf.class_count();
885 tmp_prob.reshape(Shp(1, class_count), 0);
896 tmp_prob.reshape(Shp(1, class_count), 0);
886897 prob_oob.reshape(Shp(rf.ext_param().row_count_,class_count), 0);
887898 is_weighted = rf.options().predict_weighted_;
888899 indices.resize(rf.ext_param().row_count_);
897908 }
898909
899910 template<class RF, class PR, class SM, class ST>
900 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST & st, int index)
911 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST &, int index)
901912 {
902913 // go through the samples
903914 int total_oob =0;
904915 // FIXME: magic number 10000: invoke special treatment when when msample << sample_count
905916 // (i.e. the OOB sample ist very large)
906 // 40000: use at most 40000 OOB samples per class for OOB error estimate
917 // 40000: use at most 40000 OOB samples per class for OOB error estimate
907918 if(rf.ext_param_.actual_msample_ < pr.features().shape(0) - 10000)
908919 {
909920 ArrayVector<int> oob_indices;
923934 ++oobCount[oob_indices[ll]];
924935
925936 // update number of oob samples in this tree.
926 ++total_oob;
937 ++total_oob;
927938 // get the predicted votes ---> tmp_prob;
928939 int pos = rf.tree(index).getToLeaf(rowVector(pr.features(),oob_indices[ll]));
929 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
940 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
930941 rf.tree(index).parameters_,
931942 pos);
932 tmp_prob.init(0);
943 tmp_prob.init(0);
933944 for(int ii = 0; ii < class_count; ++ii)
934945 {
935946 tmp_prob[ii] = node.prob_begin()[ii];
940951 tmp_prob[ii] = tmp_prob[ii] * (*(node.prob_begin()-1));
941952 }
942953 rowVector(prob_oob, oob_indices[ll]) += tmp_prob;
943
954
944955 }
945956 }else
946957 {
953964 ++oobCount[ll];
954965
955966 // update number of oob samples in this tree.
956 ++total_oob;
967 ++total_oob;
957968 // get the predicted votes ---> tmp_prob;
958969 int pos = rf.tree(index).getToLeaf(rowVector(pr.features(),ll));
959 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
970 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
960971 rf.tree(index).parameters_,
961972 pos);
962 tmp_prob.init(0);
973 tmp_prob.init(0);
963974 for(int ii = 0; ii < class_count; ++ii)
964975 {
965976 tmp_prob[ii] = node.prob_begin()[ii];
973984 }
974985 }
975986 }
976 // go through the ib samples;
987 // go through the ib samples;
977988 }
978989
979990 /** Normalise variable importance after the number of trees is known.
9931004 ++totalOobCount;
9941005 }
9951006 }
996 oob_breiman = double(breimanstyle)/totalOobCount;
1007 oob_breiman = double(breimanstyle)/totalOobCount;
9971008 }
9981009 };
9991010
10171028 /**Standard deviation of oob_per_tree
10181029 */
10191030 double oob_std;
1020
1021 MultiArray<2, double> prob_oob;
1031
1032 MultiArray<2, double> prob_oob;
10221033 /** Ensemble OOB error
10231034 *
10241035 * \sa OOB_Error
10361047 * error rate with increasing number of trees
10371048 */
10381049 MultiArray<2, double> breiman_per_tree;
1039 /** 4 dimensional array containing the development of confusion matrices
1050 /** 4 dimensional array containing the development of confusion matrices
10401051 * with number of trees - can be used to estimate ROC curves etc.
10411052 *
1042 * oobroc_per_tree(ii,jj,kk,ll)
1043 * corresponds true label = ii
1053 * oobroc_per_tree(ii,jj,kk,ll)
1054 * corresponds true label = ii
10441055 * predicted label = jj
10451056 * confusion matrix after ll trees
10461057 *
10531064 * kk = 0. Threshold on probability set by argMax of the probability array.
10541065 */
10551066 MultiArray<4, double> oobroc_per_tree;
1056
1067
10571068 CompleteOOBInfo() : VisitorBase(), oob_mean(0), oob_std(0), oob_per_tree2(0) {}
10581069
10591070 #ifdef HasHDF5
10641075 if(*(pathn.end()-1) != '/')
10651076 pathn += "/";
10661077 const char* filename = filen.c_str();
1067 MultiArray<2, double> temp(Shp(1,1), 0.0);
1078 MultiArray<2, double> temp(Shp(1,1), 0.0);
10681079 writeHDF5(filename, (pathn + "oob_per_tree").c_str(), oob_per_tree);
10691080 writeHDF5(filename, (pathn + "oobroc_per_tree").c_str(), oobroc_per_tree);
10701081 writeHDF5(filename, (pathn + "breiman_per_tree").c_str(), breiman_per_tree);
10821093 // value >=0 if sample was oob, 0 means fail 1, correct
10831094
10841095 template<class RF, class PR>
1085 void visit_at_beginning(RF & rf, PR & pr)
1096 void visit_at_beginning(RF & rf, PR &)
10861097 {
10871098 class_count = rf.class_count();
10881099 if(class_count == 2)
10891100 oobroc_per_tree.reshape(MultiArrayShape<4>::type(2,2,rf.tree_count(), rf.tree_count()));
10901101 else
10911102 oobroc_per_tree.reshape(MultiArrayShape<4>::type(rf.class_count(),rf.class_count(),1, rf.tree_count()));
1092 tmp_prob.reshape(Shp(1, class_count), 0);
1103 tmp_prob.reshape(Shp(1, class_count), 0);
10931104 prob_oob.reshape(Shp(rf.ext_param().row_count_,class_count), 0);
10941105 is_weighted = rf.options().predict_weighted_;
10951106 oob_per_tree.reshape(Shp(1, rf.tree_count()), 0);
11031114 }
11041115
11051116 template<class RF, class PR, class SM, class ST>
1106 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST & st, int index)
1117 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST &, int index)
11071118 {
11081119 // go through the samples
11091120 int total_oob =0;
11171128 ++oobCount[ll];
11181129
11191130 // update number of oob samples in this tree.
1120 ++total_oob;
1131 ++total_oob;
11211132 // get the predicted votes ---> tmp_prob;
11221133 int pos = rf.tree(index).getToLeaf(rowVector(pr.features(),ll));
1123 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
1134 Node<e_ConstProbNode> node ( rf.tree(index).topology_,
11241135 rf.tree(index).parameters_,
11251136 pos);
1126 tmp_prob.init(0);
1137 tmp_prob.init(0);
11271138 for(int ii = 0; ii < class_count; ++ii)
11281139 {
11291140 tmp_prob[ii] = node.prob_begin()[ii];
11341145 tmp_prob[ii] = tmp_prob[ii] * (*(node.prob_begin()-1));
11351146 }
11361147 rowVector(prob_oob, ll) += tmp_prob;
1137 int label = argMax(tmp_prob);
1138
1148 int label = argMax(tmp_prob);
1149
11391150 if(label != pr.response()(ll, 0))
11401151 {
11411152 // update number of wrong oob samples in this tree.
11641175 oobroc_per_tree.bindOuter(index)/=totalOobCount;
11651176 if(oobroc_per_tree.shape(2) > 1)
11661177 {
1167 MultiArrayView<3, double> current_roc
1178 MultiArrayView<3, double> current_roc
11681179 = oobroc_per_tree.bindOuter(index);
11691180 for(int gg = 0; gg < current_roc.shape(2); ++gg)
11701181 {
11731184 if(oobCount[ll])
11741185 {
11751186 int pred = prob_oob(ll, 1) > (double(gg)/double(current_roc.shape(2)))?
1176 1 : 0;
1177 current_roc(pr.response()(ll, 0), pred, gg)+= 1;
1187 1 : 0;
1188 current_roc(pr.response()(ll, 0), pred, gg)+= 1;
11781189 }
11791190 }
11801191 current_roc.bindOuter(gg)/= totalOobCount;
11821193 }
11831194 breiman_per_tree[index] = double(breimanstyle)/double(totalOobCount);
11841195 oob_per_tree[index] = double(wrong_oob)/double(total_oob);
1185 // go through the ib samples;
1196 // go through the ib samples;
11861197 }
11871198
11881199 /** Normalise variable importance after the number of trees is known.
11911202 void visit_at_end(RF & rf, PR & pr)
11921203 {
11931204 // ullis original metric and breiman style stuff
1194 oob_per_tree2 = 0;
1205 oob_per_tree2 = 0;
11951206 int totalOobCount =0;
11961207 int breimanstyle = 0;
11971208 for(int ll=0; ll < static_cast<int>(rf.ext_param_.row_count_); ++ll)
12041215 ++totalOobCount;
12051216 }
12061217 }
1207 oob_per_tree2 /= totalOobCount;
1208 oob_breiman = double(breimanstyle)/totalOobCount;
1218 oob_per_tree2 /= totalOobCount;
1219 oob_breiman = double(breimanstyle)/totalOobCount;
12091220 // mean error of each tree
12101221 MultiArrayView<2, double> mean(Shp(1,1), &oob_mean);
12111222 MultiArrayView<2, double> stdDev(Shp(1,1), &oob_std);
12221233 /** This Array has the same entries as the R - random forest variable
12231234 * importance.
12241235 * Matrix is featureCount by (classCount +2)
1225 * variable_importance_(ii,jj) is the variable importance measure of
1236 * variable_importance_(ii,jj) is the variable importance measure of
12261237 * the ii-th variable according to:
12271238 * jj = 0 - (classCount-1)
1228 * classwise permutation importance
1239 * classwise permutation importance
12291240 * jj = rowCount(variable_importance_) -2
12301241 * permutation importance
12311242 * jj = rowCount(variable_importance_) -1
12371248 * The ii-th column is permuted rep_cnt times.
12381249 *
12391250 * class wise permutation importance:
1240 * same as permutation importance. We only look at those OOB samples whose
1251 * same as permutation importance. We only look at those OOB samples whose
12411252 * response corresponds to class jj.
12421253 *
12431254 * gini decrease importance:
1244 * row ii corresponds to the sum of all gini decreases induced by variable ii
1255 * row ii corresponds to the sum of all gini decreases induced by variable ii
12451256 * in each node of the random forest.
12461257 */
12471258 MultiArray<2, double> variable_importance_;
12521263 void save(std::string filename, std::string prefix)
12531264 {
12541265 prefix = "variable_importance_" + prefix;
1255 writeHDF5(filename.c_str(),
1256 prefix.c_str(),
1266 writeHDF5(filename.c_str(),
1267 prefix.c_str(),
12571268 variable_importance_);
12581269 }
12591270 #endif
12601271
12611272 /* Constructor
1262 * \param rep_cnt (defautl: 10) how often should
1273 * \param rep_cnt (defautl: 10) how often should
12631274 * the permutation take place. Set to 1 to make calculation faster (but
12641275 * possibly more instable)
12651276 */
1266 VariableImportanceVisitor(int rep_cnt = 10)
1277 VariableImportanceVisitor(int rep_cnt = 10)
12671278 : repetition_count_(rep_cnt)
12681279
12691280 {}
12701281
12711282 /** calculates impurity decrease based variable importance after every
1272 * split.
1283 * split.
12731284 */
12741285 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
1275 void visit_after_split( Tree & tree,
1286 void visit_after_split( Tree & tree,
12761287 Split & split,
12771288 Region & /* parent */,
12781289 Region & /* leftChild */,
12811292 Label_t & /* labels */)
12821293 {
12831294 //resize to right size when called the first time
1284
1295
12851296 Int32 const class_count = tree.ext_param_.class_count_;
12861297 Int32 const column_count = tree.ext_param_.column_count_;
12871298 if(variable_importance_.size() == 0)
12881299 {
1289
1300
12901301 variable_importance_
1291 .reshape(MultiArrayShape<2>::type(column_count,
1302 .reshape(MultiArrayShape<2>::type(column_count,
12921303 class_count+2));
12931304 }
12941305
12951306 if(split.createNode().typeID() == i_ThresholdNode)
12961307 {
12971308 Node<i_ThresholdNode> node(split.createNode());
1298 variable_importance_(node.column(),class_count+1)
1309 variable_importance_(node.column(),class_count+1)
12991310 += split.region_gini_ - split.minGini();
13001311 }
13011312 }
13021313
1303 /**compute permutation based var imp.
1314 /**compute permutation based var imp.
13041315 * (Only an Array of size oob_sample_count x 1 is created.
13051316 * - apposed to oob_sample_count x feature_count in the other method.
1306 *
1317 *
13071318 * \sa FieldProxy
13081319 */
13091320 template<class RF, class PR, class SM, class ST>
13111322 {
13121323 typedef MultiArrayShape<2>::type Shp_t;
13131324 Int32 column_count = rf.ext_param_.column_count_;
1314 Int32 class_count = rf.ext_param_.class_count_;
1315
1325 Int32 class_count = rf.ext_param_.class_count_;
1326
13161327 /* This solution saves memory uptake but not multithreading
13171328 * compatible
13181329 */
1319 // remove the const cast on the features (yep , I know what I am
1330 // remove the const cast on the features (yep , I know what I am
13201331 // doing here.) data is not destroyed.
1321 //typename PR::Feature_t & features
1332 //typename PR::Feature_t & features
13221333 // = const_cast<typename PR::Feature_t &>(pr.features());
13231334
13241335 typedef typename PR::FeatureWithMemory_t FeatureArray;
13261337
13271338 FeatureArray features = pr.features();
13281339
1329 //find the oob indices of current tree.
1340 //find the oob indices of current tree.
13301341 ArrayVector<Int32> oob_indices;
13311342 ArrayVector<Int32>::iterator
13321343 iter;
13341345 if(!sm.is_used()[ii])
13351346 oob_indices.push_back(ii);
13361347
1337 //create space to back up a column
1348 //create space to back up a column
13381349 ArrayVector<FeatureValue> backup_column;
13391350
13401351 // Random foo
13411352 #ifdef CLASSIFIER_TEST
13421353 RandomMT19937 random(1);
1343 #else
1354 #else
13441355 RandomMT19937 random(RandomSeed);
13451356 #endif
1346 UniformIntRandomFunctor<RandomMT19937>
1357 UniformIntRandomFunctor<RandomMT19937>
13471358 randint(random);
13481359
13491360
13501361 //make some space for the results
13511362 MultiArray<2, double>
1352 oob_right(Shp_t(1, class_count + 1));
1363 oob_right(Shp_t(1, class_count + 1));
13531364 MultiArray<2, double>
1354 perm_oob_right (Shp_t(1, class_count + 1));
1355
1356
1365 perm_oob_right (Shp_t(1, class_count + 1));
1366
1367
13571368 // get the oob success rate with the original samples
1358 for(iter = oob_indices.begin();
1359 iter != oob_indices.end();
1369 for(iter = oob_indices.begin();
1370 iter != oob_indices.end();
13601371 ++iter)
13611372 {
13621373 if(rf.tree(index)
1363 .predictLabel(rowVector(features, *iter))
1374 .predictLabel(rowVector(features, *iter))
13641375 == pr.response()(*iter, 0))
13651376 {
13661377 //per class
13721383 //get the oob rate after permuting the ii'th dimension.
13731384 for(int ii = 0; ii < column_count; ++ii)
13741385 {
1375 perm_oob_right.init(0.0);
1386 perm_oob_right.init(0.0);
13761387 //make backup of original column
13771388 backup_column.clear();
1378 for(iter = oob_indices.begin();
1379 iter != oob_indices.end();
1389 for(iter = oob_indices.begin();
1390 iter != oob_indices.end();
13801391 ++iter)
13811392 {
13821393 backup_column.push_back(features(*iter,ii));
13831394 }
1384
1395
13851396 //get the oob rate after permuting the ii'th dimension.
13861397 for(int rr = 0; rr < repetition_count_; ++rr)
1387 {
1388 //permute dimension.
1398 {
1399 //permute dimension.
13891400 int n = oob_indices.size();
13901401 for(int jj = n-1; jj >= 1; --jj)
1391 std::swap(features(oob_indices[jj], ii),
1402 std::swap(features(oob_indices[jj], ii),
13921403 features(oob_indices[randint(jj+1)], ii));
13931404
13941405 //get the oob success rate after permuting
1395 for(iter = oob_indices.begin();
1396 iter != oob_indices.end();
1406 for(iter = oob_indices.begin();
1407 iter != oob_indices.end();
13971408 ++iter)
13981409 {
13991410 if(rf.tree(index)
1400 .predictLabel(rowVector(features, *iter))
1411 .predictLabel(rowVector(features, *iter))
14011412 == pr.response()(*iter, 0))
14021413 {
14031414 //per class
14071418 }
14081419 }
14091420 }
1410
1411
1421
1422
14121423 //normalise and add to the variable_importance array.
14131424 perm_oob_right /= repetition_count_;
14141425 perm_oob_right -=oob_right;
14151426 perm_oob_right *= -1;
14161427 perm_oob_right /= oob_indices.size();
14171428 variable_importance_
1418 .subarray(Shp_t(ii,0),
1429 .subarray(Shp_t(ii,0),
14191430 Shp_t(ii+1,class_count+1)) += perm_oob_right;
14201431 //copy back permuted dimension
14211432 for(int jj = 0; jj < int(oob_indices.size()); ++jj)
14231434 }
14241435 }
14251436
1426 /** calculate permutation based impurity after every tree has been
1437 /** calculate permutation based impurity after every tree has been
14271438 * learned default behaviour is that this happens out of place.
1428 * If you have very big data sets and want to avoid copying of data
1429 * set the in_place_ flag to true.
1439 * If you have very big data sets and want to avoid copying of data
1440 * set the in_place_ flag to true.
14301441 */
14311442 template<class RF, class PR, class SM, class ST>
14321443 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST & st, int index)
14501461 RandomForestProgressVisitor() : VisitorBase() {}
14511462
14521463 template<class RF, class PR, class SM, class ST>
1453 void visit_after_tree(RF& rf, PR & pr, SM & sm, ST & st, int index){
1464 void visit_after_tree(RF& rf, PR &, SM &, ST &, int index){
14541465 if(index != rf.options().tree_count_-1) {
14551466 std::cout << "\r[" << std::setw(10) << (index+1)/static_cast<double>(rf.options().tree_count_)*100 << "%]"
14561467 << " (" << index+1 << " of " << rf.options().tree_count_ << ") done" << std::flush;
14591470 std::cout << "\r[" << std::setw(10) << 100.0 << "%]" << std::endl;
14601471 }
14611472 }
1462
1473
14631474 template<class RF, class PR>
1464 void visit_at_end(RF const & rf, PR const & pr) {
1475 void visit_at_end(RF const & rf, PR const &) {
14651476 std::string a = TOCS;
14661477 std::cout << "all " << rf.options().tree_count_ << " trees have been learned in " << a << std::endl;
14671478 }
1468
1479
14691480 template<class RF, class PR>
1470 void visit_at_beginning(RF const & rf, PR const & pr) {
1481 void visit_at_beginning(RF const & rf, PR const &) {
14711482 TIC;
14721483 std::cout << "growing random forest, which will have " << rf.options().tree_count_ << " trees" << std::endl;
14731484 }
1474
1485
14751486 private:
14761487 USETICTOC;
14771488 };
14851496 public:
14861497 /** gini_missc(ii, jj) describes how well variable jj can describe a partition
14871498 * created on variable ii(when variable ii was chosen)
1488 */
1499 */
14891500 MultiArray<2, double> gini_missc;
14901501 MultiArray<2, int> tmp_labels;
1491 /** additional noise features.
1502 /** additional noise features.
14921503 */
14931504 MultiArray<2, double> noise;
14941505 MultiArray<2, double> noise_l;
14981509 MultiArray<2, double> corr_l;
14991510
15001511 /** Similarity Matrix
1501 *
1512 *
15021513 * (numberOfFeatures + 1) by (number Of Features + 1) Matrix
1503 * gini_missc
1514 * gini_missc
15041515 * - row normalized by the number of times the column was chosen
15051516 * - mean of corr_noise subtracted
1506 * - and symmetrised.
1507 *
1517 * - and symmetrised.
1518 *
15081519 */
15091520 MultiArray<2, double> similarity;
15101521 /** Distance Matrix 1-similarity
15111522 */
15121523 MultiArray<2, double> distance;
15131524 ArrayVector<int> tmp_cc;
1514
1525
15151526 /** How often was variable ii chosen
15161527 */
15171528 ArrayVector<int> numChoices;
15181529 typedef BestGiniOfColumn<GiniCriterion> ColumnDecisionFunctor;
15191530 BestGiniOfColumn<GiniCriterion> bgfunc;
1520 void save(std::string file, std::string prefix)
1531 void save(std::string, std::string)
15211532 {
15221533 /*
15231534 std::string tmp;
15531564 noise_l[ii] = random.uniform53() > 0.5;
15541565 }
15551566 bgfunc = ColumnDecisionFunctor( rf.ext_param_);
1556 tmp_labels.reshape(pr.response().shape());
1567 tmp_labels.reshape(pr.response().shape());
15571568 tmp_cc.resize(2);
15581569 numChoices.resize(n+1);
15591570 // look at all axes
15601571 }
15611572 template<class RF, class PR>
1562 void visit_at_end(RF const & rf, PR const & pr)
1573 void visit_at_end(RF const &, PR const &)
15631574 {
15641575 typedef MultiArrayShape<2>::type Shp;
15651576 similarity.reshape(gini_missc.shape());
15661577 similarity = gini_missc;;
15671578 MultiArray<2, double> mean_noise(Shp(corr_noise.shape(0), 1));
15681579 rowStatistics(corr_noise, mean_noise);
1569 mean_noise/= MultiArrayView<2, int>(mean_noise.shape(), numChoices.data());
1580 mean_noise/= MultiArrayView<2, int>(mean_noise.shape(), numChoices.data());
15701581 int rC = similarity.shape(0);
15711582 for(int jj = 0; jj < rC-1; ++jj)
15721583 {
15811592 similarity = abs(similarity);
15821593 FindMinMax<double> minmax;
15831594 inspectMultiArray(srcMultiArrayRange(similarity), minmax);
1584
1595
15851596 for(int jj = 0; jj < rC; ++jj)
15861597 similarity(jj, jj) = minmax.max;
1587
1588 similarity.subarray(Shp(0,0), Shp(rC-1, rC-1))
1598
1599 similarity.subarray(Shp(0,0), Shp(rC-1, rC-1))
15891600 += similarity.subarray(Shp(0,0), Shp(rC-1, rC-1)).transpose();
1590 similarity.subarray(Shp(0,0), Shp(rC-1, rC-1))/= 2;
1601 similarity.subarray(Shp(0,0), Shp(rC-1, rC-1))/= 2;
15911602 columnVector(similarity, rC-1) = rowVector(similarity, rC-1).transpose();
15921603 for(int jj = 0; jj < rC; ++jj)
15931604 similarity(jj, jj) = 0;
1594
1605
15951606 FindMinMax<double> minmax2;
15961607 inspectMultiArray(srcMultiArrayRange(similarity), minmax2);
15971608 for(int jj = 0; jj < rC; ++jj)
15981609 similarity(jj, jj) = minmax2.max;
15991610 distance.reshape(gini_missc.shape(), minmax2.max);
1600 distance -= similarity;
1611 distance -= similarity;
16011612 }
16021613
16031614 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
1604 void visit_after_split( Tree & tree,
1615 void visit_after_split( Tree &,
16051616 Split & split,
16061617 Region & parent,
1607 Region & leftChild,
1608 Region & rightChild,
1618 Region &,
1619 Region &,
16091620 Feature_t & features,
16101621 Label_t & labels)
16111622 {
16121623 if(split.createNode().typeID() == i_ThresholdNode)
16131624 {
16141625 double wgini;
1615 tmp_cc.init(0);
1626 tmp_cc.init(0);
16161627 for(int ii = 0; ii < parent.size(); ++ii)
16171628 {
1618 tmp_labels[parent[ii]]
1629 tmp_labels[parent[ii]]
16191630 = (features(parent[ii], split.bestSplitColumn()) < split.bestSplitThreshold());
16201631 ++tmp_cc[tmp_labels[parent[ii]]];
16211632 }
1622 double region_gini = bgfunc.loss_of_region(tmp_labels,
1633 double region_gini = bgfunc.loss_of_region(tmp_labels,
16231634 parent.begin(),
16241635 parent.end(),
16251636 tmp_cc);
16261637
1627 int n = split.bestSplitColumn();
1638 int n = split.bestSplitColumn();
16281639 ++numChoices[n];
16291640 ++(*(numChoices.end()-1));
16301641 //this functor does all the work
16311642 for(int k = 0; k < features.shape(1); ++k)
16321643 {
16331644 bgfunc(columnVector(features, k),
1634 tmp_labels,
1635 parent.begin(), parent.end(),
1645 tmp_labels,
1646 parent.begin(), parent.end(),
16361647 tmp_cc);
16371648 wgini = (region_gini - bgfunc.min_gini_);
1638 gini_missc(n, k)
1649 gini_missc(n, k)
16391650 += wgini;
16401651 }
16411652 for(int k = 0; k < 10; ++k)
16421653 {
16431654 bgfunc(columnVector(noise, k),
1644 tmp_labels,
1645 parent.begin(), parent.end(),
1655 tmp_labels,
1656 parent.begin(), parent.end(),
16461657 tmp_cc);
16471658 wgini = (region_gini - bgfunc.min_gini_);
1648 corr_noise(n, k)
1659 corr_noise(n, k)
16491660 += wgini;
16501661 }
1651
1662
16521663 for(int k = 0; k < 10; ++k)
16531664 {
16541665 bgfunc(columnVector(noise_l, k),
1655 tmp_labels,
1656 parent.begin(), parent.end(),
1666 tmp_labels,
1667 parent.begin(), parent.end(),
16571668 tmp_cc);
16581669 wgini = (region_gini - bgfunc.min_gini_);
1659 corr_l(n, k)
1670 corr_l(n, k)
16601671 += wgini;
16611672 }
16621673 bgfunc(labels, tmp_labels, parent.begin(), parent.end(),tmp_cc);
16631674 wgini = (region_gini - bgfunc.min_gini_);
1664 gini_missc(n, columnCount(gini_missc)-1)
1675 gini_missc(n, columnCount(gini_missc)-1)
16651676 += wgini;
1666
1677
16671678 region_gini = split.region_gini_;
1668 #if 1
1679 #if 1
16691680 Node<i_ThresholdNode> node(split.createNode());
1670 gini_missc(rowCount(gini_missc)-1,
1671 node.column())
1681 gini_missc(rowCount(gini_missc)-1,
1682 node.column())
16721683 +=split.region_gini_ - split.minGini();
16731684 #endif
16741685 for(int k = 0; k < 10; ++k)
16751686 {
16761687 split.bgfunc(columnVector(noise, k),
1677 labels,
1678 parent.begin(), parent.end(),
1688 labels,
1689 parent.begin(), parent.end(),
16791690 parent.classCounts());
1680 corr_noise(rowCount(gini_missc)-1,
1681 k)
1691 corr_noise(rowCount(gini_missc)-1,
1692 k)
16821693 += wgini;
16831694 }
16841695 #if 0
16851696 for(int k = 0; k < tree.ext_param_.actual_mtry_; ++k)
16861697 {
16871698 wgini = region_gini - split.min_gini_[k];
1688
1689 gini_missc(rowCount(gini_missc)-1,
1690 split.splitColumns[k])
1699
1700 gini_missc(rowCount(gini_missc)-1,
1701 split.splitColumns[k])
16911702 += wgini;
16921703 }
1693
1704
16941705 for(int k=tree.ext_param_.actual_mtry_; k<features.shape(1); ++k)
16951706 {
16961707 split.bgfunc(columnVector(features, split.splitColumns[k]),
1697 labels,
1698 parent.begin(), parent.end(),
1708 labels,
1709 parent.begin(), parent.end(),
16991710 parent.classCounts());
17001711 wgini = region_gini - split.bgfunc.min_gini_;
1701 gini_missc(rowCount(gini_missc)-1,
1712 gini_missc(rowCount(gini_missc)-1,
17021713 split.splitColumns[k]) += wgini;
17031714 }
17041715 #endif
17051716 // remember to partition the data according to the best.
1706 gini_missc(rowCount(gini_missc)-1,
1707 columnCount(gini_missc)-1)
1717 gini_missc(rowCount(gini_missc)-1,
1718 columnCount(gini_missc)-1)
17081719 += region_gini;
1709 SortSamplesByDimensions<Feature_t>
1720 SortSamplesByDimensions<Feature_t>
17101721 sorter(features, split.bestSplitColumn(), split.bestSplitThreshold());
17111722 std::partition(parent.begin(), parent.end(), sorter);
17121723 }
17181729 } // namespace rf
17191730 } // namespace vigra
17201731
1721 //@}
17221732 #endif // RF_VISITORS_HXX
4646 #include "array_vector.hxx"
4747 #include "sized_int.hxx"
4848 #include "matrix.hxx"
49 #include "metaprogramming.hxx"
4950 #include "random.hxx"
5051 #include "functorexpression.hxx"
5152 #include "random_forest/rf_common.hxx"
6465
6566 /** \addtogroup MachineLearning Machine Learning
6667
67 This module provides classification algorithms that map
68 This module provides classification algorithms that map
6869 features to labels or label probabilities.
69 Look at the RandomForest class first for a overview of most of the
70 functionality provided as well as use cases.
70 Look at the \ref vigra::RandomForest class (for implementation version 2) or the
71 \ref vigra::rf3::random_forest() factory function (for implementation version 3)
72 for an overview of the functionality as well as use cases.
7173 **/
72 //@{
7374
7475 namespace detail
7576 {
8788 }
8889 }//namespace detail
8990
90 /** Random Forest class
91 /** \brief Random forest version 2 (see also \ref vigra::rf3::RandomForest for version 3)
92 *
93 * \ingroup MachineLearning
9194 *
9295 * \tparam <LabelType = double> Type used for predicted labels.
9396 * \tparam <PreprocessorTag = ClassificationTag> Class used to preprocess
9497 * the input while learning and predicting. Currently Available:
9598 * ClassificationTag and RegressionTag. It is recommended to use
9699 * Splitfunctor::Preprocessor_t while using custom splitfunctors
97 * as they may need the data to be in a different format.
100 * as they may need the data to be in a different format.
98101 * \sa Preprocessor
99 *
102 *
100103 * Simple usage for classification (regression is not yet supported):
101104 * look at RandomForest::learn() as well as RandomForestOptions() for additional
102 * options.
105 * options.
103106 *
104107 * \code
105108 * using namespace vigra;
106109 * using namespace rf;
107110 * typedef xxx feature_t; \\ replace xxx with whichever type
108 * typedef yyy label_t; \\ likewise
109 *
111 * typedef yyy label_t; \\ likewise
112 *
110113 * // allocate the training data
111114 * MultiArrayView<2, feature_t> f = get_training_features();
112115 * MultiArrayView<2, label_t> l = get_training_labels();
113 *
116 *
114117 * RandomForest<label_t> rf;
115118 *
116119 * // construct visitor to calculate out-of-bag error
120123 * rf.learn(f, l, visitors::create_visitor(oob_v));
121124 *
122125 * std::cout << "the out-of-bag error is: " << oob_v.oob_breiman << "\n";
123 *
126 *
124127 * // get features for new data to be used for prediction
125128 * MultiArrayView<2, feature_t> pf = get_features();
126129 *
127130 * // allocate space for the response (pf.shape(0) is the number of samples)
128131 * MultiArrayView<2, label_t> prediction(pf.shape(0), 1);
129132 * MultiArrayView<2, double> prob(pf.shape(0), rf.class_count());
130 *
133 *
131134 * // perform prediction on new data
132135 * rf.predictLabels(pf, prediction);
133136 * rf.predictProbabilities(pf, prob);
135138 * \endcode
136139 *
137140 * Additional information such as Variable Importance measures are accessed
138 * via Visitors defined in rf::visitors.
141 * via Visitors defined in rf::visitors.
139142 * Have a look at rf::split for other splitting methods.
140143 *
141144 */
153156 typedef rf::visitors::StopVisiting Default_Visitor_t;
154157 typedef DT_StackEntry<ArrayVectorView<Int32>::iterator>
155158 StackEntry_t;
156 typedef LabelType LabelT;
159 typedef LabelType LabelT;
157160
158161 //problem independent data.
159162 Options_t options_;
175178 public:
176179
177180 /** \name Constructors
178 * Note: No copy Constructor specified as no pointers are manipulated
181 * Note: No copy constructor specified as no pointers are manipulated
179182 * in this class
180 */
181 /*\{*/
182 /**\brief default constructor
183
184 * @{
185 */
186
187 /**\brief default constructor
183188 *
184189 * \param options general options to the Random Forest. Must be of Type
185190 * Options_t
186 * \param ext_param problem specific values that can be supplied
191 * \param ext_param problem specific values that can be supplied
187192 * additionally. (class weights , labels etc)
188193 * \sa RandomForestOptions, ProblemSpec
189194 *
190195 */
191 RandomForest(Options_t const & options = Options_t(),
196 RandomForest(Options_t const & options = Options_t(),
192197 ProblemSpec_t const & ext_param = ProblemSpec_t())
193198 :
194199 options_(options),
201206
202207 /**\brief Create RF from external source
203208 * \param treeCount Number of trees to add.
204 * \param topology_begin
209 * \param topology_begin
205210 * Iterator to a Container where the topology_ data
206211 * of the trees are stored.
207 * Iterator should support at least treeCount forward
212 * Iterator should support at least treeCount forward
208213 * iterations. (i.e. topology_end - topology_begin >= treeCount
209 * \param parameter_begin
214 * \param parameter_begin
210215 * iterator to a Container where the parameters_ data
211 * of the trees are stored. Iterator should support at
216 * of the trees are stored. Iterator should support at
212217 * least treeCount forward iterations.
213 * \param problem_spec
218 * \param problem_spec
214219 * Extrinsic parameters that specify the problem e.g.
215220 * ClassCount, featureCount etc.
216221 * \param options (optional) specify options used to train the original
217222 * Random forest. This parameter is not used anywhere
218223 * during prediction and thus is optional.
219224 *
220 */
221 /* TODO: This constructor may be replaced by a Constructor using
222 * NodeProxy iterators to encapsulate the underlying data type.
223225 */
224226 template<class TopologyIterator, class ParameterIterator>
225227 RandomForest(int treeCount,
232234 ext_param_(problem_spec),
233235 options_(options)
234236 {
235 for(unsigned int k=0; k<treeCount; ++k, ++topology_begin, ++parameter_begin)
237 /* TODO: This constructor may be replaced by a Constructor using
238 * NodeProxy iterators to encapsulate the underlying data type.
239 */
240 for(int k=0; k<treeCount; ++k, ++topology_begin, ++parameter_begin)
236241 {
237242 trees_[k].topology_ = *topology_begin;
238243 trees_[k].parameters_ = *parameter_begin;
239244 }
240245 }
241246
242 /*\}*/
247 /** @} */
243248
244249
245250 /** \name Data Access
246251 * data access interface - usage of member variables is deprecated
247 */
248
249 /*\{*/
250
252 *
253 * @{
254 */
251255
252256 /**\brief return external parameters for viewing
253257 * \return ProblemSpec_t
264268 *
265269 * \param in external parameters to be set
266270 *
267 * set external parameters explicitly.
268 * If Random Forest has not been trained the preprocessor will
269 * either ignore filling values set this way or will throw an exception
270 * if values specified manually do not match the value calculated
271 * set external parameters explicitly.
272 * If Random Forest has not been trained the preprocessor will
273 * either ignore filling values set this way or will throw an exception
274 * if values specified manually do not match the value calculated
271275 & during the preparation step.
272276 */
273277 void set_ext_param(ProblemSpec_t const & in)
274278 {
279 ignore_argument(in);
275280 vigra_precondition(ext_param_.used() == false,
276281 "RandomForest::set_ext_param():"
277282 "Random forest has been trained! Call reset()"
311316 return trees_[index];
312317 }
313318
314 /*\}*/
315
316 /**\brief return number of features used while
319 /**\brief return number of features used while
317320 * training.
318321 */
319322 int feature_count() const
320323 {
321324 return ext_param_.column_count_;
322325 }
323
324
325 /**\brief return number of features used while
326
327
328 /**\brief return number of features used while
326329 * training.
327330 *
328331 * deprecated. Use feature_count() instead.
332335 return ext_param_.column_count_;
333336 }
334337
335 /**\brief return number of classes used while
338 /**\brief return number of classes used while
336339 * training.
337340 */
338341 int class_count() const
347350 return options_.tree_count_;
348351 }
349352
350
351
353 /** @} */
354
355 /**\name Learning
356 * Following functions differ in the degree of customization
357 * allowed
358 *
359 * @{
360 */
361
362 /**\brief learn on data with custom config and random number generator
363 *
364 * \param features a N x M matrix containing N samples with M
365 * features
366 * \param response a N x D matrix containing the corresponding
367 * response. Current split functors assume D to
368 * be 1 and ignore any additional columns.
369 * This is not enforced to allow future support
370 * for uncertain labels, label independent strata etc.
371 * The Preprocessor specified during construction
372 * should be able to handle features and labels
373 * features and the labels.
374 * see also: SplitFunctor, Preprocessing
375 *
376 * \param visitor visitor which is to be applied after each split,
377 * tree and at the end. Use rf_default() for using
378 * default value. (No Visitors)
379 * see also: rf::visitors
380 * \param split split functor to be used to calculate each split
381 * use rf_default() for using default value. (GiniSplit)
382 * see also: rf::split
383 * \param stop
384 * predicate to be used to calculate each split
385 * use rf_default() for using default value. (EarlyStoppStd)
386 * \param random RandomNumberGenerator to be used. Use
387 * rf_default() to use default value.(RandomMT19337)
388 *
389 *
390 */
391 template <class U, class C1,
392 class U2,class C2,
393 class Split_t,
394 class Stop_t,
395 class Visitor_t,
396 class Random_t>
397 void learn( MultiArrayView<2, U, C1> const & features,
398 MultiArrayView<2, U2,C2> const & response,
399 Visitor_t visitor,
400 Split_t split,
401 Stop_t stop,
402 Random_t const & random);
403
    /**\brief learn on data with a freshly seeded random number generator.
     *
     * Convenience overload of the fully customizable learn(): a
     * RandomNumberGenerator<> seeded with RandomSeed is created here and
     * forwarded, so the caller does not have to supply one.
     * All other parameters behave as documented on the master overload.
     */
    template <class U, class C1,
              class U2,class C2,
              class Split_t,
              class Stop_t,
              class Visitor_t>
    void learn( MultiArrayView<2, U, C1> const & features,
                MultiArrayView<2, U2,C2> const & response,
                Visitor_t visitor,
                Split_t split,
                Stop_t stop)

    {
        // create the default RNG and delegate to the customizable overload
        RandomNumberGenerator<> rnd = RandomNumberGenerator<>(RandomSeed);
        learn( features,
               response,
               visitor,
               split,
               stop,
               rnd);
    }
424
    /**\brief learn with a custom visitor but default split functor and
     * default stopping criterion.
     *
     * Delegates to the visitor/split overload, passing rf_default()
     * placeholders for the split functor and the stopping predicate.
     */
    template <class U, class C1, class U2,class C2, class Visitor_t>
    void learn( MultiArrayView<2, U, C1> const & features,
                MultiArrayView<2, U2,C2> const & labels,
                Visitor_t visitor)
    {
        learn( features,
               labels,
               visitor,
               rf_default(),
               rf_default());
    }
436
    /**\brief learn with a custom visitor and split functor but default
     * stopping criterion.
     *
     * Delegates to the full overload, passing rf_default() as the
     * stopping predicate.
     */
    template <class U, class C1, class U2,class C2,
              class Visitor_t, class Split_t>
    void learn( MultiArrayView<2, U, C1> const & features,
                MultiArrayView<2, U2,C2> const & labels,
                Visitor_t visitor,
                Split_t split)
    {
        learn( features,
               labels,
               visitor,
               split,
               rf_default());
    }
450
    /**\brief learn on data with default configuration
     *
     * \param features a N x M matrix containing N samples with M
     *                 features
     * \param labels   a N x D matrix containing the corresponding
     *                 N labels. Current split functors assume D to
     *                 be 1 and ignore any additional columns.
     *                 This is not enforced to allow future support
     *                 for uncertain labels.
     *
     * learning is done with:
     *
     * \sa rf::split, EarlyStoppStd
     *
     * - Randomly seeded random number generator
     * - default gini split functor as described by Breiman
     * - default The standard early stopping criterion
     */
    template <class U, class C1, class U2,class C2>
    void learn( MultiArrayView<2, U, C1> const & features,
                MultiArrayView<2, U2,C2> const & labels)
    {
        // delegate with rf_default() placeholders for visitor, split and stop
        learn( features,
               labels,
               rf_default(),
               rf_default(),
               rf_default());
    }
479
480
352481 template<class U,class C1,
353482 class U2, class C2,
354483 class Split_t,
369498 MultiArrayView<2, U2,C2> const & labels,int new_start_index,bool adjust_thresholds=false)
370499 {
371500 RandomNumberGenerator<> rnd = RandomNumberGenerator<>(RandomSeed);
372 onlineLearn(features,
373 labels,
501 onlineLearn(features,
502 labels,
374503 new_start_index,
375 rf_default(),
376 rf_default(),
504 rf_default(),
505 rf_default(),
377506 rf_default(),
378507 rnd,
379508 adjust_thresholds);
408537 rnd);
409538 }
410539
411
412 /**\name Learning
413 * Following functions differ in the degree of customization
414 * allowed
415 */
416 /*\{*/
417 /**\brief learn on data with custom config and random number generator
418 *
419 * \param features a N x M matrix containing N samples with M
420 * features
421 * \param response a N x D matrix containing the corresponding
422 * response. Current split functors assume D to
423 * be 1 and ignore any additional columns.
424 * This is not enforced to allow future support
425 * for uncertain labels, label independent strata etc.
426 * The Preprocessor specified during construction
427 * should be able to handle features and labels
428 * features and the labels.
429 * see also: SplitFunctor, Preprocessing
430 *
431 * \param visitor visitor which is to be applied after each split,
432 * tree and at the end. Use rf_default() for using
433 * default value. (No Visitors)
434 * see also: rf::visitors
435 * \param split split functor to be used to calculate each split
436 * use rf_default() for using default value. (GiniSplit)
437 * see also: rf::split
438 * \param stop
439 * predicate to be used to calculate each split
440 * use rf_default() for using default value. (EarlyStoppStd)
441 * \param random RandomNumberGenerator to be used. Use
442 * rf_default() to use default value.(RandomMT19337)
443 *
444 *
445 */
446 template <class U, class C1,
447 class U2,class C2,
448 class Split_t,
449 class Stop_t,
450 class Visitor_t,
451 class Random_t>
452 void learn( MultiArrayView<2, U, C1> const & features,
453 MultiArrayView<2, U2,C2> const & response,
454 Visitor_t visitor,
455 Split_t split,
456 Stop_t stop,
457 Random_t const & random);
458
459 template <class U, class C1,
460 class U2,class C2,
461 class Split_t,
462 class Stop_t,
463 class Visitor_t>
464 void learn( MultiArrayView<2, U, C1> const & features,
465 MultiArrayView<2, U2,C2> const & response,
466 Visitor_t visitor,
467 Split_t split,
468 Stop_t stop)
469
470 {
471 RandomNumberGenerator<> rnd = RandomNumberGenerator<>(RandomSeed);
472 learn( features,
473 response,
474 visitor,
475 split,
476 stop,
477 rnd);
478 }
479
480 template <class U, class C1, class U2,class C2, class Visitor_t>
481 void learn( MultiArrayView<2, U, C1> const & features,
482 MultiArrayView<2, U2,C2> const & labels,
483 Visitor_t visitor)
484 {
485 learn( features,
486 labels,
487 visitor,
488 rf_default(),
489 rf_default());
490 }
491
492 template <class U, class C1, class U2,class C2,
493 class Visitor_t, class Split_t>
494 void learn( MultiArrayView<2, U, C1> const & features,
495 MultiArrayView<2, U2,C2> const & labels,
496 Visitor_t visitor,
497 Split_t split)
498 {
499 learn( features,
500 labels,
501 visitor,
502 split,
503 rf_default());
504 }
505
506 /**\brief learn on data with default configuration
507 *
508 * \param features a N x M matrix containing N samples with M
509 * features
510 * \param labels a N x D matrix containing the corresponding
511 * N labels. Current split functors assume D to
512 * be 1 and ignore any additional columns.
513 * this is not enforced to allow future support
514 * for uncertain labels.
515 *
516 * learning is done with:
517 *
518 * \sa rf::split, EarlyStoppStd
519 *
520 * - Randomly seeded random number generator
521 * - default gini split functor as described by Breiman
522 * - default The standard early stopping criterion
523 */
524 template <class U, class C1, class U2,class C2>
525 void learn( MultiArrayView<2, U, C1> const & features,
526 MultiArrayView<2, U2,C2> const & labels)
527 {
528 learn( features,
529 labels,
530 rf_default(),
531 rf_default(),
532 rf_default());
533 }
534 /*\}*/
535
536
537
538 /**\name prediction
539 */
540 /*\{*/
540 /** @} */
541
542
543
544 /**\name Prediction
545 *
546 * @{
547 */
548
541549 /** \brief predict a label given a feature.
542550 *
543551 * \param features: a 1 by featureCount matrix containing
547555 * \return double value representing class. You can use the
548556 * predictLabels() function together with the
549557 * rf.external_parameter().class_type_ attribute
550 * to get back the same type used during learning.
558 * to get back the same type used during learning.
551559 */
552560 template <class U, class C, class Stop>
553561 LabelType predictLabel(MultiArrayView<2, U, C>const & features, Stop & stop) const;
    /**\brief predict a label given a feature, using the default
     * early-stopping criterion.
     *
     * Delegates to predictLabel(features, stop) with rf_default()
     * as the stopping predicate.
     */
    template <class U, class C>
    LabelType predictLabel(MultiArrayView<2, U, C>const & features)
    {
        return predictLabel(features, rf_default());
    }
560568 /** \brief predict a label with features and class priors
561569 *
562570 * \param features: same as above.
643651 * save class probabilities
644652 * \param stop earlystopping criterion
645653 * \sa EarlyStopping
646
654
647655 When a row of the feature array contains an NaN, the corresponding instance
648 cannot belong to any of the classes. The corresponding row in the probability
656 cannot belong to any of the classes. The corresponding row in the probability
649657 array will therefore contain all zeros.
650658 */
651659 template <class U, class C1, class T, class C2, class Stop>
666674 void predictProbabilities(MultiArrayView<2, U, C1>const & features,
667675 MultiArrayView<2, T, C2> & prob) const
668676 {
669 predictProbabilities(features, prob, rf_default());
670 }
677 predictProbabilities(features, prob, rf_default());
678 }
671679
672680 template <class U, class C1, class T, class C2>
673681 void predictRaw(MultiArrayView<2, U, C1>const & features,
674682 MultiArrayView<2, T, C2> & prob) const;
675683
676684
677 /*\}*/
685 /** @} */
678686
679687 };
680688
707715 // Value Chooser chooses second argument as value if first argument
708716 // is of type RF_DEFAULT. (thanks to template magic - don't care about
709717 // it - just smile and wave.
710
711 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
718
719 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
712720 Default_Stop_t default_stop(options_);
713721 typename RF_CHOOSER(Stop_t)::type stop
714 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
722 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
715723 Default_Split_t default_split;
716 typename RF_CHOOSER(Split_t)::type split
717 = RF_CHOOSER(Split_t)::choose(split_, default_split);
724 typename RF_CHOOSER(Split_t)::type split
725 = RF_CHOOSER(Split_t)::choose(split_, default_split);
718726 rf::visitors::StopVisiting stopvisiting;
719727 typedef rf::visitors::detail::VisitorNode
720 <rf::visitors::OnlineLearnVisitor,
721 typename RF_CHOOSER(Visitor_t)::type>
722 IntermedVis;
728 <rf::visitors::OnlineLearnVisitor,
729 typename RF_CHOOSER(Visitor_t)::type>
730 IntermedVis;
723731 IntermedVis
724732 visitor(online_visitor_, RF_CHOOSER(Visitor_t)::choose(visitor_, stopvisiting));
725733 #undef RF_CHOOSER
835843 Random_t & random)
836844 {
837845 using namespace rf;
838
839
846
847
840848 typedef UniformIntRandomFunctor<Random_t>
841849 RandFunctor_t;
842850
843851 // See rf_preprocessing.hxx for more info on this
844852 ext_param_.class_count_=0;
845853 typedef Processor<PreprocessorTag,LabelType, U, C1, U2, C2> Preprocessor_t;
846
854
847855 // default values and initialization
848856 // Value Chooser chooses second argument as value if first argument
849857 // is of type RF_DEFAULT. (thanks to template magic - don't care about
850858 // it - just smile and wave.
851
852 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
859
860 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
853861 Default_Stop_t default_stop(options_);
854862 typename RF_CHOOSER(Stop_t)::type stop
855 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
863 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
856864 Default_Split_t default_split;
857 typename RF_CHOOSER(Split_t)::type split
858 = RF_CHOOSER(Split_t)::choose(split_, default_split);
865 typename RF_CHOOSER(Split_t)::type split
866 = RF_CHOOSER(Split_t)::choose(split_, default_split);
859867 rf::visitors::StopVisiting stopvisiting;
860868 typedef rf::visitors::detail::VisitorNode
861 <rf::visitors::OnlineLearnVisitor,
862 typename RF_CHOOSER(Visitor_t)::type> IntermedVis;
869 <rf::visitors::OnlineLearnVisitor,
870 typename RF_CHOOSER(Visitor_t)::type> IntermedVis;
863871 IntermedVis
864872 visitor(online_visitor_, RF_CHOOSER(Visitor_t)::choose(visitor_, stopvisiting));
865873 #undef RF_CHOOSER
947955
948956 vigra_precondition(features.shape(0) == response.shape(0),
949957 "RandomForest::learn(): shape mismatch between features and response.");
950
958
951959 // default values and initialization
952960 // Value Chooser chooses second argument as value if first argument
953961 // is of type RF_DEFAULT. (thanks to template magic - don't care about
954962 // it - just smile and wave).
955
956 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
963
964 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
957965 Default_Stop_t default_stop(options_);
958966 typename RF_CHOOSER(Stop_t)::type stop
959 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
967 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
960968 Default_Split_t default_split;
961 typename RF_CHOOSER(Split_t)::type split
962 = RF_CHOOSER(Split_t)::choose(split_, default_split);
969 typename RF_CHOOSER(Split_t)::type split
970 = RF_CHOOSER(Split_t)::choose(split_, default_split);
963971 rf::visitors::StopVisiting stopvisiting;
964972 typedef rf::visitors::detail::VisitorNode<
965 rf::visitors::OnlineLearnVisitor,
966 typename RF_CHOOSER(Visitor_t)::type> IntermedVis;
973 rf::visitors::OnlineLearnVisitor,
974 typename RF_CHOOSER(Visitor_t)::type> IntermedVis;
967975 IntermedVis
968976 visitor(online_visitor_, RF_CHOOSER(Visitor_t)::choose(visitor_, stopvisiting));
969977 #undef RF_CHOOSER
10001008
10011009 visitor.visit_at_beginning(*this, preprocessor);
10021010 // THE MAIN EFFING RF LOOP - YEAY DUDE!
1003
1011
10041012 for(int ii = 0; ii < static_cast<int>(trees_.size()); ++ii)
10051013 {
10061014 //initialize First region/node/stack entry
10071015 sampler
1008 .sample();
1016 .sample();
10091017 StackEntry_t
10101018 first_stack_entry( sampler.sampledIndices().begin(),
10111019 sampler.sampledIndices().end(),
10871095 {
10881096 //Features are n xp
10891097 //prob is n x NumOfLabel probability for each feature in each class
1090
1098
10911099 vigra_precondition(rowCount(predictionSet.features) == rowCount(prob),
10921100 "RandomFroest::predictProbabilities():"
10931101 " Feature matrix and probability matrix size mismatch.");
12441252 "RandomForestn::predictProbabilities():"
12451253 " Probability matrix must have as many columns as there are classes.");
12461254
1247 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
1255 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
12481256 Default_Stop_t default_stop(options_);
12491257 typename RF_CHOOSER(Stop_t)::type & stop
1250 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
1251 #undef RF_CHOOSER
1258 = RF_CHOOSER(Stop_t)::choose(stop_, default_stop);
1259 #undef RF_CHOOSER
12521260 stop.set_external_parameters(ext_param_, tree_count());
12531261 prob.init(NumericTraits<T>::zero());
12541262 /* This code was originally there for testing early stopping
12561264 if(tree_indices_.size() != 0)
12571265 {
12581266 std::random_shuffle(tree_indices_.begin(),
1259 tree_indices_.end());
1267 tree_indices_.end());
12601268 }
12611269 */
12621270 //Classify for each row.
12631271 for(int row=0; row < rowCount(features); ++row)
12641272 {
12651273 MultiArrayView<2, U, StridedArrayTag> currentRow(rowVector(features, row));
1266
1274
12671275 // when the features contain an NaN, the instance doesn't belong to any class
12681276 // => indicate this by returning a zero probability array.
12691277 if(detail::contains_nan(currentRow))
12711279 rowVector(prob, row).init(0.0);
12721280 continue;
12731281 }
1274
1282
12751283 ArrayVector<double>::const_iterator weights;
12761284
12771285 //totalWeight == totalVoteCount!
12931301 //every weight in totalWeight.
12941302 totalWeight += cur_w;
12951303 }
1296 if(stop.after_prediction(weights,
1304 if(stop.after_prediction(weights,
12971305 k,
12981306 rowVector(prob, row),
12991307 totalWeight))
13341342 "RandomForestn::predictProbabilities():"
13351343 " Probability matrix must have as many columns as there are classes.");
13361344
1337 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
1345 #define RF_CHOOSER(type_) detail::Value_Chooser<type_, Default_##type_>
13381346 prob.init(NumericTraits<T>::zero());
13391347 /* This code was originally there for testing early stopping
13401348 * - we wanted the order of the trees to be randomized
13411349 if(tree_indices_.size() != 0)
13421350 {
13431351 std::random_shuffle(tree_indices_.begin(),
1344 tree_indices_.end());
1352 tree_indices_.end());
13451353 }
13461354 */
13471355 //Classify for each row.
13741382
13751383 }
13761384
1377 //@}
1378
13791385 } // namespace vigra
13801386
13811387 #include "random_forest/rf_algorithm.hxx"
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_RF3_RANDOM_FOREST_HXX
35 #define VIGRA_RF3_RANDOM_FOREST_HXX
36
37 #include <type_traits>
38 #include <thread>
39
40 #include "../multi_shape.hxx"
41 #include "../binary_forest.hxx"
42 #include "../threadpool.hxx"
43 #include "random_forest_common.hxx"
44
45
46
47 namespace vigra
48 {
49
50 namespace rf3
51 {
52
53 /********************************************************/
54 /* */
55 /* rf3::RandomForest */
56 /* */
57 /********************************************************/
58
/** \brief Random forest version 3.

vigra::rf3::RandomForest is typically constructed via the factory function \ref vigra::rf3::random_forest().
*/
template <typename FEATURES,
          typename LABELS,
          typename SPLITTESTS = LessEqualSplitTest<typename FEATURES::value_type>,
          typename ACCTYPE = ArgMaxVectorAcc<double>>
class RandomForest
{
public:

    typedef FEATURES Features;
    typedef typename Features::value_type FeatureType;
    typedef LABELS Labels;
    typedef typename Labels::value_type LabelType;
    typedef SPLITTESTS SplitTests;
    typedef ACCTYPE ACC;
    typedef typename ACC::input_type AccInputType;
    typedef BinaryForest Graph;
    typedef Graph::Node Node;

    static ContainerTag const container_tag = VectorTag;

    // FIXME:
    // Once the support for Visual Studio 2012 is dropped, replace this struct with
    // template <typename T>
    // using NodeMap = PropertyMap<Node, T, container_tag>;
    // Then the verbose typename NodeMap<T>::type, which typically shows up on NodeMap usages,
    // can be replace with NodeMap<T>.
    template <typename T>
    struct NodeMap
    {
        typedef PropertyMap<Node, T, container_tag> type;
    };

    // Default (empty) constructor.
    RandomForest();

    // Default constructor (copy all of the given stuff).
    RandomForest(
        Graph const & graph,
        typename NodeMap<SplitTests>::type const & split_tests,
        typename NodeMap<AccInputType>::type const & node_responses,
        ProblemSpec<LabelType> const & problem_spec
    );

    /// \brief Grow this forest by incorporating the other.
    void merge(
        RandomForest const & other
    );

    /// \brief Predict a class label for each instance in the given data.
    /// \note labels must be a 1-D array with size <tt>features.shape(0)</tt>.
    void predict(
        FEATURES const & features,
        LABELS & labels,
        int n_threads = -1,
        const std::vector<size_t> & tree_indices = std::vector<size_t>()
    ) const;

    /// \brief Predict the class probabilities of the given data.
    /// \note probs should have the shape (features.shape()[0], num_classes).
    template <typename PROBS>
    void predict_probabilities(
        FEATURES const & features,
        PROBS & probs,
        int n_threads = -1,
        const std::vector<size_t> & tree_indices = std::vector<size_t>()
    ) const;

    /// \brief For each data point in features, compute the corresponding leaf ids and return the average number of split comparisons.
    /// \note ids should have the shape (features.shape()[0], num_trees).
    template <typename IDS>
    double leaf_ids(
        FEATURES const & features,
        IDS & ids,
        int n_threads = -1,
        const std::vector<size_t> tree_indices = std::vector<size_t>()
    ) const;

    /// \brief Return the number of nodes.
    size_t num_nodes() const
    {
        return graph_.numNodes();
    }

    /// \brief Return the number of trees.
    size_t num_trees() const
    {
        return graph_.numRoots();
    }

    /// \brief Return the number of classes.
    size_t num_classes() const
    {
        return problem_spec_.num_classes_;
    }

    /// \brief Return the number of features.
    size_t num_features() const
    {
        return problem_spec_.num_features_;
    }

    /// \brief The graph structure.
    Graph graph_;

    /// \brief Contains a test for each internal node, that is used to determine whether given data goes to the left or the right child.
    typename NodeMap<SplitTests>::type split_tests_;

    /// \brief Contains the responses of each node (for example the most frequent label).
    typename NodeMap<AccInputType>::type node_responses_;

    /// \brief The specifications.
    ProblemSpec<LabelType> problem_spec_;

    /// \brief The options that were used for training.
    RandomForestOptions options_;

private:

    /// \brief Compute the leaf ids of the instances in [from, to).
    template <typename IDS, typename INDICES>
    double leaf_ids_impl(
        FEATURES const & features,
        IDS & ids,
        size_t from,
        size_t to,
        INDICES const & tree_indices
    ) const;

    /// \brief Predict probabilities for the single instance with index i.
    template<typename PROBS>
    void predict_probabilities_impl(
        FEATURES const & features,
        PROBS & probs,
        const size_t i,
        const std::vector<size_t> & tree_indices) const;

};
199
200 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
201 RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::RandomForest()
202 :
203 graph_(),
204 split_tests_(),
205 node_responses_(),
206 problem_spec_()
207 {}
208
// Construct a forest from an already-built tree graph plus the per-node
// split tests, per-node responses, and problem specification (all copied).
// NOTE(review): options_ is left default-constructed by this constructor.
template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::RandomForest(
    Graph const & graph,
    typename NodeMap<SplitTests>::type const & split_tests,
    typename NodeMap<AccInputType>::type const & node_responses,
    ProblemSpec<LabelType> const & problem_spec
) :
    graph_(graph),
    split_tests_(split_tests),
    node_responses_(node_responses),
    problem_spec_(problem_spec)
{}
221
222 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
223 void RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::merge(
224 RandomForest const & other
225 ){
226 vigra_precondition(problem_spec_ == other.problem_spec_,
227 "RandomForest::merge(): You cannot merge with different problem specs.");
228
229 // FIXME: Eventually compare the options and only fix if the forests are compatible.
230
231 size_t const offset = num_nodes();
232 graph_.merge(other.graph_);
233 for (auto const & p : other.split_tests_)
234 {
235 split_tests_.insert(Node(p.first.id()+offset), p.second);
236 }
237 for (auto const & p : other.node_responses_)
238 {
239 node_responses_.insert(Node(p.first.id()+offset), p.second);
240 }
241 }
242
243 // FIXME TODO we don't support the selection of tree indices any more in predict_probabilities, might be a good idea
244 // to re-enable this.
245 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
246 void RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::predict(
247 FEATURES const & features,
248 LABELS & labels,
249 int n_threads,
250 const std::vector<size_t> & tree_indices
251 ) const {
252 vigra_precondition(features.shape()[0] == labels.shape()[0],
253 "RandomForest::predict(): Shape mismatch between features and labels.");
254 vigra_precondition((size_t)features.shape()[1] == problem_spec_.num_features_,
255 "RandomForest::predict(): Number of features in prediction differs from training.");
256
257 MultiArray<2, double> probs(Shape2(features.shape()[0], problem_spec_.num_classes_));
258 predict_probabilities(features, probs, n_threads, tree_indices);
259 for (size_t i = 0; i < (size_t)features.shape()[0]; ++i)
260 {
261 auto const sub_probs = probs.template bind<0>(i);
262 auto it = std::max_element(sub_probs.begin(), sub_probs.end());
263 size_t const label = std::distance(sub_probs.begin(), it);
264 labels(i) = problem_spec_.distinct_classes_[label];
265 }
266 }
267
268
269 // FIXME TODO we don't support the selection of tree indices any more in predict_probabilities, might be a good idea
270 // to re-enable this.
271 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
272 template <typename PROBS>
273 void RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::predict_probabilities(
274 FEATURES const & features,
275 PROBS & probs,
276 int n_threads,
277 const std::vector<size_t> & tree_indices
278 ) const {
279 vigra_precondition(features.shape()[0] == probs.shape()[0],
280 "RandomForest::predict_probabilities(): Shape mismatch between features and probabilities.");
281 vigra_precondition((size_t)features.shape()[1] == problem_spec_.num_features_,
282 "RandomForest::predict_probabilities(): Number of features in prediction differs from training.");
283 vigra_precondition((size_t)probs.shape()[1] == problem_spec_.num_classes_,
284 "RandomForest::predict_probabilities(): Number of labels in probabilities differs from training.");
285
286 // By default, actual_tree_indices is empty. In that case we want to use all trees.
287 // We need to make a copy. I really don't know how the old code did compile...
288 std::vector<size_t> tree_indices_cpy(tree_indices);
289 if (tree_indices_cpy.size() == 0)
290 {
291 tree_indices_cpy.resize(graph_.numRoots());
292 std::iota(tree_indices_cpy.begin(), tree_indices_cpy.end(), 0);
293 }
294 else {
295 // Check the tree indices.
296 std::sort(tree_indices_cpy.begin(), tree_indices_cpy.end());
297 tree_indices_cpy.erase(std::unique(tree_indices_cpy.begin(), tree_indices_cpy.end()), tree_indices_cpy.end());
298 for (auto i : tree_indices_cpy)
299 vigra_precondition(i < graph_.numRoots(), "RandomForest::leaf_ids(): Tree index out of range.");
300 }
301
302 size_t const num_instances = features.shape()[0];
303
304 if (n_threads == -1)
305 n_threads = std::thread::hardware_concurrency();
306 if (n_threads < 1)
307 n_threads = 1;
308
309 parallel_foreach(
310 n_threads,
311 num_instances,
312 [&features,&probs,&tree_indices_cpy,this](size_t, size_t i) {
313 this->predict_probabilities_impl(features, probs, i, tree_indices_cpy);
314 }
315 );
316 }
317
318 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
319 template <typename PROBS>
320 void RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::predict_probabilities_impl(
321 FEATURES const & features,
322 PROBS & probs,
323 const size_t i,
324 const std::vector<size_t> & tree_indices
325 ) const {
326
327 // instantiate the accumulation function and the vector to store the tree node results
328 ACC acc;
329 std::vector<AccInputType> tree_results;
330 tree_results.reserve(tree_indices.size());
331 auto const sub_features = features.template bind<0>(i);
332
333 // loop over the trees
334 for (auto k : tree_indices)
335 {
336 Node node = graph_.getRoot(k);
337 while (graph_.outDegree(node) > 0)
338 {
339 size_t const child_index = split_tests_.at(node)(sub_features);
340 node = graph_.getChild(node, child_index);
341 }
342 tree_results.emplace_back(node_responses_.at(node));
343 }
344
345 // write the tree results into the probabilities
346 auto sub_probs = probs.template bind<0>(i);
347 acc(tree_results.begin(), tree_results.end(), sub_probs.begin());
348 }
349
350 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
351 template <typename IDS>
352 double RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::leaf_ids(
353 FEATURES const & features,
354 IDS & ids,
355 int n_threads,
356 std::vector<size_t> tree_indices
357 ) const {
358 vigra_precondition(features.shape()[0] == ids.shape()[0],
359 "RandomForest::leaf_ids(): Shape mismatch between features and probabilities.");
360 vigra_precondition((size_t)features.shape()[1] == problem_spec_.num_features_,
361 "RandomForest::leaf_ids(): Number of features in prediction differs from training.");
362 vigra_precondition(ids.shape()[1] == graph_.numRoots(),
363 "RandomForest::leaf_ids(): Leaf array has wrong shape.");
364
365 // Check the tree indices.
366 std::sort(tree_indices.begin(), tree_indices.end());
367 tree_indices.erase(std::unique(tree_indices.begin(), tree_indices.end()), tree_indices.end());
368 for (auto i : tree_indices)
369 vigra_precondition(i < graph_.numRoots(), "RandomForest::leaf_ids(): Tree index out of range.");
370
371 // By default, actual_tree_indices is empty. In that case we want to use all trees.
372 if (tree_indices.size() == 0)
373 {
374 tree_indices.resize(graph_.numRoots());
375 std::iota(tree_indices.begin(), tree_indices.end(), 0);
376 }
377
378 size_t const num_instances = features.shape()[0];
379 if (n_threads == -1)
380 n_threads = std::thread::hardware_concurrency();
381 if (n_threads < 1)
382 n_threads = 1;
383 std::vector<double> split_comparisons(n_threads, 0.0);
384 std::vector<size_t> indices(num_instances);
385 std::iota(indices.begin(), indices.end(), 0);
386 std::fill(ids.begin(), ids.end(), -1);
387 parallel_foreach(
388 n_threads,
389 indices.begin(),
390 indices.end(),
391 [this, &features, &ids, &split_comparisons, &tree_indices](size_t thread_id, size_t i) {
392 split_comparisons[thread_id] += this->leaf_ids_impl(features, ids, i, i+1, tree_indices);
393 }
394 );
395
396 double const sum_split_comparisons = std::accumulate(split_comparisons.begin(), split_comparisons.end(), 0.0);
397 return sum_split_comparisons / features.shape()[0];
398 }
399
400 template <typename FEATURES, typename LABELS, typename SPLITTESTS, typename ACC>
401 template <typename IDS, typename INDICES>
402 double RandomForest<FEATURES, LABELS, SPLITTESTS, ACC>::leaf_ids_impl(
403 FEATURES const & features,
404 IDS & ids,
405 size_t from,
406 size_t to,
407 INDICES const & tree_indices
408 ) const {
409 vigra_precondition(features.shape()[0] == ids.shape()[0],
410 "RandomForest::leaf_ids_impl(): Shape mismatch between features and labels.");
411 vigra_precondition(features.shape()[1] == problem_spec_.num_features_,
412 "RandomForest::leaf_ids_impl(): Number of Features in prediction differs from training.");
413 vigra_precondition(from >= 0 && from <= to && to <= (size_t)features.shape()[0],
414 "RandomForest::leaf_ids_impl(): Indices out of range.");
415 vigra_precondition(ids.shape()[1] == graph_.numRoots(),
416 "RandomForest::leaf_ids_impl(): Leaf array has wrong shape.");
417
418 double split_comparisons = 0.0;
419 for (size_t i = from; i < to; ++i)
420 {
421 auto const sub_features = features.template bind<0>(i);
422 for (auto k : tree_indices)
423 {
424 Node node = graph_.getRoot(k);
425 while (graph_.outDegree(node) > 0)
426 {
427 size_t const child_index = split_tests_.at(node)(sub_features);
428 node = graph_.getChild(node, child_index);
429 split_comparisons += 1.0;
430 }
431 ids(i, k) = node.id();
432 }
433 }
434 return split_comparisons;
435 }
436
437
438
439 } // namespace rf3
440 } // namespace vigra
441
442 #endif
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_RF3_COMMON_HXX
35 #define VIGRA_RF3_COMMON_HXX
36
37 #include <iterator>
38 #include <type_traits>
39 #include <cmath>
40 #include <numeric>
41
42 #include "../multi_array.hxx"
43 #include "../mathutil.hxx"
44
45 namespace vigra
46 {
47
48 namespace rf3
49 {
50
51 /** \addtogroup MachineLearning
52 **/
53 //@{
54
template <typename T>
struct LessEqualSplitTest
{
public:
    /// \brief Split test: send an instance to child 0 (left) iff its
    /// feature value in dimension \a dim is less than or equal to \a val.
    LessEqualSplitTest(size_t dim = 0, T const & val = 0)
        :
        dim_(dim),
        val_(val)
    {}

    /// Return the child index (0 = left, 1 = right) for the given feature row.
    template<typename FEATURES>
    size_t operator()(FEATURES const & features) const
    {
        if (features(dim_) <= val_)
            return 0;
        return 1;
    }

    size_t dim_;  // feature dimension that is compared
    T val_;       // split threshold
};
74
75
76
struct ArgMaxAcc
{
public:
    typedef size_t input_type;

    /// \brief Turn a range of class indices into a normalized histogram.
    ///
    /// Writes, for each class index 0..max(*it), the fraction of inputs with
    /// that index to \a out. The internal histogram buffer is reused between
    /// calls to avoid reallocation.
    template <typename ITER, typename OUTITER>
    void operator()(ITER begin, ITER end, OUTITER out)
    {
        std::fill(buffer_.begin(), buffer_.end(), 0);

        // BUGFIX: an empty range used to divide by zero and read buffer_[0]
        // even when the buffer was empty; emit nothing instead.
        if (begin == end)
            return;

        size_t max_v = 0;
        size_t n = 0;
        for (ITER it = begin; it != end; ++it)
        {
            size_t const v = *it;
            if (v >= buffer_.size())
            {
                buffer_.resize(v+1, 0);
            }
            ++buffer_[v];
            ++n;
            max_v = std::max(max_v, v);
        }
        for (size_t i = 0; i <= max_v; ++i)
        {
            *out = buffer_[i] / static_cast<double>(n);
            ++out;
        }
    }
private:
    std::vector<size_t> buffer_;  // class-count histogram, reused across calls
};
108
109
110
template <typename VALUETYPE>
struct ArgMaxVectorAcc
{
public:
    typedef VALUETYPE value_type;
    typedef std::vector<value_type> input_type;

    /// \brief Average a range of per-class count vectors into one distribution.
    ///
    /// Each input vector is normalized by its own sum, the normalized vectors
    /// are added up, and the accumulated values are written to \a out.
    template <typename ITER, typename OUTITER>
    void operator()(ITER begin, ITER end, OUTITER out)
    {
        std::fill(buffer_.begin(), buffer_.end(), 0);

        // BUGFIX: an empty range used to read buffer_[0] from an empty buffer.
        if (begin == end)
            return;

        size_t max_v = 0;
        bool any_nonempty = false;
        for (ITER it = begin; it != end; ++it)
        {
            input_type const & vec = *it;

            // BUGFIX: an empty vector made vec.size()-1 wrap around below.
            if (vec.empty())
                continue;
            any_nonempty = true;

            if (vec.size() > buffer_.size())
            {
                buffer_.resize(vec.size(), 0);
            }
            value_type const n = std::accumulate(vec.begin(), vec.end(), static_cast<value_type>(0));
            for (size_t i = 0; i < vec.size(); ++i)
            {
                buffer_[i] += vec[i] / static_cast<double>(n);
            }
            max_v = std::max(vec.size()-1, max_v);
        }
        if (!any_nonempty)
            return;
        for (size_t i = 0; i <= max_v; ++i)
        {
            *out = buffer_[i];
            ++out;
        }
    }
private:
    std::vector<double> buffer_;  // accumulated normalized counts, reused across calls
};
145
146
147
148 // struct LargestSumAcc
149 // {
150 // public:
151 // typedef std::vector<size_t> input_type;
152 // template <typename ITER>
153 // size_t operator()(ITER begin, ITER end)
154 // {
155 // std::fill(buffer_.begin(), buffer_.end(), 0);
156 // for (ITER it = begin; it != end; ++it)
157 // {
158 // auto const & v = *it;
159 // if (v.size() > buffer_.size())
160 // {
161 // buffer_.resize(v.size(), 0);
162 // }
163 // for (size_t i = 0; i < v.size(); ++i)
164 // {
165 // buffer_[i] += v[i];
166 // }
167 // }
168 // size_t max_label = 0;
169 // size_t max_count = 0;
170 // for (size_t i = 0; i < buffer_.size(); ++i)
171 // {
172 // if (buffer_[i] > max_count)
173 // {
174 // max_count = buffer_[i];
175 // max_label = i;
176 // }
177 // }
178 // return max_label;
179 // }
180 // private:
181 // std::vector<size_t> buffer_;
182 // };
183
184
185
186 // struct ForestGarroteAcc
187 // {
188 // public:
189 // typedef double input_type;
190 // template <typename ITER, typename OUTITER>
191 // void operator()(ITER begin, ITER end, OUTITER out)
192 // {
193 // double s = 0.0;
194 // for (ITER it = begin; it != end; ++it)
195 // {
196 // s += *it;
197 // }
198 // if (s < 0.0)
199 // s = 0.0;
200 // else if (s > 1.0)
201 // s = 1.0;
202 // *out = 1.0-s;
203 // ++out;
204 // *out = s;
205 // }
206 // };
207
208
209
210 namespace detail
211 {
212
213 /// Abstract scorer that iterates over all split candidates, uses FUNCTOR to compute a score,
214 /// and saves the split with the minimum score.
template <typename FUNCTOR>
class GeneralScorer
{
public:

    typedef FUNCTOR Functor;

    /// \param priors weighted number of datapoints per class in the node that
    ///        is being split; their sum is cached as the node total.
    GeneralScorer(std::vector<double> const & priors)
        :
        split_found_(false),
        best_split_(0),
        best_dim_(0),
        best_score_(std::numeric_limits<double>::max()),
        priors_(priors),
        n_total_(std::accumulate(priors.begin(), priors.end(), 0.0))
    {}

    /// \brief Sweep all split positions in [begin, end) for dimension \a dim
    /// and remember the one with the minimum score across all calls.
    ///
    /// NOTE(review): this assumes [begin, end) holds instance indices sorted
    /// ascending by features(*, dim) — confirm against the caller; the
    /// "left == right means no new split" logic only works for sorted input.
    /// May be called repeatedly (once per candidate dimension); the best
    /// split over all calls is kept in best_split_/best_dim_/best_score_.
    template <typename FEATURES, typename LABELS, typename WEIGHTS, typename ITER>
    void operator()(
        FEATURES const & features,
        LABELS const & labels,
        WEIGHTS const & weights,
        ITER begin,
        ITER end,
        size_t dim
    ){
        if (begin == end)
            return;

        Functor score;

        std::vector<double> counts(priors_.size(), 0.0);
        double n_left = 0;
        ITER next = begin;
        ++next;
        // Each iteration considers the split between *begin and *next.
        for (; next != end; ++begin, ++next)
        {
            // Move the label from the right side to the left side.
            size_t const left_index = *begin;
            size_t const right_index = *next;
            size_t const label = static_cast<size_t>(labels(left_index));
            counts[label] += weights[left_index];
            n_left += weights[left_index];

            // Skip if there is no new split (equal feature values cannot be
            // separated by a threshold between them).
            auto const left = features(left_index, dim);
            auto const right = features(right_index, dim);
            if (left == right)
                continue;

            // Update the score (lower is better; FUNCTOR is e.g. GiniScore).
            split_found_ = true;
            double const s = score(priors_, counts, n_total_, n_left);
            bool const better_score = s < best_score_;
            if (better_score)
            {
                best_score_ = s;
                best_split_ = 0.5*(left+right); // threshold halfway between the two values
                best_dim_ = dim;
            }
        }
    }

    bool split_found_; // whether a split was found at all
    double best_split_; // the threshold of the best split
    size_t best_dim_; // the dimension of the best split
    double best_score_; // the score of the best split

private:

    std::vector<double> const priors_; // the weighted number of datapoints per class
    double const n_total_; // the weighted number of datapoints
};
288
289 } // namespace detail
290
291 /// \brief Functor that computes the gini score.
292 ///
293 /// This functor is typically selected indirectly by passing the value <tt>RF_GINI</tt>
294 /// to vigra::rf3::RandomForestOptions::split().
class GiniScore
{
public:
    /// \brief Weighted gini impurity of a proposed split:
    /// n_left * gini(left child) + n_right * gini(right child).
    /// The scorer minimizes, so smaller values indicate better splits.
    double operator()(std::vector<double> const & priors,
        std::vector<double> const & counts, double n_total, double n_left) const
    {
        double const n_right = n_total - n_left;
        double impurity_left = 1.0;
        double impurity_right = 1.0;
        for (size_t k = 0; k < counts.size(); ++k)
        {
            double const frac_left = counts[k] / n_left;
            double const frac_right = (priors[k] - counts[k]) / n_right;
            impurity_left -= frac_left * frac_left;
            impurity_right -= frac_right * frac_right;
        }
        return n_left * impurity_left + n_right * impurity_right;
    }

    // needed for Gini-based variable importance calculation
    template <typename LABELS, typename WEIGHTS, typename ITER>
    static double region_score(LABELS const & labels, WEIGHTS const & weights, ITER begin, ITER end)
    {
        // Accumulate the weighted class histogram of the region [begin, end).
        std::vector<double> histogram;
        double total_weight = 0.0;
        for (auto it = begin; it != end; ++it)
        {
            auto const index = *it;
            auto const label = labels[index];
            if (histogram.size() <= label)
            {
                histogram.resize(label+1, 0.0);
            }
            histogram[label] += weights[index];
            total_weight += weights[index];
        }

        // Gini impurity of the region, scaled by the region weight.
        double impurity = total_weight;
        for (size_t k = 0; k < histogram.size(); ++k)
        {
            impurity -= histogram[k]*histogram[k]/total_weight;
        }
        return impurity;
    }
};
342
343 /// \brief Functor that computes the entropy score.
344 ///
345 /// This functor is typically selected indirectly by passing the value <tt>RF_ENTROPY</tt>
346 /// to vigra::rf3::RandomForestOptions::split().
347 class EntropyScore
348 {
349 public:
350 double operator()(std::vector<double> const & priors, std::vector<double> const & counts, double n_total, double n_left) const
351 {
352 double const n_right = n_total - n_left;
353 double ig = 0;
354 for (size_t i = 0; i < counts.size(); ++i)
355 {
356 double c = counts[i];
357 if (c != 0)
358 ig -= c * std::log(c / n_left);
359
360 c = priors[i] - c;
361 if (c != 0)
362 ig -= c * std::log(c / n_right);
363 }
364 return ig;
365 }
366
367 template <typename LABELS, typename WEIGHTS, typename ITER>
368 double region_score(LABELS const & /*labels*/, WEIGHTS const & /*weights*/, ITER /*begin*/, ITER /*end*/) const
369 {
370 vigra_fail("EntropyScore::region_score(): Not implemented yet.");
371 return 0.0; // FIXME
372 }
373 };
374
375 /// \brief Functor that computes the Kolmogorov-Smirnov score.
376 ///
377 /// Actually, it reutrns the negated KSD score, because we want to minimize.
378 ///
379 /// This functor is typically selected indirectly by passing the value <tt>RF_KSD</tt>
380 /// to vigra::rf3::RandomForestOptions::split().
381 class KolmogorovSmirnovScore
382 {
383 public:
384 double operator()(std::vector<double> const & priors, std::vector<double> const & counts, double /*n_total*/, double /*n_left*/) const // Fix unused parameter warning, but leave in to not break compatibility with overall API
385 {
386 double const eps = 1e-10;
387 double nnz = 0;
388 std::vector<double> norm_counts(counts.size(), 0.0);
389 for (size_t i = 0; i < counts.size(); ++i)
390 {
391 if (priors[i] > eps)
392 {
393 norm_counts[i] = counts[i] / priors[i];
394 ++nnz;
395 }
396 }
397 if (nnz < eps)
398 return 0.0;
399
400 // NOTE to future self:
401 // In std::accumulate, it makes a huge difference whether you use 0 or 0.0 as init. Think about that before making changes.
402 double const mean = std::accumulate(norm_counts.begin(), norm_counts.end(), 0.0) / nnz;
403
404 // Compute the sum of the squared distances.
405 double ksd = 0.0;
406 for (size_t i = 0; i < norm_counts.size(); ++i)
407 {
408 if (priors[i] != 0)
409 {
410 double const v = (mean-norm_counts[i]);
411 ksd += v*v;
412 }
413 }
414 return -ksd;
415 }
416
417 template <typename LABELS, typename WEIGHTS, typename ITER>
418 double region_score(LABELS const & /*labels*/, WEIGHTS const & /*weights*/, ITER /*begin*/, ITER /*end*/) const
419 {
420 vigra_fail("KolmogorovSmirnovScore::region_score(): Region score not available for the Kolmogorov-Smirnov split.");
421 return 0.0;
422 }
423 };
424
425 // This struct holds the depth and the weighted number of datapoints per class of a single node.
template <typename ARR>
struct RFNodeDescription
{
public:
    /// \brief Bundle a node's tree depth with its per-class weighted
    /// instance counts.
    ///
    /// Only a reference to \a priors is stored, so the caller must keep the
    /// array alive while this description is in use.
    RFNodeDescription(size_t depth, ARR const & priors)
        : depth_(depth),
          priors_(priors)
    {}

    size_t depth_;        // depth of the node in its tree
    ARR const & priors_;  // weighted number of datapoints per class
};
438
439
440
441 // Return true if the given node is pure.
442 template <typename LABELS, typename ITER>
443 bool is_pure(LABELS const & /*labels*/, RFNodeDescription<ITER> const & desc)
444 {
445 bool found = false;
446 for (auto n : desc.priors_)
447 {
448 if (n > 0)
449 {
450 if (found)
451 return false;
452 else
453 found = true;
454 }
455 }
456 return true;
457 }
458
459 /// @brief Random forest 'node purity' stop criterion.
460 ///
461 /// Stop splitting a node when it contains only instanes of a single class.
462 class PurityStop
463 {
464 public:
465 template <typename LABELS, typename ITER>
466 bool operator()(LABELS const & labels, RFNodeDescription<ITER> const & desc) const
467 {
468 return is_pure(labels, desc);
469 }
470 };
471
472 /// @brief Random forest 'maximum depth' stop criterion.
473 ///
474 /// Stop splitting a node when the its depth reaches a given value or when it is pure.
475 class DepthStop
476 {
477 public:
478 /// @brief Constructor: terminate tree construction at \a max_depth.
479 DepthStop(size_t max_depth)
480 :
481 max_depth_(max_depth)
482 {}
483
484 template <typename LABELS, typename ITER>
485 bool operator()(LABELS const & labels, RFNodeDescription<ITER> const & desc) const
486 {
487 if (desc.depth_ >= max_depth_)
488 return true;
489 else
490 return is_pure(labels, desc);
491 }
492 size_t max_depth_;
493 };
494
495 /// @brief Random forest 'number of datapoints' stop criterion.
496 ///
497 /// Stop splitting a node when it contains too few instances or when it is pure.
498 class NumInstancesStop
499 {
500 public:
501 /// @brief Constructor: terminate tree construction when node contains less than \a min_n instances.
502 NumInstancesStop(size_t min_n)
503 :
504 min_n_(min_n)
505 {}
506
507 template <typename LABELS, typename ARR>
508 bool operator()(LABELS const & labels, RFNodeDescription<ARR> const & desc) const
509 {
510 typedef typename ARR::value_type value_type;
511 if (std::accumulate(desc.priors_.begin(), desc.priors_.end(), static_cast<value_type>(0)) <= min_n_)
512 return true;
513 else
514 return is_pure(labels, desc);
515 }
516 size_t min_n_;
517 };
518
/// @brief Random forest 'node complexity' stop criterion.
///
/// Stop splitting a node when it allows for too few different data arrangements.
/// This includes purity, which offers only a single data arrangement.
class NodeComplexityStop
{
public:
    /// @brief Constructor: stop when fewer than <tt>1/tau</tt> label arrangements are possible.
    NodeComplexityStop(double tau = 0.001)
    :
        logtau_(std::log(tau))
    {
        vigra_precondition(tau > 0 && tau < 1, "NodeComplexityStop(): Tau must be in the open interval (0, 1).");
    }

    /// Return true (stop splitting) when log of the fraction of possible
    /// label arrangements is >= log(tau), or when the node is (nearly) pure.
    template <typename LABELS, typename ARR>
    bool operator()(LABELS const & /*labels*/, RFNodeDescription<ARR> const & desc) // Fix unused parameter, but leave in for API compatibility
    {
        typedef typename ARR::value_type value_type;

        // Count the labels.
        // NOTE(review): 'total' is size_t, so fractional weighted counts
        // (e.g. when class weights are used) are truncated here — confirm
        // whether this is intentional.
        size_t const total = std::accumulate(desc.priors_.begin(), desc.priors_.end(), static_cast<value_type>(0));

        // Compute log(prod_k(n_k!)) via loggamma, since n! == gamma(n+1).
        size_t nnz = 0;     // number of classes with at least one instance
        double lg = 0.0;
        for (auto v : desc.priors_)
        {
            if (v > 0)
            {
                ++nnz;
                lg += loggamma(static_cast<double>(v+1));
            }
        }
        lg += loggamma(static_cast<double>(nnz+1));
        lg -= loggamma(static_cast<double>(total+1));

        // A node with at most one represented class is pure: always stop.
        if (nnz <= 1)
            return true;

        return lg >= logtau_;
    }

    double logtau_; // log of the complexity threshold tau
};
563
// Option tags used by RandomForestOptions to select strategies by name.
enum RandomForestOptionTags
{
    RF_SQRT,    // features per node: square root of the total number of features
    RF_LOG,     // features per node: logarithm of the total number of features
    RF_CONST,   // features per node: user-supplied constant
    RF_ALL,     // features per node: use all features
    RF_GINI,    // split criterion: gini impurity
    RF_ENTROPY, // split criterion: entropy
    RF_KSD      // split criterion: Kolmogorov-Smirnov distance
};
574
575
576 /** \brief Options class for \ref vigra::rf3::RandomForest version 3.
577
578 <b>\#include</b> \<vigra/random_forest_3.hxx\><br/>
579 Namespace: vigra::rf3
580 */
581 class RandomForestOptions
582 {
583 public:
584
585 RandomForestOptions()
586 :
587 tree_count_(255),
588 features_per_node_(0),
589 features_per_node_switch_(RF_SQRT),
590 bootstrap_sampling_(true),
591 resample_count_(0),
592 split_(RF_GINI),
593 max_depth_(0),
594 node_complexity_tau_(-1),
595 min_num_instances_(1),
596 use_stratification_(false),
597 n_threads_(-1),
598 class_weights_()
599 {}
600
601 /**
602 * @brief The number of trees.
603 *
604 * Default: 255
605 */
606 RandomForestOptions & tree_count(int p_tree_count)
607 {
608 tree_count_ = p_tree_count;
609 return *this;
610 }
611
612 /**
613 * @brief The number of features that are considered when computing the split.
614 *
615 * @param p_features_per_node the number of features
616 *
617 * Default: use sqrt of the total number of features.
618 */
619 RandomForestOptions & features_per_node(int p_features_per_node)
620 {
621 features_per_node_switch_ = RF_CONST;
622 features_per_node_ = p_features_per_node;
623 return *this;
624 }
625
626 /**
627 * @brief The number of features that are considered when computing the split.
628 *
629 * @param p_features_per_node_switch possible values: <br/>
630 <tt>vigra::rf3::RF_SQRT</tt> (use square root of total number of features, recommended for classification), <br/>
631 <tt>vigra::rf3::RF_LOG</tt> (use logarithm of total number of features, recommended for regression), <br/>
632 <tt>vigra::rf3::RF_ALL</tt> (use all features).
633 *
634 * Default: <tt>vigra::rf3::RF_SQRT</tt>
635 */
636 RandomForestOptions & features_per_node(RandomForestOptionTags p_features_per_node_switch)
637 {
638 vigra_precondition(p_features_per_node_switch == RF_SQRT ||
639 p_features_per_node_switch == RF_LOG ||
640 p_features_per_node_switch == RF_ALL,
641 "RandomForestOptions::features_per_node(): Input must be RF_SQRT, RF_LOG or RF_ALL.");
642 features_per_node_switch_ = p_features_per_node_switch;
643 return *this;
644 }
645
646 /**
647 * @brief Use bootstrap sampling.
648 *
649 * Default: true
650 */
651 RandomForestOptions & bootstrap_sampling(bool b)
652 {
653 bootstrap_sampling_ = b;
654 return *this;
655 }
656
657 /**
658 * @brief If resample_count is greater than zero, the split in each node is computed using only resample_count data points.
659 *
660 * Default: \a n = 0 (don't resample in every node)
661 */
662 RandomForestOptions & resample_count(size_t n)
663 {
664 resample_count_ = n;
665 bootstrap_sampling_ = false;
666 return *this;
667 }
668
669 /**
670 * @brief The split criterion.
671 *
672 * @param p_split possible values: <br/>
673 <tt>vigra::rf3::RF_GINI</tt> (use Gini criterion, \ref vigra::rf3::GiniScorer), <br/>
674 <tt>vigra::rf3::RF_ENTROPY</tt> (use entropy criterion, \ref vigra::rf3::EntropyScorer), <br/>
675 <tt>vigra::rf3::RF_KSD</tt> (use Kolmogorov-Smirnov criterion, \ref vigra::rf3::KSDScorer).
676 *
677 * Default: <tt>vigra::rf3::RF_GINI</tt>
678 */
679 RandomForestOptions & split(RandomForestOptionTags p_split)
680 {
681 vigra_precondition(p_split == RF_GINI ||
682 p_split == RF_ENTROPY ||
683 p_split == RF_KSD,
684 "RandomForestOptions::split(): Input must be RF_GINI, RF_ENTROPY or RF_KSD.");
685 split_ = p_split;
686 return *this;
687 }
688
689 /**
690 * @brief Do not split a node if its depth is greater or equal to max_depth.
691 *
692 * Default: \a d = 0 (don't use depth as a termination criterion)
693 */
694 RandomForestOptions & max_depth(size_t d)
695 {
696 max_depth_ = d;
697 return *this;
698 }
699
700 /**
701 * @brief Value of the node complexity termination criterion.
702 *
703 * Default: \a tau = -1 (don't use complexity as a termination criterion)
704 */
705 RandomForestOptions & node_complexity_tau(double tau)
706 {
707 node_complexity_tau_ = tau;
708 return *this;
709 }
710
711 /**
712 * @brief Do not split a node if it contains less than min_num_instances data points.
713 *
714 * Default: \a n = 1 (don't use instance count as a termination criterion)
715 */
716 RandomForestOptions & min_num_instances(size_t n)
717 {
718 min_num_instances_ = n;
719 return *this;
720 }
721
722 /**
723 * @brief Use stratification when creating the bootstrap samples.
724 *
725 * That is, preserve the proportion between the number of class instances exactly
726 * rather than on average.
727 *
728 * Default: false
729 */
730 RandomForestOptions & use_stratification(bool b)
731 {
732 use_stratification_ = b;
733 return *this;
734 }
735
736 /**
737 * @brief The number of threads that are used in training.
738 *
739 * \a n = -1 means use number of cores, \a n = 0 means single-threaded training.
740 *
741 * Default: \a n = -1 (use as many threads as there are cores in the machine).
742 */
743 RandomForestOptions & n_threads(int n)
744 {
745 n_threads_ = n;
746 return *this;
747 }
748
749 /**
750 * @brief Each datapoint is weighted by its class weight. By default, each class has weight 1.
751 * @details
752 * The classes in the random forest training have to follow a strict ordering. The weights must be given in that order.
753 * Example:
754 * You have the classes 3, 8 and 5 and use the vector {0.2, 0.3, 0.4} for the class weights.
755 * The ordering of the classes is 3, 5, 8, so class 3 will get weight 0.2, class 5 will get weight 0.3
756 * and class 8 will get weight 0.4.
757 */
758 RandomForestOptions & class_weights(std::vector<double> const & v)
759 {
760 class_weights_ = v;
761 return *this;
762 }
763
764 /**
765 * @brief Get the actual number of features per node.
766 *
767 * @param total the total number of features
768 *
769 * This function is normally only called internally before training is started.
770 */
771 size_t get_features_per_node(size_t total) const
772 {
773 if (features_per_node_switch_ == RF_SQRT)
774 return std::ceil(std::sqrt(total));
775 else if (features_per_node_switch_ == RF_LOG)
776 return std::ceil(std::log(total));
777 else if (features_per_node_switch_ == RF_CONST)
778 return features_per_node_;
779 else if (features_per_node_switch_ == RF_ALL)
780 return total;
781 vigra_fail("RandomForestOptions::get_features_per_node(): Unknown switch.");
782 return 0;
783 }
784
785 int tree_count_;
786 int features_per_node_;
787 RandomForestOptionTags features_per_node_switch_;
788 bool bootstrap_sampling_;
789 size_t resample_count_;
790 RandomForestOptionTags split_;
791 size_t max_depth_;
792 double node_complexity_tau_;
793 size_t min_num_instances_;
794 bool use_stratification_;
795 int n_threads_;
796 std::vector<double> class_weights_;
797
798 };
799
800
801
template <typename LabelType>
class ProblemSpec
{
public:

    /// Describes the training problem: dataset dimensions, the distinct
    /// class labels, and the actually-used mtry/msample values.
    ProblemSpec()
        :
        num_features_(0),
        num_instances_(0),
        num_classes_(0),
        distinct_classes_(),
        actual_mtry_(0),
        actual_msample_(0)
    {}

    /// Set the number of features (chainable).
    ProblemSpec & num_features(size_t n)
    {
        num_features_ = n;
        return *this;
    }

    /// Set the number of training instances (chainable).
    ProblemSpec & num_instances(size_t n)
    {
        num_instances_ = n;
        return *this;
    }

    /// Set the number of classes (chainable).
    ProblemSpec & num_classes(size_t n)
    {
        num_classes_ = n;
        return *this;
    }

    /// Set the distinct class labels; also updates the class count (chainable).
    ProblemSpec & distinct_classes(std::vector<LabelType> v)
    {
        distinct_classes_ = v;
        num_classes_ = v.size();
        return *this;
    }

    /// Set the number of features actually considered per split (chainable).
    ProblemSpec & actual_mtry(size_t m)
    {
        actual_mtry_ = m;
        return *this;
    }

    /// Set the number of samples actually used per tree (chainable).
    ProblemSpec & actual_msample(size_t m)
    {
        actual_msample_ = m;
        return *this;
    }

    /// Two problem specs are equal iff all fields match.
    bool operator==(ProblemSpec const & other) const
    {
        return num_features_ == other.num_features_
            && num_instances_ == other.num_instances_
            && num_classes_ == other.num_classes_
            && distinct_classes_ == other.distinct_classes_
            && actual_mtry_ == other.actual_mtry_
            && actual_msample_ == other.actual_msample_;
    }

    size_t num_features_;                     // number of feature dimensions
    size_t num_instances_;                    // number of training instances
    size_t num_classes_;                      // number of distinct classes
    std::vector<LabelType> distinct_classes_; // the distinct class labels
    size_t actual_mtry_;                      // features considered per split
    size_t actual_msample_;                   // samples used per tree

};
875
876 //@}
877
878 } // namespace rf3
879
880 } // namespace vigra
881
882 #endif
883
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_RF3_VISITORS_HXX
35 #define VIGRA_RF3_VISITORS_HXX
36
37 #include <vector>
38 #include <memory>
39 #include "../multi_array.hxx"
40 #include "../multi_shape.hxx"
41 #include <typeinfo>
42
43
44 namespace vigra
45 {
46 namespace rf3
47 {
48
49 /**
50 * @brief Base class from which all random forest visitors derive.
51 *
52 * @details
53 * Due to the parallel training, we cannot simply use a single visitor for all trees.
54 * Instead, each tree gets a copy of the original visitor.
55 *
56 * The random forest training with visitors looks as follows:
57 * - Do the random forest preprocessing (translate labels to 0, 1, 2, ...).
58 * - Call visit_at_beginning() on the original visitor.
59 * - For each tree:
60 * - - Copy the original visitor and give the copy to the tree.
61 * - - Do the preprocessing (create the bootstrap sample, assign weights to the data points, ...).
62 * - - Call visit_before_tree() on the visitor copy.
63 * - - Do the node splitting until the tree is fully trained.
64 * - - Call visit_after_tree() on the visitor copy.
65 * - Call visit_at_end (which gets a vector with pointers to the visitor copies) on the original visitor.
66 */
class RFVisitorBase
{
public:

    /// A fresh visitor starts out active.
    RFVisitorBase()
        :
        active_(true)
    {}

    /**
     * @brief Hook: called once on the original visitor before training starts.
     */
    void visit_before_training()
    {}

    /**
     * @brief Hook: called once on the original visitor after all trees have
     * been learned.
     *
     * @param v vector with pointers to the per-tree visitor copies
     * @param rf the trained random forest
     */
    template <typename VISITORS, typename RF, typename FEATURES, typename LABELS>
    void visit_after_training(VISITORS &, RF &, const FEATURES &, const LABELS &)
    {}

    /**
     * @brief Hook: called on a tree's visitor copy before that tree is learned.
     *
     * @param weights the actual instance weights (after bootstrap sampling and class weights)
     */
    template <typename TREE, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_before_tree(TREE &, FEATURES &, LABELS &, WEIGHTS &)
    {}

    /**
     * @brief Hook: called on a tree's visitor copy after that tree is learned.
     */
    template <typename RF, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_after_tree(RF &,
                          FEATURES &,
                          LABELS &,
                          WEIGHTS &)
    {}

    /**
     * @brief Hook: called after each node split during tree construction.
     */
    template <typename TREE,
              typename FEATURES,
              typename LABELS,
              typename WEIGHTS,
              typename SCORER,
              typename ITER>
    void visit_after_split(TREE &,
                           FEATURES &,
                           LABELS &,
                           WEIGHTS &,
                           SCORER &,
                           ITER,
                           ITER,
                           ITER)
    {}

    /**
     * @brief Return whether the visitor is active or not.
     */
    bool is_active() const
    {
        return active_;
    }

    /**
     * @brief Activate the visitor.
     */
    void activate()
    {
        active_ = true;
    }

    /**
     * @brief Deactivate the visitor.
     */
    void deactivate()
    {
        active_ = false;
    }

private:

    bool active_; // whether this visitor's hooks should be invoked

};
159
160 /////////////////////////////////////////////
161 // The concrete visitors //
162 /////////////////////////////////////////////
163
164 /**
165 * @brief Compute the out of bag error.
166 *
167 * After training, each data point is put down those trees for which it is OOB.
168 * Using bootstrap sampling, each data point is OOB for approximately 37% of
169 * the trees.
170 */
class OOBError : public RFVisitorBase
{
public:

    /**
     * Save whether a data point is in-bag (weight > 0) or out-of-bag (weight == 0).
     *
     * Throws std::runtime_error when every weight is non-zero, because the
     * OOB error is undefined for a tree without out-of-bag points.
     */
    template <typename TREE, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_before_tree(
            TREE & /*tree*/,
            FEATURES & /*features*/,
            LABELS & /*labels*/,
            WEIGHTS & weights
    ){
        // Weights with magnitude below EPS are treated as exactly zero.
        double const EPS = 1e-20;
        bool found = false;

        // Save the in-bags.
        is_in_bag_.resize(weights.size(), true);
        for (size_t i = 0; i < weights.size(); ++i)
        {
            if (std::abs(weights[i]) < EPS)
            {
                is_in_bag_[i] = false;
                found = true;
            }
        }

        if (!found)
            throw std::runtime_error("OOBError::visit_before_tree(): The tree has no out-of-bags.");
    }

    /**
     * Compute the out-of-bag error.
     *
     * For each data point, a prediction is made using only the trees for
     * which the point was out-of-bag; oob_err_ is the fraction of points
     * whose OOB prediction disagrees with the true label.
     */
    template <typename VISITORS, typename RF, typename FEATURES, typename LABELS>
    void visit_after_training(
            VISITORS & visitors,
            RF & rf,
            const FEATURES & features,
            const LABELS & labels
    ){
        // Check the input sizes.
        vigra_precondition(rf.num_trees() > 0, "OOBError::visit_after_training(): Number of trees must be greater than zero after training.");
        vigra_precondition(visitors.size() == rf.num_trees(), "OOBError::visit_after_training(): Number of visitors must be equal to number of trees.");
        size_t const num_instances = features.shape()[0];
        auto const num_features = features.shape()[1];
        for (auto vptr : visitors)
            vigra_precondition(vptr->is_in_bag_.size() == num_instances, "OOBError::visit_after_training(): Some visitors have the wrong number of data points.");

        // Get a prediction for each data point using only the trees where it is out of bag.
        typedef typename std::remove_const<LABELS>::type Labels;
        Labels pred(Shape1(1));  // single-element buffer, reused for every instance
        oob_err_ = 0.0;
        for (size_t i = 0; i < (size_t)num_instances; ++i)
        {
            // Get the indices of the trees where the data point is out of bag.
            std::vector<size_t> tree_indices;
            for (size_t k = 0; k < visitors.size(); ++k)
                if (!visitors[k]->is_in_bag_[i])
                    tree_indices.push_back(k);

            // Get the prediction using the above trees (row i only).
            auto const sub_features = features.subarray(Shape2(i, 0), Shape2(i+1, num_features));
            rf.predict(sub_features, pred, 1, tree_indices);
            if (pred(0) != labels(i))
                oob_err_ += 1.0;
        }
        oob_err_ /= num_instances;
    }

    /**
     * the out-of-bag error
     */
    double oob_err_;

private:
    std::vector<bool> is_in_bag_; // whether a data point is in-bag or out-of-bag
};
250
251
252
253 /**
254 * @brief Compute the variable importance.
255 */
256 class VariableImportance : public RFVisitorBase
257 {
258 public:
259
    /// @brief Constructor.
    /// @param repetition_count presumably the number of feature-permutation
    ///        repetitions used for the permutation importance — confirm in
    ///        visit_after_tree (not fully visible here).
    VariableImportance(size_t repetition_count = 10)
        :
        repetition_count_(repetition_count)
    {}
264
265 /**
266 * Resize the variable importance array and store in-bag / out-of-bag information.
267 */
    /**
     * Resize the variable importance array and store in-bag / out-of-bag
     * information. Throws std::runtime_error when the tree has no
     * out-of-bag points (all weights non-zero).
     */
    template <typename TREE, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_before_tree(
            TREE & tree,
            FEATURES & features,
            LABELS & /*labels*/,
            WEIGHTS & weights
    ){
        // Resize the variable importance array.
        // The shape differs from the shape of the actual output, since the single trees
        // only store the impurity decrease without the permutation importances.
        auto const num_features = features.shape()[1];
        variable_importance_.reshape(Shape2(num_features, tree.num_classes()+2), 0.0);

        // Save the in-bags (weights with magnitude below EPS count as zero,
        // i.e. out-of-bag).
        double const EPS = 1e-20;
        bool found = false;
        is_in_bag_.resize(weights.size(), true);
        for (size_t i = 0; i < weights.size(); ++i)
        {
            if (std::abs(weights[i]) < EPS)
            {
                is_in_bag_[i] = false;
                found = true;
            }
        }
        if (!found)
            throw std::runtime_error("VariableImportance::visit_before_tree(): The tree has no out-of-bags.");
    }
296
297 /**
298 * Calculate the impurity decrease based variable importance after every split.
299 */
300 template <typename TREE,
301 typename FEATURES,
302 typename LABELS,
303 typename WEIGHTS,
304 typename SCORER,
305 typename ITER>
306 void visit_after_split(TREE & tree,
307 FEATURES & /*features*/,
308 LABELS & labels,
309 WEIGHTS & weights,
310 SCORER & scorer,
311 ITER begin,
312 ITER /*split*/,
313 ITER end)
314 {
315 // Update the impurity decrease.
316 typename SCORER::Functor functor;
317 auto const region_impurity = functor.region_score(labels, weights, begin, end);
318 auto const split_impurity = scorer.best_score_;
319 variable_importance_(scorer.best_dim_, tree.num_classes()+1) += region_impurity - split_impurity;
320 }
321
322 /**
323 * Compute the permutation importance.
324 */
325 template <typename RF, typename FEATURES, typename LABELS, typename WEIGHTS>
326 void visit_after_tree(RF & rf,
327 const FEATURES & features,
328 const LABELS & labels,
329 WEIGHTS & /*weights*/)
330 {
331 // Non-const types of features and labels.
332 typedef typename std::remove_const<FEATURES>::type Features;
333 typedef typename std::remove_const<LABELS>::type Labels;
334
335 typedef typename Features::value_type FeatureType;
336
337 auto const num_features = features.shape()[1];
338
339 // For the permutation importance, the features must be permuted (obviously).
340 // This cannot be done on the original feature matrix, since it would interfere
341 // with other threads in concurrent training. Therefore, we have to make a copy.
342 Features feats;
343 Labels labs;
344 copy_out_of_bags(features, labels, feats, labs);
345 auto const num_oobs = feats.shape()[0];
346
347 // Compute (standard and class-wise) out-of-bag success rate with the original sample.
348 MultiArray<1, double> oob_right(Shape1(rf.num_classes()+1), 0.0);
349 vigra::MultiArray<1,int> pred( (Shape1(num_oobs)) );
350 rf.predict(feats, pred, 1);
351 for (size_t i = 0; i < (size_t)labs.size(); ++i)
352 {
353 if (labs(i) == pred(i))
354 {
355 oob_right(labs(i)) += 1.0; // per class
356 oob_right(rf.num_classes()) += 1.0; // total
357 }
358 }
359
360 // Get out-of-bag success rate after permuting the j'th dimension.
361 UniformIntRandomFunctor<MersenneTwister> randint;
362 for (size_t j = 0; j < (size_t)num_features; ++j)
363 {
364 MultiArray<1, FeatureType> backup(( Shape1(num_oobs) ));
365 backup = feats.template bind<1>(j);
366 MultiArray<2, double> perm_oob_right(Shape2(1, rf.num_classes()+1), 0.0);
367
368 for (size_t k = 0; k < repetition_count_; ++k)
369 {
370 // Permute the current dimension.
371 for (int ii = num_oobs-1; ii >= 1; --ii)
372 std::swap(feats(ii, j), feats(randint(ii+1), j));
373
374 // Get the out-of-bag success rate after permuting.
375 rf.predict(feats, pred, 1);
376 for (size_t i = 0; i < (size_t)labs.size(); ++i)
377 {
378 if (labs(i) == pred(i))
379 {
380 perm_oob_right(0, labs(i)) += 1.0; // per class
381 perm_oob_right(0, rf.num_classes()) += 1.0; // total
382 }
383 }
384 }
385
386 // Normalize and add to the importance matrix.
387 perm_oob_right /= repetition_count_;
388 perm_oob_right.bind<0>(0) -= oob_right;
389 perm_oob_right *= -1;
390 perm_oob_right /= num_oobs;
391 variable_importance_.subarray(Shape2(j, 0), Shape2(j+1, rf.num_classes()+1)) += perm_oob_right;
392
393 // Copy back the permuted dimension.
394 feats.template bind<1>(j) = backup;
395 }
396 }
397
398 /**
399 * Accumulate the variable importances from the single trees.
400 */
401 template <typename VISITORS, typename RF, typename FEATURES, typename LABELS>
402 void visit_after_training(
403 VISITORS & visitors,
404 RF & rf,
405 const FEATURES & features,
406 const LABELS & /*labels*/
407 ){
408 vigra_precondition(rf.num_trees() > 0, "VariableImportance::visit_after_training(): Number of trees must be greater than zero after training.");
409 vigra_precondition(visitors.size() == rf.num_trees(), "VariableImportance::visit_after_training(): Number of visitors must be equal to number of trees.");
410
411 // Sum the variable importances from the single trees.
412 auto const num_features = features.shape()[1];
413 variable_importance_.reshape(Shape2(num_features, rf.num_classes()+2), 0.0);
414 for (auto vptr : visitors)
415 {
416 vigra_precondition(vptr->variable_importance_.shape() == variable_importance_.shape(),
417 "VariableImportance::visit_after_training(): Shape mismatch.");
418 variable_importance_ += vptr->variable_importance_;
419 }
420
421 // Normalize the variable importance.
422 variable_importance_ /= rf.num_trees();
423 }
424
425 /**
426 * This Array has the same entries as the R - random forest variable
427 * importance.
428 * Matrix is featureCount by (classCount +2)
429 * variable_importance_(ii,jj) is the variable importance measure of
430 * the ii-th variable according to:
431 * jj = 0 - (classCount-1)
432 * classwise permutation importance
433 * jj = rowCount(variable_importance_) -2
434 * permutation importance
435 * jj = rowCount(variable_importance_) -1
436 * gini decrease importance.
437 *
438 * permutation importance:
439 * The difference between the fraction of OOB samples classified correctly
440 * before and after permuting (randomizing) the ii-th column is calculated.
441 * The ii-th column is permuted rep_cnt times.
442 *
443 * class wise permutation importance:
444 * same as permutation importance. We only look at those OOB samples whose
445 * response corresponds to class jj.
446 *
447 * gini decrease importance:
448 * row ii corresponds to the sum of all gini decreases induced by variable ii
449 * in each node of the random forest.
450 */
451 MultiArray<2, double> variable_importance_;
452
453 /**
454 * how often the permutation takes place
455 */
456 size_t repetition_count_;
457
458 private:
459
460 /**
461 * Copy the out-of-bag features and labels.
462 */
463 template <typename F0, typename L0, typename F1, typename L1>
464 void copy_out_of_bags(
465 F0 const & features_in,
466 L0 const & labels_in,
467 F1 & features_out,
468 L1 & labels_out
469 ) const {
470 auto const num_instances = features_in.shape()[0];
471 auto const num_features = features_in.shape()[1];
472
473 // Count the out-of-bags.
474 size_t num_oobs = 0;
475 for (auto x : is_in_bag_)
476 if (!x)
477 ++num_oobs;
478
479 // Copy the out-of-bags.
480 features_out.reshape(Shape2(num_oobs, num_features));
481 labels_out.reshape(Shape1(num_oobs));
482 size_t current = 0;
483 for (size_t i = 0; i < (size_t)num_instances; ++i)
484 {
485 if (!is_in_bag_[i])
486 {
487 auto const src = features_in.template bind<0>(i);
488 auto out = features_out.template bind<0>(current);
489 out = src;
490 labels_out(current) = labels_in(i);
491 ++current;
492 }
493 }
494 }
495
496 std::vector<bool> is_in_bag_; // whether a data point is in-bag or out-of-bag
497 };
498
499
500
501 /////////////////////////////////////////////
502 // The visitor chain //
503 /////////////////////////////////////////////
504
/**
 * @brief Sentinel visitor that terminates a visitor chain (= "do nothing").
 *
 * Inherits all no-op callbacks from RFVisitorBase and is used as the
 * default tail type of detail::RFVisitorNode.
 */
class RFStopVisiting : public RFVisitorBase
{};
510
511 namespace detail
512 {
513
/**
 * @brief Container elements of the statically linked visitor list. Use the create_visitor() functions to create visitors up to size 10.
 *
 * Each node holds one concrete visitor plus the next node in the chain; the
 * chain ends with RFStopVisiting. Every visit_* call is forwarded to the own
 * visitor (if active) and then recursively to the rest of the chain.
 */
template <typename VISITOR, typename NEXT = RFStopVisiting, bool CPY = false>
class RFVisitorNode
{
public:

    typedef VISITOR Visitor;
    typedef NEXT Next;

    // If CPY is true the visitor is stored by value (each tree gets its own
    // copy during parallel training), otherwise by reference.
    typename std::conditional<CPY, Visitor, Visitor &>::type visitor_;
    Next next_;

    // Construct from a visitor and an explicit tail.
    RFVisitorNode(Visitor & visitor, Next next)
        :
        visitor_(visitor),
        next_(next)
    {}

    // Construct a chain-terminating node (tail defaults to RFStopVisiting).
    explicit RFVisitorNode(Visitor & visitor)
        :
        visitor_(visitor),
        next_(RFStopVisiting())
    {}

    // Conversion between the by-value and by-reference flavors of the same node.
    explicit RFVisitorNode(RFVisitorNode<Visitor, Next, !CPY> & other)
        :
        visitor_(other.visitor_),
        next_(other.next_)
    {}

    explicit RFVisitorNode(RFVisitorNode<Visitor, Next, !CPY> const & other)
        :
        visitor_(other.visitor_),
        next_(other.next_)
    {}

    // Forward visit_before_training along the chain.
    void visit_before_training()
    {
        if (visitor_.is_active())
            visitor_.visit_before_training();
        next_.visit_before_training();
    }

    // Forward visit_after_training along the chain.
    // v holds one visitor node (of this chain position's type) per tree.
    template <typename VISITORS, typename RF, typename FEATURES, typename LABELS>
    void visit_after_training(VISITORS & v, RF & rf, const FEATURES & features, const LABELS & labels)
    {
        typedef typename VISITORS::value_type VisitorNodeType;
        typedef typename VisitorNodeType::Visitor VisitorType;
        typedef typename VisitorNodeType::Next NextType;

        // We want to call the visit_after_training function of the concrete visitor (e. g. OOBError).
        // Since v is a vector of visitor nodes (and not a vector of concrete visitors), we have to
        // extract the concrete visitors.
        // A vector cannot hold references, so we use pointers instead.
        if (visitor_.is_active())
        {
            std::vector<VisitorType*> visitors;
            for (auto & x : v)
                visitors.push_back(&x.visitor_);
            visitor_.visit_after_training(visitors, rf, features, labels);
        }

        // Remove the concrete visitors that we just visited.
        std::vector<NextType> nexts;
        for (auto & x : v)
            nexts.push_back(x.next_);

        // Call the next visitor node in the chain.
        next_.visit_after_training(nexts, rf, features, labels);
    }

    // Forward visit_before_tree along the chain.
    template <typename TREE, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_before_tree(TREE & tree, FEATURES & features, LABELS & labels, WEIGHTS & weights)
    {
        if (visitor_.is_active())
            visitor_.visit_before_tree(tree, features, labels, weights);
        next_.visit_before_tree(tree, features, labels, weights);
    }

    // Forward visit_after_tree along the chain.
    template <typename RF, typename FEATURES, typename LABELS, typename WEIGHTS>
    void visit_after_tree(RF & rf,
                          FEATURES & features,
                          LABELS & labels,
                          WEIGHTS & weights)
    {
        if (visitor_.is_active())
            visitor_.visit_after_tree(rf, features, labels, weights);
        next_.visit_after_tree(rf, features, labels, weights);
    }

    // Forward visit_after_split along the chain.
    template <typename TREE,
              typename FEATURES,
              typename LABELS,
              typename WEIGHTS,
              typename SCORER,
              typename ITER>
    void visit_after_split(TREE & tree,
                           FEATURES & features,
                           LABELS & labels,
                           WEIGHTS & weights,
                           SCORER & scorer,
                           ITER begin,
                           ITER split,
                           ITER end)
    {
        if (visitor_.is_active())
            visitor_.visit_after_split(tree, features, labels, weights, scorer, begin, split, end);
        next_.visit_after_split(tree, features, labels, weights, scorer, begin, split, end);
    }

};
627
628 } // namespace detail
629
630 /**
631 * The VisitorCopy can be used to set the copy argument of the given visitor chain to true.
632 */
633 template <typename VISITOR>
634 struct VisitorCopy
635 {
636 typedef detail::RFVisitorNode<typename VISITOR::Visitor, typename VisitorCopy<typename VISITOR::Next>::type, true> type;
637 };
638
639 template <>
640 struct VisitorCopy<RFStopVisiting>
641 {
642 typedef RFStopVisiting type;
643 };
644
645
646
647 //////////////////////////////////////////////////////////
648 // Visitor factory functions for up to 10 visitors. //
649 // FIXME: This should be a variadic template. //
650 //////////////////////////////////////////////////////////
651
652 template<typename A>
653 detail::RFVisitorNode<A>
654 create_visitor(A & a)
655 {
656 typedef detail::RFVisitorNode<A> _0_t;
657 _0_t _0(a);
658 return _0;
659 }
660
661 template<typename A, typename B>
662 detail::RFVisitorNode<A, detail::RFVisitorNode<B> >
663 create_visitor(A & a, B & b)
664 {
665 typedef detail::RFVisitorNode<B> _1_t;
666 _1_t _1(b);
667 typedef detail::RFVisitorNode<A, _1_t> _0_t;
668 _0_t _0(a, _1);
669 return _0;
670 }
671
672 template<typename A, typename B, typename C>
673 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C> > >
674 create_visitor(A & a, B & b, C & c)
675 {
676 typedef detail::RFVisitorNode<C> _2_t;
677 _2_t _2(c);
678 typedef detail::RFVisitorNode<B, _2_t> _1_t;
679 _1_t _1(b, _2);
680 typedef detail::RFVisitorNode<A, _1_t> _0_t;
681 _0_t _0(a, _1);
682 return _0;
683 }
684
685 template<typename A, typename B, typename C, typename D>
686 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
687 detail::RFVisitorNode<D> > > >
688 create_visitor(A & a, B & b, C & c, D & d)
689 {
690 typedef detail::RFVisitorNode<D> _3_t;
691 _3_t _3(d);
692 typedef detail::RFVisitorNode<C, _3_t> _2_t;
693 _2_t _2(c, _3);
694 typedef detail::RFVisitorNode<B, _2_t> _1_t;
695 _1_t _1(b, _2);
696 typedef detail::RFVisitorNode<A, _1_t> _0_t;
697 _0_t _0(a, _1);
698 return _0;
699 }
700
701 template<typename A, typename B, typename C, typename D, typename E>
702 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
703 detail::RFVisitorNode<D, detail::RFVisitorNode<E> > > > >
704 create_visitor(A & a, B & b, C & c, D & d, E & e)
705 {
706 typedef detail::RFVisitorNode<E> _4_t;
707 _4_t _4(e);
708 typedef detail::RFVisitorNode<D, _4_t> _3_t;
709 _3_t _3(d, _4);
710 typedef detail::RFVisitorNode<C, _3_t> _2_t;
711 _2_t _2(c, _3);
712 typedef detail::RFVisitorNode<B, _2_t> _1_t;
713 _1_t _1(b, _2);
714 typedef detail::RFVisitorNode<A, _1_t> _0_t;
715 _0_t _0(a, _1);
716 return _0;
717 }
718
719 template<typename A, typename B, typename C, typename D, typename E,
720 typename F>
721 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
722 detail::RFVisitorNode<D, detail::RFVisitorNode<E, detail::RFVisitorNode<F> > > > > >
723 create_visitor(A & a, B & b, C & c, D & d, E & e, F & f)
724 {
725 typedef detail::RFVisitorNode<F> _5_t;
726 _5_t _5(f);
727 typedef detail::RFVisitorNode<E, _5_t> _4_t;
728 _4_t _4(e, _5);
729 typedef detail::RFVisitorNode<D, _4_t> _3_t;
730 _3_t _3(d, _4);
731 typedef detail::RFVisitorNode<C, _3_t> _2_t;
732 _2_t _2(c, _3);
733 typedef detail::RFVisitorNode<B, _2_t> _1_t;
734 _1_t _1(b, _2);
735 typedef detail::RFVisitorNode<A, _1_t> _0_t;
736 _0_t _0(a, _1);
737 return _0;
738 }
739
740 template<typename A, typename B, typename C, typename D, typename E,
741 typename F, typename G>
742 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
743 detail::RFVisitorNode<D, detail::RFVisitorNode<E, detail::RFVisitorNode<F,
744 detail::RFVisitorNode<G> > > > > > >
745 create_visitor(A & a, B & b, C & c, D & d, E & e, F & f, G & g)
746 {
747 typedef detail::RFVisitorNode<G> _6_t;
748 _6_t _6(g);
749 typedef detail::RFVisitorNode<F, _6_t> _5_t;
750 _5_t _5(f, _6);
751 typedef detail::RFVisitorNode<E, _5_t> _4_t;
752 _4_t _4(e, _5);
753 typedef detail::RFVisitorNode<D, _4_t> _3_t;
754 _3_t _3(d, _4);
755 typedef detail::RFVisitorNode<C, _3_t> _2_t;
756 _2_t _2(c, _3);
757 typedef detail::RFVisitorNode<B, _2_t> _1_t;
758 _1_t _1(b, _2);
759 typedef detail::RFVisitorNode<A, _1_t> _0_t;
760 _0_t _0(a, _1);
761 return _0;
762 }
763
764 template<typename A, typename B, typename C, typename D, typename E,
765 typename F, typename G, typename H>
766 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
767 detail::RFVisitorNode<D, detail::RFVisitorNode<E, detail::RFVisitorNode<F,
768 detail::RFVisitorNode<G, detail::RFVisitorNode<H> > > > > > > >
769 create_visitor(A & a, B & b, C & c, D & d, E & e, F & f, G & g, H & h)
770 {
771 typedef detail::RFVisitorNode<H> _7_t;
772 _7_t _7(h);
773 typedef detail::RFVisitorNode<G, _7_t> _6_t;
774 _6_t _6(g, _7);
775 typedef detail::RFVisitorNode<F, _6_t> _5_t;
776 _5_t _5(f, _6);
777 typedef detail::RFVisitorNode<E, _5_t> _4_t;
778 _4_t _4(e, _5);
779 typedef detail::RFVisitorNode<D, _4_t> _3_t;
780 _3_t _3(d, _4);
781 typedef detail::RFVisitorNode<C, _3_t> _2_t;
782 _2_t _2(c, _3);
783 typedef detail::RFVisitorNode<B, _2_t> _1_t;
784 _1_t _1(b, _2);
785 typedef detail::RFVisitorNode<A, _1_t> _0_t;
786 _0_t _0(a, _1);
787 return _0;
788 }
789
790 template<typename A, typename B, typename C, typename D, typename E,
791 typename F, typename G, typename H, typename I>
792 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
793 detail::RFVisitorNode<D, detail::RFVisitorNode<E, detail::RFVisitorNode<F,
794 detail::RFVisitorNode<G, detail::RFVisitorNode<H, detail::RFVisitorNode<I> > > > > > > > >
795 create_visitor(A & a, B & b, C & c, D & d, E & e, F & f, G & g, H & h, I & i)
796 {
797 typedef detail::RFVisitorNode<I> _8_t;
798 _8_t _8(i);
799 typedef detail::RFVisitorNode<H, _8_t> _7_t;
800 _7_t _7(h, _8);
801 typedef detail::RFVisitorNode<G, _7_t> _6_t;
802 _6_t _6(g, _7);
803 typedef detail::RFVisitorNode<F, _6_t> _5_t;
804 _5_t _5(f, _6);
805 typedef detail::RFVisitorNode<E, _5_t> _4_t;
806 _4_t _4(e, _5);
807 typedef detail::RFVisitorNode<D, _4_t> _3_t;
808 _3_t _3(d, _4);
809 typedef detail::RFVisitorNode<C, _3_t> _2_t;
810 _2_t _2(c, _3);
811 typedef detail::RFVisitorNode<B, _2_t> _1_t;
812 _1_t _1(b, _2);
813 typedef detail::RFVisitorNode<A, _1_t> _0_t;
814 _0_t _0(a, _1);
815 return _0;
816 }
817
818 template<typename A, typename B, typename C, typename D, typename E,
819 typename F, typename G, typename H, typename I, typename J>
820 detail::RFVisitorNode<A, detail::RFVisitorNode<B, detail::RFVisitorNode<C,
821 detail::RFVisitorNode<D, detail::RFVisitorNode<E, detail::RFVisitorNode<F,
822 detail::RFVisitorNode<G, detail::RFVisitorNode<H, detail::RFVisitorNode<I,
823 detail::RFVisitorNode<J> > > > > > > > > >
824 create_visitor(A & a, B & b, C & c, D & d, E & e, F & f, G & g, H & h, I & i,
825 J & j)
826 {
827 typedef detail::RFVisitorNode<J> _9_t;
828 _9_t _9(j);
829 typedef detail::RFVisitorNode<I, _9_t> _8_t;
830 _8_t _8(i, _9);
831 typedef detail::RFVisitorNode<H, _8_t> _7_t;
832 _7_t _7(h, _8);
833 typedef detail::RFVisitorNode<G, _7_t> _6_t;
834 _6_t _6(g, _7);
835 typedef detail::RFVisitorNode<F, _6_t> _5_t;
836 _5_t _5(f, _6);
837 typedef detail::RFVisitorNode<E, _5_t> _4_t;
838 _4_t _4(e, _5);
839 typedef detail::RFVisitorNode<D, _4_t> _3_t;
840 _3_t _3(d, _4);
841 typedef detail::RFVisitorNode<C, _3_t> _2_t;
842 _2_t _2(c, _3);
843 typedef detail::RFVisitorNode<B, _2_t> _1_t;
844 _1_t _1(b, _2);
845 typedef detail::RFVisitorNode<A, _1_t> _0_t;
846 _0_t _0(a, _1);
847 return _0;
848 }
849
850
851
852 } // namespace rf3
853 } // namespace vigra
854
855 #endif
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
/*        http://ukoethe.github.io/vigra/                               */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #ifndef VIGRA_RF3_HXX
35 #define VIGRA_RF3_HXX
36
37 #include <vector>
38 #include <set>
39 #include <map>
40 #include <stack>
41 #include <algorithm>
42
43 #include "multi_array.hxx"
44 #include "sampling.hxx"
45 #include "threading.hxx"
46 #include "threadpool.hxx"
47 #include "random_forest_3/random_forest.hxx"
48 #include "random_forest_3/random_forest_common.hxx"
49 #include "random_forest_3/random_forest_visitors.hxx"
50
51 namespace vigra
52 {
53
54 /** \addtogroup MachineLearning
55 **/
56 //@{
57
58 /** \brief Random forest version 3.
59
60 This namespace contains VIGRA's 3rd version of the random forest classification/regression algorithm.
61 This version is much easier to customize than previous versions because it consequently separates
62 algorithms from the forest representation, following the design of the LEMON graph library.
63 */
64 namespace rf3
65 {
66
67 template <typename FEATURES, typename LABELS>
68 struct DefaultRF
69 {
70 typedef RandomForest<FEATURES,
71 LABELS,
72 LessEqualSplitTest<typename FEATURES::value_type>,
73 ArgMaxVectorAcc<double> > type;
74 };
75
76 namespace detail
77 {
78
// In random forest training, the content stored in a leaf depends on the
// accumulator: most accumulators keep the full class distribution, while
// ArgMaxAcc only needs the winning label. RFMapUpdater writes exactly the
// information the accumulator requires into the leaf.

// Generic case: store the value (e.g. the class distribution) as-is.
template <typename ACC>
struct RFMapUpdater
{
    template <typename A, typename B>
    void operator()(A & dest, B const & src) const
    {
        dest = src;
    }
};
93
94
95
96 template <>
97 struct RFMapUpdater<ArgMaxAcc>
98 {
99 template <typename A, typename B>
100 void operator()(A & a, B const & b) const
101 {
102 auto it = std::max_element(b.begin(), b.end());
103 a = std::distance(b.begin(), it);
104 }
105 };
106
107
108
109 /// Loop over the split dimensions and compute the score of all considered splits.
110 template <typename FEATURES, typename LABELS, typename SAMPLER, typename SCORER>
111 void split_score(
112 FEATURES const & features,
113 LABELS const & labels,
114 std::vector<double> const & instance_weights,
115 std::vector<size_t> const & instances,
116 SAMPLER const & dim_sampler,
117 SCORER & score
118 ){
119 typedef typename FEATURES::value_type FeatureType;
120
121 auto feats = std::vector<FeatureType>(instances.size()); // storage for the features
122 auto sorted_indices = std::vector<size_t>(feats.size()); // storage for the index sort result
123 auto tosort_instances = std::vector<size_t>(feats.size()); // storage for the sorted instances
124
125 for (int i = 0; i < dim_sampler.sampleSize(); ++i)
126 {
127 size_t const d = dim_sampler[i];
128
129 // Copy the features to a vector with the correct size (so the sort is faster because of data locality).
130 for (size_t kk = 0; kk < instances.size(); ++kk)
131 feats[kk] = features(instances[kk], d);
132
133 // Sort the features.
134 indexSort(feats.begin(), feats.end(), sorted_indices.begin());
135 std::copy(instances.begin(), instances.end(), tosort_instances.begin());
136 applyPermutation(sorted_indices.begin(), sorted_indices.end(), instances.begin(), tosort_instances.begin());
137
138 // Get the score of the splits.
139 score(features, labels, instance_weights, tosort_instances.begin(), tosort_instances.end(), d);
140 }
141 }
142
143
144
/**
 * @brief Train a single randomized decision tree.
 *
 * Grows one tree of the forest: draws a (optionally stratified) bootstrap
 * sample, then repeatedly pops nodes from a work stack, finds the best split
 * over a random subset of mtry dimensions, partitions the instances, and
 * either recurses into the children or turns them into leaves according to
 * the stop criterion. The visitor chain is notified before/after the tree
 * and after every split.
 *
 * @param features   feature matrix (num_instances x num_features)
 * @param labels     labels already transformed to 0, 1, 2, ...
 * @param options    training options (bootstrap, class weights, resampling, ...)
 * @param visitor    visitor chain to notify
 * @param stop       stop criterion functor (taken by value)
 * @param tree       output: the tree to be trained (problem_spec_ must be set)
 * @param randengine random engine used for all sampling in this tree
 */
template <typename RF, typename SCORER, typename VISITOR, typename STOP, typename RANDENGINE>
void random_forest_single_tree(
        typename RF::Features const & features,
        MultiArray<1, size_t> const & labels,
        RandomForestOptions const & options,
        VISITOR & visitor,
        STOP stop,
        RF & tree,
        RANDENGINE const & randengine
){
    typedef typename RF::Features Features;
    typedef typename Features::value_type FeatureType;
    typedef LessEqualSplitTest<FeatureType> SplitTests;
    typedef typename RF::Node Node;
    typedef typename RF::ACC ACC;
    typedef typename ACC::input_type ACCInputType;

    static_assert(std::is_same<SplitTests, typename RF::SplitTests>::value,
                  "random_forest_single_tree(): Wrong Random Forest class.");

    // NOTE(review): mixed signed/unsigned sizes inherited from the MultiArray
    // API — num_instances stays int, num_features size_t.
    int const num_instances = features.shape()[0];
    size_t const num_features = features.shape()[1];
    auto const & spec = tree.problem_spec_;

    vigra_precondition(num_instances == labels.size(),
                       "random_forest_single_tree(): Shape mismatch between features and labels.");
    vigra_precondition(num_features == spec.num_features_,
                       "random_forest_single_tree(): Wrong number of features.");

    // Create the index vector for bookkeeping. Nodes refer to their instances
    // as iterator ranges into this vector, which gets partitioned in place.
    std::vector<size_t> instance_indices(num_instances);
    std::iota(instance_indices.begin(), instance_indices.end(), 0);
    typedef std::vector<size_t>::iterator InstanceIter;

    // Create the weights for the bootstrap sample. Without bootstrap sampling
    // every instance has weight 1; otherwise the weight is the number of
    // times the instance was drawn (0 = out-of-bag).
    std::vector<double> instance_weights(num_instances, 1.0);
    if (options.bootstrap_sampling_)
    {
        std::fill(instance_weights.begin(), instance_weights.end(), 0.0);
        Sampler<MersenneTwister> sampler(num_instances,
                                         SamplerOptions().withReplacement().stratified(options.use_stratification_),
                                         &randengine);
        sampler.sample();
        for (int i = 0; i < sampler.sampleSize(); ++i)
        {
            int const index = sampler[i];
            ++instance_weights[index];
        }
    }

    // Multiply the instance weights by the class weights.
    if (options.class_weights_.size() > 0)
    {
        for (size_t i = 0; i < instance_weights.size(); ++i)
            instance_weights[i] *= options.class_weights_.at(labels(i));
    }

    // Create the sampler for the split dimensions (mtry dimensions per node,
    // drawn without replacement).
    auto const mtry = spec.actual_mtry_;
    Sampler<MersenneTwister> dim_sampler(num_features, SamplerOptions().withoutReplacement().sampleSize(mtry), &randengine);

    // Create the node stack and place the root node inside.
    std::stack<Node> node_stack;
    typedef std::pair<InstanceIter, InstanceIter> IterPair;
    PropertyMap<Node, IterPair> instance_range; // begin and end of the instances of a node in the bookkeeping vector
    PropertyMap<Node, std::vector<double> > node_distributions; // the class distributions in the nodes
    PropertyMap<Node, size_t> node_depths; // the depth of each node
    {
        auto const rootnode = tree.graph_.addNode();
        node_stack.push(rootnode);

        instance_range.insert(rootnode, IterPair(instance_indices.begin(), instance_indices.end()));

        // The root distribution is the weighted class histogram of all instances.
        std::vector<double> priors(spec.num_classes_, 0.0);
        for (auto i : instance_indices)
            priors[labels(i)] += instance_weights[i];
        node_distributions.insert(rootnode, priors);

        node_depths.insert(rootnode, 0);
    }

    // Call the visitor.
    visitor.visit_before_tree(tree, features, labels, instance_weights);

    // Split the nodes.
    detail::RFMapUpdater<ACC> node_map_updater;
    while (!node_stack.empty())
    {
        // Get the data of the current node.
        auto const node = node_stack.top();
        node_stack.pop();
        auto const begin = instance_range.at(node).first;
        auto const end = instance_range.at(node).second;
        auto const & priors = node_distributions.at(node);
        auto const depth = node_depths.at(node);

        // Get the instances with weight > 0 (out-of-bag instances are skipped).
        std::vector<size_t> used_instances;
        for (auto it = begin; it != end; ++it)
            if (instance_weights[*it] > 1e-10)
                used_instances.push_back(*it);

        // Find the best split.
        dim_sampler.sample();
        SCORER score(priors);
        if (options.resample_count_ == 0 || used_instances.size() <= options.resample_count_)
        {
            // Find the split using all instances.
            detail::split_score(
                features,
                labels,
                instance_weights,
                used_instances,
                dim_sampler,
                score
            );
        }
        else
        {
            // Generate a random subset of the instances.
            Sampler<MersenneTwister> resampler(used_instances.begin(), used_instances.end(), SamplerOptions().withoutReplacement().sampleSize(options.resample_count_), &randengine);
            resampler.sample();
            auto indices = std::vector<size_t>(options.resample_count_);
            for (size_t i = 0; i < options.resample_count_; ++i)
                indices[i] = used_instances[resampler[i]];

            // Find the split using the subset.
            detail::split_score(
                features,
                labels,
                instance_weights,
                indices,
                dim_sampler,
                score
            );
        }

        // If no split was found, the node is terminal: store the leaf response.
        if (!score.split_found_)
        {
            tree.node_responses_.insert(node, ACCInputType());
            node_map_updater(tree.node_responses_.at(node), node_distributions.at(node));
            continue;
        }

        // Create the child nodes and split the instances accordingly
        // (in-place partition of the bookkeeping range by the split test).
        auto const n_left = tree.graph_.addNode();
        auto const n_right = tree.graph_.addNode();
        tree.graph_.addArc(node, n_left);
        tree.graph_.addArc(node, n_right);
        auto const best_split = score.best_split_;
        auto const best_dim = score.best_dim_;
        auto const split_iter = std::partition(begin, end,
            [&](size_t i)
            {
                return features(i, best_dim) <= best_split;
            }
        );

        // Call the visitor.
        visitor.visit_after_split(tree, features, labels, instance_weights, score, begin, split_iter, end);

        instance_range.insert(n_left, IterPair(begin, split_iter));
        instance_range.insert(n_right, IterPair(split_iter, end));
        tree.split_tests_.insert(node, SplitTests(best_dim, best_split));
        node_depths.insert(n_left, depth+1);
        node_depths.insert(n_right, depth+1);

        // Compute the class distribution for the left child.
        auto priors_left = std::vector<double>(spec.num_classes_, 0.0);
        for (auto it = begin; it != split_iter; ++it)
            priors_left[labels(*it)] += instance_weights[*it];
        node_distributions.insert(n_left, priors_left);

        // Check if the left child is terminal.
        if (stop(labels, RFNodeDescription<decltype(priors_left)>(depth+1, priors_left)))
        {
            tree.node_responses_.insert(n_left, ACCInputType());
            node_map_updater(tree.node_responses_.at(n_left), node_distributions.at(n_left));
        }
        else
        {
            node_stack.push(n_left);
        }

        // Compute the class distribution for the right child.
        auto priors_right = std::vector<double>(spec.num_classes_, 0.0);
        for (auto it = split_iter; it != end; ++it)
            priors_right[labels(*it)] += instance_weights[*it];
        node_distributions.insert(n_right, priors_right);

        // Check if the right child is terminal.
        if (stop(labels, RFNodeDescription<decltype(priors_right)>(depth+1, priors_right)))
        {
            tree.node_responses_.insert(n_right, ACCInputType());
            node_map_updater(tree.node_responses_.at(n_right), node_distributions.at(n_right));
        }
        else
        {
            node_stack.push(n_right);
        }
    }

    // Call the visitor.
    visitor.visit_after_tree(tree, features, labels, instance_weights);
}
355
356
357
/// \brief Preprocess the labels and call the train functions on the single trees.
///
/// Steps: build the problem specification from the data shape, remap the
/// labels to the contiguous range 0 .. num_classes-1, seed one random engine
/// per worker thread from the given global engine, train all trees in a
/// thread pool, and finally merge the single trees into the returned forest.
///
/// \param features   feature matrix of shape (num_instances x num_features).
/// \param labels     label array of length num_instances.
/// \param options    training options (tree count, thread count, class weights, ...).
/// \param visitor    visitor chain; copied once per tree below, because the
///                   original chain holds references and a plain copy would be useless.
/// \param stop       node stop criterion functor, forwarded to each single tree.
/// \param randengine global random engine, used only to create per-thread seeds.
template <typename FEATURES,
          typename LABELS,
          typename VISITOR,
          typename SCORER,
          typename STOP,
          typename RANDENGINE>
RandomForest<FEATURES, LABELS>
random_forest_impl(
    FEATURES const & features,
    LABELS const & labels,
    RandomForestOptions const & options,
    VISITOR visitor,
    STOP const & stop,
    RANDENGINE & randengine
){
    // typedef FEATURES Features;
    typedef LABELS Labels;
    // typedef typename Features::value_type FeatureType;
    typedef typename Labels::value_type LabelType;
    typedef RandomForest<FEATURES, LABELS> RF;

    // Gather the problem dimensions (rows = instances, columns = features).
    ProblemSpec<LabelType> pspec;
    pspec.num_instances(features.shape()[0])
         .num_features(features.shape()[1])
         .actual_mtry(options.get_features_per_node(features.shape()[1]))
         .actual_msample(labels.size());

    // Check the number of trees.
    size_t const tree_count = options.tree_count_;
    vigra_precondition(tree_count > 0, "random_forest_impl(): tree_count must not be zero.");
    std::vector<RF> trees(tree_count);

    // Transform the labels to 0, 1, 2, ...
    // (std::set yields the distinct labels in sorted order.)
    std::set<LabelType> const dlabels(labels.begin(), labels.end());
    std::vector<LabelType> const distinct_labels(dlabels.begin(), dlabels.end());
    pspec.distinct_classes(distinct_labels);
    std::map<LabelType, size_t> label_map;
    for (size_t i = 0; i < distinct_labels.size(); ++i)
    {
        label_map[distinct_labels[i]] = i;
    }

    MultiArray<1, LabelType> transformed_labels(Shape1(labels.size()));
    for (size_t i = 0; i < (size_t)labels.size(); ++i)
    {
        transformed_labels(i) = label_map[labels(i)];
    }

    // Check the vector with the class weights.
    vigra_precondition(options.class_weights_.size() == 0 || options.class_weights_.size() == distinct_labels.size(),
        "random_forest_impl(): The number of class weights must be 0 or equal to the number of classes.");

    // Write the problem specification into the trees.
    for (auto & t : trees)
        t.problem_spec_ = pspec;

    // Find the correct number of threads.
    // NOTE(review): hardware_concurrency() may return 0 on some platforms,
    // which would leave n_threads == 0 here — confirm ThreadPool handles that.
    size_t n_threads = 1;
    if (options.n_threads_ >= 1)
        n_threads = options.n_threads_;
    else if (options.n_threads_ == -1)
        n_threads = std::thread::hardware_concurrency();

    // Use the global random engine to create seeds for the random engines that run in the threads.
    // A std::set is used so that all seeds are distinct.
    UniformIntRandomFunctor<RANDENGINE> rand_functor(randengine);
    std::set<UInt32> seeds;
    while (seeds.size() < n_threads)
    {
        seeds.insert(rand_functor());
    }
    vigra_assert(seeds.size() == n_threads, "random_forest_impl(): Could not create random seeds.");

    // Create the random engines that run in the threads.
    std::vector<RANDENGINE> rand_engines;
    for (auto seed : seeds)
    {
        rand_engines.push_back(RANDENGINE(seed));
    }

    // Call the visitor.
    visitor.visit_before_training();

    // Copy the visitor for each tree.
    // We must change the type, since the original visitor chain holds references and therefore a default copy would be useless.
    typedef typename VisitorCopy<VISITOR>::type VisitorCopyType;
    std::vector<VisitorCopyType> tree_visitors;
    for (size_t i = 0; i < tree_count; ++i)
    {
        tree_visitors.emplace_back(visitor);
    }

    // Train the trees.
    // Each enqueued task trains one tree, using the random engine that
    // belongs to the thread it happens to run on (indexed by thread_id).
    ThreadPool pool((size_t)n_threads);
    std::vector<threading::future<void> > futures;
    for (size_t i = 0; i < tree_count; ++i)
    {
        futures.emplace_back(
            pool.enqueue([&features, &transformed_labels, &options, &tree_visitors, &stop, &trees, i, &rand_engines](size_t thread_id)
                {
                    random_forest_single_tree<RF, SCORER, VisitorCopyType, STOP>(features, transformed_labels, options, tree_visitors[i], stop, trees[i], rand_engines[thread_id]);
                }
            )
        );
    }
    // Wait for all trees to finish (get() also rethrows worker exceptions).
    for (auto & fut : futures)
        fut.get();

    // Merge the trees together.
    RF rf(trees[0]);
    rf.options_ = options;
    for (size_t i = 1; i < trees.size(); ++i)
    {
        rf.merge(trees[i]);
    }

    // Call the visitor.
    visitor.visit_after_training(tree_visitors, rf, features, labels);

    return rf;
}
479
480
481
482 /// \brief Get the stop criterion from the option object and pass it as template argument.
483 template <typename FEATURES, typename LABELS, typename VISITOR, typename SCORER, typename RANDENGINE>
484 inline
485 RandomForest<FEATURES, LABELS>
486 random_forest_impl0(
487 FEATURES const & features,
488 LABELS const & labels,
489 RandomForestOptions const & options,
490 VISITOR visitor,
491 RANDENGINE & randengine
492 ){
493 if (options.max_depth_ > 0)
494 return random_forest_impl<FEATURES, LABELS, VISITOR, SCORER, DepthStop, RANDENGINE>(features, labels, options, visitor, DepthStop(options.max_depth_), randengine);
495 else if (options.min_num_instances_ > 1)
496 return random_forest_impl<FEATURES, LABELS, VISITOR, SCORER, NumInstancesStop, RANDENGINE>(features, labels, options, visitor, NumInstancesStop(options.min_num_instances_), randengine);
497 else if (options.node_complexity_tau_ > 0)
498 return random_forest_impl<FEATURES, LABELS, VISITOR, SCORER, NodeComplexityStop, RANDENGINE>(features, labels, options, visitor, NodeComplexityStop(options.node_complexity_tau_), randengine);
499 else
500 return random_forest_impl<FEATURES, LABELS, VISITOR, SCORER, PurityStop, RANDENGINE>(features, labels, options, visitor, PurityStop(), randengine);
501 }
502
503 } // namespace detail
504
505 /********************************************************/
506 /* */
507 /* random_forest */
508 /* */
509 /********************************************************/
510
511 /** \brief Train a \ref vigra::rf3::RandomForest classifier.
512
513 This factory function constructs a \ref vigra::rf3::RandomForest classifier and trains
514 it for the given features and labels. They must be given as a matrix with shape
515 <tt>num_instances x num_features</tt> and an array with length <tt>num_instances</tt> respectively.
516 Most training options (such as number of trees in the forest, termination and split criteria,
517 and number of threads for parallel training) are specified via an option object of type \ref vigra::rf3::RandomForestOptions. Optional visitors are typically used to compute the
518 out-of-bag error of the classifier (use \ref vigra::rf3::OOBError) and estimate variable importance
519 on the basis of the Gini gain (use \ref vigra::rf3::VariableImportance). You can also provide
520 a specific random number generator instance, which is especially useful when you want to
521 enforce deterministic algorithm behavior during debugging.
522
523 <b> Declaration:</b>
524
525 \code
526 namespace vigra { namespace rf3 {
527 template <typename FEATURES,
528 typename LABELS,
529 typename VISITOR = vigra::rf3::RFStopVisiting,
530 typename RANDENGINE = vigra::MersenneTwister>
531 vigra::rf3::RandomForest<FEATURES, LABELS>
532 random_forest(
533 FEATURES const & features,
534 LABELS const & labels,
535 vigra::rf3::RandomForestOptions const & options = vigra::rf3::RandomForestOptions(),
536 VISITOR visitor = vigra::rf3::RFStopVisiting(),
537 RANDENGINE & randengine = vigra::MersenneTwister::global()
538 );
539 }}
540 \endcode
541
542 <b> Usage:</b>
543
544 <b>\#include</b> \<vigra/random_forest_3.hxx\><br>
545 Namespace: vigra::rf3
546
547 \code
548 using namespace vigra;
549
550 int num_instances = ...;
551 int num_features = ...;
552 MultiArray<2, double> train_features(Shape2(num_instances, num_features));
553 MultiArray<1, int> train_labels(Shape1(num_instances));
554 ... // fill training data matrices
555
556 rf3::OOBError oob; // visitor to compute the out-of-bag error
557 auto rf = random_forest(train_features, train_labels,
558 rf3::RandomForestOptions().tree_count(100)
559 .features_per_node(rf3::RF_SQRT)
                                              .n_threads(4),
                                  rf3::create_visitor(oob));
562
563 std::cout << "Random forest training finished with out-of-bag error " << oob.oob_err_ << "\n";
564
565 int num_test_instances = ...;
566 MultiArray<2, double> test_features(Shape2(num_test_instances, num_features));
567 MultiArray<1, int> test_labels(Shape1(num_test_instances));
568 ... // fill feature matrix for test data
569
570 rf.predict(test_features, test_labels);
571
572 for(int i=0; i<num_test_instances; ++i)
573 std::cerr << "Prediction for test instance " << i << ": " << test_labels(i) << "\n";
574 \endcode
575 */
576 doxygen_overloaded_function(template <...> void random_forest)
577
578 template <typename FEATURES, typename LABELS, typename VISITOR, typename RANDENGINE>
579 inline
580 RandomForest<FEATURES, LABELS>
581 random_forest(
582 FEATURES const & features,
583 LABELS const & labels,
584 RandomForestOptions const & options,
585 VISITOR visitor,
586 RANDENGINE & randengine
587 ){
588 typedef detail::GeneralScorer<GiniScore> GiniScorer;
589 typedef detail::GeneralScorer<EntropyScore> EntropyScorer;
590 typedef detail::GeneralScorer<KolmogorovSmirnovScore> KSDScorer;
591 if (options.split_ == RF_GINI)
592 return detail::random_forest_impl0<FEATURES, LABELS, VISITOR, GiniScorer, RANDENGINE>(features, labels, options, visitor, randengine);
593 else if (options.split_ == RF_ENTROPY)
594 return detail::random_forest_impl0<FEATURES, LABELS, VISITOR, EntropyScorer, RANDENGINE>(features, labels, options, visitor, randengine);
595 else if (options.split_ == RF_KSD)
596 return detail::random_forest_impl0<FEATURES, LABELS, VISITOR, KSDScorer, RANDENGINE>(features, labels, options, visitor, randengine);
597 else
598 throw std::runtime_error("random_forest(): Unknown split criterion.");
599 }
600
601 template <typename FEATURES, typename LABELS, typename VISITOR>
602 inline
603 RandomForest<FEATURES, LABELS>
604 random_forest(
605 FEATURES const & features,
606 LABELS const & labels,
607 RandomForestOptions const & options,
608 VISITOR visitor
609 ){
610 auto randengine = MersenneTwister::global();
611 return random_forest(features, labels, options, visitor, randengine);
612 }
613
614 template <typename FEATURES, typename LABELS>
615 inline
616 RandomForest<FEATURES, LABELS>
617 random_forest(
618 FEATURES const & features,
619 LABELS const & labels,
620 RandomForestOptions const & options
621 ){
622 RFStopVisiting stop;
623 return random_forest(features, labels, options, stop);
624 }
625
626 template <typename FEATURES, typename LABELS>
627 inline
628 RandomForest<FEATURES, LABELS>
629 random_forest(
630 FEATURES const & features,
631 LABELS const & labels
632 ){
633 return random_forest(features, labels, RandomForestOptions());
634 }
635
636 } // namespace rf3
637
638 //@}
639
640 } // namespace vigra
641
642 #endif
0 /************************************************************************/
1 /* */
2 /* Copyright 2009,2014, 2015 by Sven Peter, Philip Schill, */
3 /* Rahul Nair and Ullrich Koethe */
4 /* */
5 /* This file is part of the VIGRA computer vision library. */
6 /* The VIGRA Website is */
7 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
8 /* Please direct questions, bug reports, and contributions to */
9 /* ullrich.koethe@iwr.uni-heidelberg.de or */
10 /* vigra@informatik.uni-hamburg.de */
11 /* */
12 /* Permission is hereby granted, free of charge, to any person */
13 /* obtaining a copy of this software and associated documentation */
14 /* files (the "Software"), to deal in the Software without */
15 /* restriction, including without limitation the rights to use, */
16 /* copy, modify, merge, publish, distribute, sublicense, and/or */
17 /* sell copies of the Software, and to permit persons to whom the */
18 /* Software is furnished to do so, subject to the following */
19 /* conditions: */
20 /* */
21 /* The above copyright notice and this permission notice shall be */
22 /* included in all copies or substantial portions of the */
23 /* Software. */
24 /* */
25 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
26 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
27 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
28 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
29 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
30 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
31 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
32 /* OTHER DEALINGS IN THE SOFTWARE. */
33 /* */
34 /************************************************************************/
35
36 #ifndef VIGRA_RF3_IMPEX_HDF5_HXX
37 #define VIGRA_RF3_IMPEX_HDF5_HXX
38
39 #include <string>
40 #include <sstream>
41 #include <iomanip>
42 #include <stack>
43
44 #include "config.hxx"
45 #include "random_forest_3/random_forest.hxx"
46 #include "random_forest_3/random_forest_common.hxx"
47 #include "random_forest_3/random_forest_visitors.hxx"
48 #include "hdf5impex.hxx"
49
50 namespace vigra
51 {
52 namespace rf3
53 {
54
55 // needs to be in sync with random_forest_hdf5_impex for backwards compatibility
56 static const char *const rf_hdf5_ext_param = "_ext_param";
57 static const char *const rf_hdf5_options = "_options";
58 static const char *const rf_hdf5_topology = "topology";
59 static const char *const rf_hdf5_parameters = "parameters";
60 static const char *const rf_hdf5_tree = "Tree_";
61 static const char *const rf_hdf5_version_group = ".";
62 static const char *const rf_hdf5_version_tag = "vigra_random_forest_version";
63 static const double rf_hdf5_version = 0.1;
64
65 // keep in sync with include/vigra/random_forest/rf_nodeproxy.hxx
// Node type tags stored in the topology array of a serialized tree.
// Keep in sync with include/vigra/random_forest/rf_nodeproxy.hxx.
enum NodeTags
{
    rf_UnFilledNode = 42,
    rf_AllColumns = 0x00000000,
    rf_ToBePrunedTag = 0x80000000,
    rf_LeafNodeTag = 0x40000000,    // bit set on every leaf node tag (see rf_e_* below)

    rf_i_ThresholdNode = 0,         // inner node: single-feature threshold split
    rf_i_HyperplaneNode = 1,
    rf_i_HypersphereNode = 2,
    rf_e_ConstProbNode = 0 | rf_LeafNodeTag,    // leaf storing constant class probabilities
    rf_e_LogRegProbNode = 1 | rf_LeafNodeTag
};
79
// Bit masks for decoding a topology entry: the high nibble carries the node
// tags, the two lowest bits the inner-node type, and every remaining bit must
// be zero in a well-formed file (checked during import).
static const unsigned int rf_tag_mask = 0xf0000000;
static const unsigned int rf_type_mask = 0x00000003;
static const unsigned int rf_zero_mask = 0xffffffff & ~rf_tag_mask & ~rf_type_mask;
83
84 namespace detail
85 {
86 inline std::string get_cwd(HDF5File & h5context)
87 {
88 return h5context.get_absolute_path(h5context.pwd());
89 }
90 }
91
/// \brief Import a random forest from an HDF5 file.
///
/// \param h5ctx    HDF5 file handle; its current group is restored on return.
/// \param pathname optional group path of the forest inside the file.
///
/// Reads the external problem parameters and the training options, then
/// rebuilds each tree from its "topology"/"parameters" arrays via a
/// breadth-first traversal. Only threshold split nodes and constant
/// probability leaves are supported (see the preconditions below).
template <typename FEATURES, typename LABELS>
typename DefaultRF<FEATURES, LABELS>::type
random_forest_import_HDF5(HDF5File & h5ctx, std::string const & pathname = "")
{
    typedef typename DefaultRF<FEATURES, LABELS>::type RF;
    typedef typename RF::Graph Graph;
    typedef typename RF::Node Node;
    typedef typename RF::SplitTests SplitTest;
    typedef typename LABELS::value_type LabelType;
    typedef typename RF::AccInputType AccInputType;
    typedef typename AccInputType::value_type AccValueType;

    std::string cwd;

    // Remember the current group so it can be restored at the end.
    if (pathname.size()) {
        cwd = detail::get_cwd(h5ctx);
        h5ctx.cd(pathname);
    }

    // Reject files written by a newer format version than we understand.
    if (h5ctx.existsAttribute(rf_hdf5_version_group, rf_hdf5_version_tag)) {
        double version;
        h5ctx.readAttribute(rf_hdf5_version_group, rf_hdf5_version_tag, version);
        vigra_precondition(version <= rf_hdf5_version, "random_forest_import_HDF5(): unexpected file format version.");
    }

    // Read ext params.
    size_t actual_mtry;
    size_t num_instances;
    size_t num_features;
    size_t num_classes;
    size_t msample;
    int is_weighted_int;
    MultiArray<1, LabelType> distinct_labels_marray;
    MultiArray<1, double> class_weights_marray;

    h5ctx.cd(rf_hdf5_ext_param);
    h5ctx.read("actual_msample_", msample);
    h5ctx.read("actual_mtry_", actual_mtry);
    h5ctx.read("class_count_", num_classes);
    h5ctx.readAndResize("class_weights_", class_weights_marray);
    h5ctx.read("column_count_", num_features);
    h5ctx.read("is_weighted_", is_weighted_int);
    h5ctx.readAndResize("labels", distinct_labels_marray);
    h5ctx.read("row_count_", num_instances);
    h5ctx.cd_up();

    bool is_weighted = is_weighted_int == 1 ? true : false;

    // Read options.
    size_t min_num_instances;
    int mtry;
    int mtry_switch_int;
    int bootstrap_sampling_int;
    int tree_count;
    h5ctx.cd(rf_hdf5_options);
    h5ctx.read("min_split_node_size_", min_num_instances);
    h5ctx.read("mtry_", mtry);
    h5ctx.read("mtry_switch_", mtry_switch_int);
    h5ctx.read("sample_with_replacement_", bootstrap_sampling_int);
    h5ctx.read("tree_count_", tree_count);
    h5ctx.cd_up();

    RandomForestOptionTags mtry_switch = (RandomForestOptionTags)mtry_switch_int;
    bool bootstrap_sampling = bootstrap_sampling_int == 1 ? true : false;

    std::vector<LabelType> const distinct_labels(distinct_labels_marray.begin(), distinct_labels_marray.end());
    std::vector<double> const class_weights(class_weights_marray.begin(), class_weights_marray.end());

    // Rebuild the problem specification from the stored values.
    auto const pspec = ProblemSpec<LabelType>()
                           .num_features(num_features)
                           .num_instances(num_instances)
                           .num_classes(num_classes)
                           .distinct_classes(distinct_labels)
                           .actual_mtry(actual_mtry)
                           .actual_msample(msample);

    auto options = RandomForestOptions()
                       .min_num_instances(min_num_instances)
                       .bootstrap_sampling(bootstrap_sampling)
                       .tree_count(tree_count);
    options.features_per_node_switch_ = mtry_switch;
    options.features_per_node_ = mtry;
    if (is_weighted)
        options.class_weights(class_weights);

    Graph gr;
    typename RF::template NodeMap<SplitTest>::type split_tests;
    typename RF::template NodeMap<AccInputType>::type leaf_responses;

    // Rebuild each tree from the groups whose names start with "Tree_".
    auto const groups = h5ctx.ls();
    for (auto const & groupname : groups) {
        if (groupname.substr(0, std::char_traits<char>::length(rf_hdf5_tree)).compare(rf_hdf5_tree) != 0) {
            continue;
        }

        MultiArray<1, unsigned int> topology;
        MultiArray<1, double> parameters;
        h5ctx.cd(groupname);
        h5ctx.readAndResize(rf_hdf5_topology, topology);
        h5ctx.readAndResize(rf_hdf5_parameters, parameters);
        h5ctx.cd_up();

        // topology[0] and topology[1] store the problem dimensions.
        vigra_precondition(topology[0] == num_features, "random_forest_import_HDF5(): number of features mismatch.");
        vigra_precondition(topology[1] == num_classes, "random_forest_import_HDF5(): number of classes mismatch.");

        Node const n = gr.addNode();

        // Breadth-first reconstruction; the queue holds pairs of
        // (index into the topology array, graph node). The root entry
        // starts at index 2, right after the two dimension values.
        std::queue<std::pair<unsigned int, Node> > q;
        q.emplace(2, n);
        while (!q.empty()) {
            auto const el = q.front();

            unsigned int const index = el.first;
            Node const parent = el.second;

            vigra_precondition((topology[index] & rf_zero_mask) == 0, "random_forest_import_HDF5(): unexpected node type: type & zero_mask > 0");

            if (topology[index] & rf_LeafNodeTag) {
                // topology[index+1] points to the node weight in the
                // parameters array; the class probabilities follow it.
                unsigned int const probs_start = topology[index+1] + 1;

                vigra_precondition((topology[index] & rf_tag_mask) == rf_LeafNodeTag, "random_forest_import_HDF5(): unexpected node type: additional tags in leaf node");

                std::vector<AccValueType> node_response;

                for (unsigned int i = 0; i < num_classes; ++i) {
                    node_response.push_back(parameters[probs_start + i]);
                }

                leaf_responses.insert(parent, node_response);

            } else {
                vigra_precondition(topology[index] == rf_i_ThresholdNode, "random_forest_import_HDF5(): unexpected node type.");

                Node const left = gr.addNode();
                Node const right = gr.addNode();

                gr.addArc(parent, left);
                gr.addArc(parent, right);

                // topology[index+4] is the split dimension;
                // parameters[topology[index+1]+1] is the split value
                // (the entry before it is the node weight).
                split_tests.insert(parent, SplitTest(topology[index+4], parameters[topology[index+1]+1]));

                // topology[index+2] / topology[index+3] are the topology
                // indices of the left and right child.
                q.push(std::make_pair(topology[index+2], left));
                q.push(std::make_pair(topology[index+3], right));
            }

            q.pop();
        }
    }

    // Restore the original working group.
    if (cwd.size()) {
        h5ctx.cd(cwd);
    }

    RF rf(gr, split_tests, leaf_responses, pspec);
    rf.options_ = options;
    return rf;
}
249
250 namespace detail
251 {
252 class PaddedNumberString
253 {
254 public:
255
256 PaddedNumberString(int n)
257 {
258 ss_ << (n-1);
259 width_ = ss_.str().size();
260 }
261
262 std::string operator()(int k) const
263 {
264 ss_.str("");
265 ss_ << std::setw(width_) << std::setfill('0') << k;
266 return ss_.str();
267 }
268
269 private:
270
271 mutable std::ostringstream ss_;
272 unsigned int width_;
273 };
274 }
275
276 template <typename RF>
277 void random_forest_export_HDF5(
278 RF const & rf,
279 HDF5File & h5context,
280 std::string const & pathname = ""
281 ){
282 typedef typename RF::LabelType LabelType;
283 typedef typename RF::Node Node;
284
285 std::string cwd;
286 if (pathname.size()) {
287 cwd = detail::get_cwd(h5context);
288 h5context.cd_mk(pathname);
289 }
290
291 // version attribute
292 h5context.writeAttribute(rf_hdf5_version_group, rf_hdf5_version_tag,
293 rf_hdf5_version);
294
295
296 auto const & p = rf.problem_spec_;
297 auto const & opts = rf.options_;
298 MultiArray<1, LabelType> distinct_classes(Shape1(p.distinct_classes_.size()), p.distinct_classes_.data());
299 MultiArray<1, double> class_weights(Shape1(p.num_classes_), 1.0);
300 int is_weighted = 0;
301 if (opts.class_weights_.size() > 0)
302 {
303 is_weighted = 1;
304 for (size_t i = 0; i < opts.class_weights_.size(); ++i)
305 class_weights(i) = opts.class_weights_[i];
306 }
307
308 // Save external parameters.
309 h5context.cd_mk(rf_hdf5_ext_param);
310 h5context.write("column_count_", p.num_features_);
311 h5context.write("row_count_", p.num_instances_);
312 h5context.write("class_count_", p.num_classes_);
313 h5context.write("actual_mtry_", p.actual_mtry_);
314 h5context.write("actual_msample_", p.actual_msample_);
315 h5context.write("labels", distinct_classes);
316 h5context.write("is_weighted_", is_weighted);
317 h5context.write("class_weights_", class_weights);
318 h5context.write("precision_", 0.0);
319 h5context.write("problem_type_", 1.0);
320 h5context.write("response_size_", 1.0);
321 h5context.write("used_", 1.0);
322 h5context.cd_up();
323
324 // Save the options.
325 h5context.cd_mk(rf_hdf5_options);
326 h5context.write("min_split_node_size_", opts.min_num_instances_);
327 h5context.write("mtry_", opts.features_per_node_);
328 h5context.write("mtry_func_", 0.0);
329 h5context.write("mtry_switch_", opts.features_per_node_switch_);
330 h5context.write("predict_weighted_", 0.0);
331 h5context.write("prepare_online_learning_", 0.0);
332 h5context.write("sample_with_replacement_", opts.bootstrap_sampling_ ? 1 : 0);
333 h5context.write("stratification_method_", 3.0);
334 h5context.write("training_set_calc_switch_", 1.0);
335 h5context.write("training_set_func_", 0.0);
336 h5context.write("training_set_proportion_", 1.0);
337 h5context.write("training_set_size_", 0.0);
338 h5context.write("tree_count_", opts.tree_count_);
339 h5context.cd_up();
340
341 // Save the trees.
342 detail::PaddedNumberString tree_number(rf.num_trees());
343 for (size_t i = 0; i < rf.num_trees(); ++i)
344 {
345 // Create the topology and parameters arrays.
346 std::vector<UInt32> topology;
347 std::vector<double> parameters;
348 topology.push_back(p.num_features_);
349 topology.push_back(p.num_classes_);
350
351 auto const & probs = rf.node_responses_;
352 auto const & splits = rf.split_tests_;
353 auto const & gr = rf.graph_;
354 auto const root = gr.getRoot(i);
355
356 // Write the tree nodes using a depth-first search.
357 // When a node is created, the indices of the child nodes are unknown.
358 // Therefore, they have to be updated once the child nodes are created.
359 // The stack holds the node and the topology-index that must be updated.
360 std::stack<std::pair<Node, std::ptrdiff_t> > stack;
361 stack.emplace(root, -1);
362 while (!stack.empty())
363 {
364 auto const n = stack.top().first; // the node descriptor
365 auto const i = stack.top().second; // index from the parent node that must be updated
366 stack.pop();
367
368 // Update the index in the parent node.
369 if (i != -1)
370 topology[i] = topology.size();
371
372 if (gr.numChildren(n) == 0)
373 {
374 // The node is a leaf.
375 // Topology: leaf node tag, index of weight in parameters array.
376 // Parameters: node weight, class probabilities.
377 topology.push_back(rf_LeafNodeTag);
378 topology.push_back(parameters.size());
379 auto const & prob = probs.at(n);
380 auto const weight = std::accumulate(prob.begin(), prob.end(), 0.0);
381 parameters.push_back(weight);
382 parameters.insert(parameters.end(), prob.begin(), prob.end());
383 }
384 else
385 {
386 // The node is an inner node.
387 // Topology: threshold tag, index of weight in parameters array, index of left child, index of right child, split dimension.
388 // Parameters: node weight, split value.
389 topology.push_back(rf_i_ThresholdNode);
390 topology.push_back(parameters.size());
391 topology.push_back(-1); // index of left children (currently unknown, will be updated when the child node is taken from the stack)
392 topology.push_back(-1); // index of right children (see above)
393 topology.push_back(splits.at(n).dim_);
394 parameters.push_back(1.0); // inner nodes have the weight 1.
395 parameters.push_back(splits.at(n).val_);
396
397 // Place the children on the stack.
398 stack.emplace(gr.getChild(n, 0), topology.size()-3);
399 stack.emplace(gr.getChild(n, 1), topology.size()-2);
400 }
401 }
402
403 // Convert the vectors to multi arrays.
404 MultiArray<1, UInt32> topo(Shape1(topology.size()), topology.data());
405 MultiArray<1, double> para(Shape1(parameters.size()), parameters.data());
406
407 auto const name = rf_hdf5_tree + tree_number(i);
408 h5context.cd_mk(name);
409 h5context.write(rf_hdf5_topology, topo);
410 h5context.write(rf_hdf5_parameters, para);
411 h5context.cd_up();
412 }
413
414 if (pathname.size())
415 h5context.cd(cwd);
416 }
417
418
419
420 } // namespace rf3
421 } // namespace vigra
422
#endif // VIGRA_RF3_IMPEX_HDF5_HXX
874874 This function implements the reduction by one resolution level (first signature)
875875 or across several pyramid levels (last signature) of a Gaussian pyramid as described in
876876
877 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532–540, 1983
877 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532-540, 1983
878878
879879 That is, it applies the smoothing filter
880880 \code
10251025 This function implements the expansion by one resolution level (first signature)
10261026 or across several pyramid levels (last signature) of a Gaussian pyramid as described in
10271027
1028 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532–540, 1983
1028 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532-540, 1983
10291029
10301030 That is, the function first places the pixel values of the low-resolution
10311031 image at the even pixel coordinates of the high-resolution image (pixels with
11801180 This function implements the reduction across several resolution levels of
11811181 a Laplacian pyramid as described in
11821182
1183 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532–540, 1983
1183 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532-540, 1983
11841184
11851185 It first creates a Gaussian pyramid using \ref pyramidReduceBurtFilter(), then
11861186 upsamples each level once using \ref pyramidExpandBurtFilter(), and finally
12141214 This function implements the reconstruction of a Gaussian pyramid
12151215 across several resolution levels of a Laplacian pyramid as described in
12161216
1217 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532–540, 1983
1217 P. Burt, E. Adelson: <i>"The Laplacian Pyramid as a Compact Image Code"</i>, IEEE Trans. Communications, 9(4):532-540, 1983
12181218
12191219 At each level starting from <tt>fromLevel</tt>, this function calls
12201220 \ref pyramidExpandBurtFilter() to interpolate the image to the next highest
132132 {
133133 return prefilterCoefficients_;
134134 }
135
135
136136 protected:
137137 static ArrayVector<double> prefilterCoefficients_;
138138 unsigned int m_;
144144
145145
146146
147 /** \addtogroup GeometricTransformations Geometric Transformations
148 Zoom up and down by repeating pixels, or using various interpolation schemes.
149
150 See also:
151 <ul>
152 <li> \ref ResamplingConvolutionFilters to resize by means of pyramids or smoothing filters</li>
153 <li> \ref resampleImage() to just drop or repeat pixels</li>
154 <li> \ref resizeMultiArraySplineInterpolation() for multi-dimensional interpolation</li>
155 </ul>
156
157 <b>\#include</b> \<vigra/stdimagefunctions.hxx\><br>
158 <b>or</b><br>
159 <b>\#include</b> \<vigra/resizeimage.hxx\><br>
147 /** \addtogroup GeometricTransformations
160148 */
161149 //@{
162150
180168 ad.set(as(i1), id);
181169 return;
182170 }
183
171
184172 double dx = (double)(wold - 1) / (wnew - 1);
185173 double x = 0.5;
186174 for(; id != idend; ++id, x += dx)
252240 \code
253241 MultiArray<2, unsigned char> src(w, h);
254242 MultiArray<2, float> dest(w_new, h_new);
255
243
256244 resizeImageNoInterpolation(src, dest);
257245 \endcode
258246
457445 \code
458446 MultiArray<2, unsigned char> src(w, h);
459447 MultiArray<2, float> dest(w_new, h_new);
460
448
461449 resizeImageLinearInterpolation(src, dest);
462450 \endcode
463451
680668 \code
681669 MultiArray<2, unsigned char> src(w, h);
682670 MultiArray<2, float> dest(w_new, h_new);
683
671
684672 // use default cubic spline interpolator
685673 resizeImageSplineInterpolation(src, dest);
686
674
687675 // use 5th-order spline interpolator
688676 resizeImageSplineInterpolation(src, dest, BSpline<5, double>());
689677 \endcode
980968
981969 <b>\#include</b> \<vigra/resizeimage.hxx\><br>
982970 Namespace: vigra
983
971
984972 \code
985973 MultiArray<2, unsigned char> src(w, h);
986974 MultiArray<2, float> dest(w_new, h_new);
987
975
988976 resizeImageCatmullRomInterpolation(src, dest);
989977 \endcode
990978 */
10721060
10731061 <b>\#include</b> \<vigra/resizeimage.hxx\><br>
10741062 Namespace: vigra
1075
1063
10761064 \code
10771065 MultiArray<2, unsigned char> src(w, h);
10781066 MultiArray<2, float> dest(w_new, h_new);
1079
1067
10801068 resizeImageCoscotInterpolation(src, dest);
10811069 \endcode
10821070 */
4444 namespace vigra
4545 {
4646
47 /** \addtogroup MachineLearning Machine Learning
48 **/
49 //@{
50
51
5247 /**\brief Options object for the Sampler class.
53
54 <b>usage:</b>
55
48
49 \ingroup MachineLearning
50
51 <b>Usage:</b>
52
5653 \code
5754 SamplerOptions opt = SamplerOptions()
5855 .withReplacement()
5956 .sampleProportion(0.5);
6057 \endcode
61
58
6259 Note that the return value of all methods is <tt>*this</tt> which makes
6360 concatenating of options as above possible.
6461 */
7067 unsigned int sample_size;
7168 bool sample_with_replacement;
7269 bool stratified_sampling;
73
70
7471 SamplerOptions()
7572 : sample_proportion(1.0),
76 sample_size(0),
73 sample_size(0),
7774 sample_with_replacement(true),
7875 stratified_sampling(false)
7976 {}
10097
10198 /**\brief Draw the given number of samples.
10299 * If stratifiedSampling is true, the \a size is equally distributed
103 * across all strata (e.g. <tt>size / strataCount</tt> samples are taken
100 * across all strata (e.g. <tt>size / strataCount</tt> samples are taken
104101 * from each stratum, subject to rounding).
105102 *
106103 * <br> Default: 0 (i.e. determine the count by means of sampleProportion())
113110
114111
115112 /**\brief Determine the number of samples to draw as a proportion of the total
116 * number. That is, we draw <tt>count = totalCount * proportion</tt> samples.
113 * number. That is, we draw <tt>count = totalCount * proportion</tt> samples.
117114 * This option is overridden when an absolute count is specified by sampleSize().
118 *
115 *
119116 * If stratifiedSampling is true, the count is equally distributed
120 * across all strata (e.g. <tt>totalCount * proportion / strataCount</tt> samples are taken
117 * across all strata (e.g. <tt>totalCount * proportion / strataCount</tt> samples are taken
121118 * from each stratum).
122119 *
123120 * <br> Default: 1.0
130127 return *this;
131128 }
132129
133 /**\brief Draw equally many samples from each "stratum".
134 * A stratum is a group of like entities, e.g. pixels belonging
135 * to the same object class. This is useful to create balanced samples
130 /**\brief Draw equally many samples from each "stratum".
131 * A stratum is a group of like entities, e.g. pixels belonging
132 * to the same object class. This is useful to create balanced samples
136133 * when the class probabilities are very unbalanced (e.g.
137134 * when there are many background and few foreground pixels).
138 * Stratified sampling thus avoids that a trained classifier is biased
139 * towards the majority class.
135 * Stratified sampling thus avoids that a trained classifier is biased
136 * towards the majority class.
140137 *
141138 * <br> Default (if you don't call this function): false
142139 */
155152
156153 /** \brief Create random samples from a sequence of indices.
157154
155 \ingroup MachineLearning
156
158157 Selecting data items at random is a basic task of machine learning,
159158 for example in boostrapping, RandomForest training, and cross validation.
160 This class implements various ways to select random samples via their indices.
159 This class implements various ways to select random samples via their indices.
161160 Indices are assumed to be consecutive in
162161 the range <tt>0 &lt;= index &lt; total_sample_count</tt>.
163
164 The class always contains a current sample which can be accessed by
162
163 The class always contains a current sample which can be accessed by
165164 the index operator or by the function sampledIndices(). The indices
166165 that are not in the current sample (out-of-bag indices) can be accessed
167166 via the function oobIndices().
168
167
169168 The sampling method (with/without replacement, stratified or not) and the
170 number of samples to draw are determined by the option object
169 number of samples to draw are determined by the option object
171170 SamplerOptions.
172
171
173172 <b>Usage:</b>
174
173
175174 <b>\#include</b> \<vigra/sampling.hxx\><br>
176175 Namespace: vigra
177
178 Create a Sampler with default options, i.e. sample as many indices as there
179 are data elements, with replacement. On average, the sample will contain
176
177 Create a Sampler with default options, i.e. sample as many indices as there
178 are data elements, with replacement. On average, the sample will contain
180179 <tt>0.63*totalCount</tt> distinct indices.
181
180
182181 \code
183182 int totalCount = 10000; // total number of data elements
184 int numberOfSamples = 20; // repeat experiment 20 times
183 int numberOfSamples = 20; // repeat experiment 20 times
185184 Sampler<> sampler(totalCount);
186185 for(int k=0; k<numberOfSamples; ++k)
187186 {
195194 sampler.sample();
196195 }
197196 \endcode
198
197
199198 Create a Sampler for stratified sampling, without replacement.
200
199
201200 \code
202201 // prepare the strata (i.e. specify which stratum each element belongs to)
203202 int stratumSize1 = 2000, stratumSize2 = 8000,
207206 strata[i] = 1;
208207 for(int i=stratumSize1; i<stratumSize2; ++i)
209208 strata[i] = 2;
210
209
211210 int sampleSize = 200; // i.e. sample 100 elements from each of the two strata
212 int numberOfSamples = 20; // repeat experiment 20 times
211 int numberOfSamples = 20; // repeat experiment 20 times
213212 Sampler<> stratifiedSampler(strata.begin(), strata.end(),
214213 SamplerOptions().withoutReplacement().stratified().sampleSize(sampleSize));
215214 // create first sample
237236 requires extension of the random number generator classes.
238237 */
239238 typedef Int32 IndexType;
240
239
241240 typedef ArrayVector<IndexType> IndexArrayType;
242
243 /** Type of the array view object that is returned by
241
242 /** Type of the array view object that is returned by
244243 sampledIndices() and oobIndices().
245244 */
246245 typedef ArrayVectorView <IndexType> IndexArrayViewType;
250249 typedef std::map<IndexType, int> StrataSizesType;
251250 typedef ArrayVector<bool> IsUsedArrayType;
252251 typedef ArrayVectorView<bool> IsUsedArrayViewType;
253
252
254253 static const int oobInvalid = -1;
255254
256255 int total_count_, sample_size_;
271270 int strata_sample_size = static_cast<int>(std::ceil(double(sample_size_) / strataCount()));
272271 int strata_total_count = strata_sample_size * strataCount();
273272
274 for(StrataIndicesType::iterator i = strata_indices_.begin();
273 for(StrataIndicesType::iterator i = strata_indices_.begin();
275274 i != strata_indices_.end(); ++i)
276275 {
277276 if(strata_total_count > sample_size_)
287286 }
288287
289288 public:
290
289
291290 /** Create a sampler for \a totalCount data objects.
292
291
293292 In each invocation of <tt>sample()</tt> below, it will sample
294 indices according to the options passed. If no options are given,
293 indices according to the options passed. If no options are given,
295294 <tt>totalCount</tt> indices will be drawn with replacement.
296295 */
297 Sampler(UInt32 totalCount, SamplerOptions const & opt = SamplerOptions(),
296 Sampler(UInt32 totalCount, SamplerOptions const & opt = SamplerOptions(),
298297 Random const * rnd = 0)
299298 : total_count_(totalCount),
300299 sample_size_(opt.sample_size == 0
310309 {
311310 vigra_precondition(opt.sample_with_replacement || sample_size_ <= total_count_,
312311 "Sampler(): Cannot draw without replacement when data size is smaller than sample count.");
313
312
314313 vigra_precondition(!opt.stratified_sampling,
315314 "Sampler(): Stratified sampling requested, but no strata given.");
316
315
317316 // initialize a single stratum containing all data
318317 strata_indices_[0].resize(total_count_);
319318 for(int i=0; i<total_count_; ++i)
323322 //this is screwing up the random forest tests.
324323 //sample();
325324 }
326
325
327326 /** Create a sampler for stratified sampling.
328
329 <tt>strataBegin</tt> and <tt>strataEnd</tt> must refer to a sequence
327
328 <tt>strataBegin</tt> and <tt>strataEnd</tt> must refer to a sequence
330329 which specifies for each sample the stratum it belongs to. The
331330 total number of data objects will be set to <tt>strataEnd - strataBegin</tt>.
332 Equally many samples (subject to rounding) will be drawn from each stratum,
333 unless the option object explicitly requests unstratified sampling,
331 Equally many samples (subject to rounding) will be drawn from each stratum,
332 unless the option object explicitly requests unstratified sampling,
334333 in which case the strata are ignored.
335334 */
336335 template <class Iterator>
337 Sampler(Iterator strataBegin, Iterator strataEnd, SamplerOptions const & opt = SamplerOptions(),
336 Sampler(Iterator strataBegin, Iterator strataEnd, SamplerOptions const & opt = SamplerOptions(),
338337 Random const * rnd = 0)
339338 : total_count_(strataEnd - strataBegin),
340339 sample_size_(opt.sample_size == 0
350349 {
351350 vigra_precondition(opt.sample_with_replacement || sample_size_ <= total_count_,
352351 "Sampler(): Cannot draw without replacement when data size is smaller than sample count.");
353
352
354353 // copy the strata indices
355354 if(opt.stratified_sampling)
356355 {
365364 for(int i=0; i<total_count_; ++i)
366365 strata_indices_[0][i] = i;
367366 }
368
367
369368 vigra_precondition(sample_size_ >= static_cast<int>(strata_indices_.size()),
370369 "Sampler(): Requested sample count must be at least as large as the number of strata.");
371370
422421 {
423422 return options_.stratified_sampling;
424423 }
425
424
426425 /** Whether sampling should be performed with replacement.
427426 */
428427 bool withReplacement() const
429428 {
430429 return options_.sample_with_replacement;
431430 }
432
431
433432 /** Return an array view containing the indices in the current sample.
434433 */
435434 IndexArrayViewType sampledIndices() const
436435 {
437436 return current_sample_;
438437 }
439
438
440439 /** Return an array view containing the out-of-bag indices.
441440 (i.e. the indices that are not in the current sample)
442441 */
468467 {
469468 current_oob_count_ = oobInvalid;
470469 is_used_.init(false);
471
470
472471 if(options_.sample_with_replacement)
473472 {
474473 //Go thru all strata
515514 double lambda;
516515 int minIndex;
517516 int maxIndex;
518
517
519518 PoissonSampler(double lambda,IndexType minIndex,IndexType maxIndex)
520519 : lambda(lambda),
521520 minIndex(minIndex),
552551 {
553552 return used_indices_[in];
554553 }
555
554
556555 int numOfSamples() const
557556 {
558557 return used_indices_.size();
559558 }
560559 };
561560
562 //@}
563
564561 } // namespace vigra
565562
566563 #endif /*VIGRA_SAMPLING_HXX*/
161161
162162 } // namespace detail
163163
164 /** \addtogroup SeededRegionGrowing Region Segmentation Algorithms
165 Region growing, watersheds, and voronoi tesselation
164 /** \addtogroup Superpixels
166165 */
167166 //@{
168167
173172 /********************************************************/
174173
175174 /** Choose between different types of Region Growing */
176 enum SRGType {
177 CompleteGrow = 0,
178 KeepContours = 1,
179 StopAtThreshold = 2,
180 SRGWatershedLabel = -1
175 enum SRGType {
176 CompleteGrow = 0,
177 KeepContours = 1,
178 StopAtThreshold = 2,
179 SRGWatershedLabel = -1
181180 };
182181
183182 /** \brief Region Segmentation by means of Seeded Region Growing.
196195 The seed image is a partly segmented image which contains uniquely
197196 labeled regions (the seeds) and unlabeled pixels (the candidates, label 0).
198197 The highest seed label found in the seed image is returned by the algorithm.
199
198
200199 Seed regions can be as large as you wish and as small as one pixel. If
201200 there are no candidates, the algorithm will simply copy the seed image
202201 into the output image. Otherwise it will aggregate the candidates into
203 the existing regions so that a cost function is minimized.
204 Candidates are taken from the neighborhood of the already assigned pixels,
202 the existing regions so that a cost function is minimized.
203 Candidates are taken from the neighborhood of the already assigned pixels,
205204 where the type of neighborhood is determined by parameter <tt>neighborhood</tt>
206 which can take the values <tt>FourNeighborCode()</tt> (the default)
207 or <tt>EightNeighborCode()</tt>. The algorithm basically works as follows
205 which can take the values <tt>FourNeighborCode()</tt> (the default)
206 or <tt>EightNeighborCode()</tt>. The algorithm basically works as follows
208207 (illustrated for 4-neighborhood, but 8-neighborhood works in the same way):
209208
210209 <ol>
228227 </ol>
229228
230229 <tt>SRGType</tt> can take the following values:
231
230
232231 <DL>
233232 <DT><tt>CompleteGrow</tt> <DD> produce a complete tesselation of the volume (default).
234233 <DT><tt>KeepContours</tt> <DD> keep a 1-voxel wide unlabeled contour between all regions.
235 <DT><tt>StopAtThreshold</tt> <DD> stop when the boundary indicator values exceed the
234 <DT><tt>StopAtThreshold</tt> <DD> stop when the boundary indicator values exceed the
236235 threshold given by parameter <tt>max_cost</tt>.
237236 <DT><tt>KeepContours | StopAtThreshold</tt> <DD> keep 1-voxel wide contour and stop at given <tt>max_cost</tt>.
238237 </DL>
258257 the original statistics.
259258
260259 If a candidate could be merged into more than one regions with identical
261 cost, the algorithm will favour the nearest region. If <tt>StopAtThreshold</tt> is active,
262 and the cost of the current candidate at any point in the algorithm exceeds the optional
263 <tt>max_cost</tt> value (which defaults to <tt>NumericTraits<double>::max()</tt>),
260 cost, the algorithm will favour the nearest region. If <tt>StopAtThreshold</tt> is active,
261 and the cost of the current candidate at any point in the algorithm exceeds the optional
262 <tt>max_cost</tt> value (which defaults to <tt>NumericTraits<double>::max()</tt>),
264263 region growing is aborted, and all voxels not yet assigned to a region remain unlabeled.
265264
266265 In some cases, the cost only depends on the feature value of the current
283282 MultiArrayView<2, TS, AS> const & seeds,
284283 MultiArrayView<2, T2, S2> labels,
285284 RegionStatisticsArray & stats,
286 SRGType srgType = CompleteGrow,
285 SRGType srgType = CompleteGrow,
287286 Neighborhood n = FourNeighborCode(),
288287 double max_cost = NumericTraits<double>::max());
289288 }
297296 class SeedImageIterator, class SeedAccessor,
298297 class DestIterator, class DestAccessor,
299298 class RegionStatisticsArray, class Neighborhood>
300 typename SeedAccessor::value_type
299 typename SeedAccessor::value_type
301300 seededRegionGrowing(SrcIterator srcul, SrcIterator srclr, SrcAccessor as,
302301 SeedImageIterator seedsul, SeedAccessor aseeds,
303302 DestIterator destul, DestAccessor ad,
349348 // init statistics functor
350349 ArrayOfRegionStatistics<SeedRgDirectValueFunctor<float> > stats(max_region_label);
351350
352 // find voronoi region of each point (the point image is overwritten with the
351 // find voronoi region of each point (the point image is overwritten with the
353352 // voronoi region labels)
354353 seededRegionGrowing(dist, points, points, stats);
355354 \endcode
453452
454453 SeedRgPixelHeap pheap;
455454 int cneighbor, maxRegionLabel = 0;
456
455
457456 typedef typename Neighborhood::Direction Direction;
458457 int directionCount = Neighborhood::DirectionCount;
459
458
460459 Point2D pos(0,0);
461460 for(isy=srcul, iry=ir, pos.y=0; pos.y<h;
462461 ++pos.y, ++isy.y, ++iry.y)
490489 }
491490 }
492491 }
493
492
494493 // perform region growing
495494 while(pheap.size() != 0)
496495 {
548547 }
549548 }
550549 }
551
550
552551 // free temporary memory
553552 while(pheap.size() != 0)
554553 {
628627 pair<SeedImageIterator, SeedAccessor> img3,
629628 pair<DestIterator, DestAccessor> img4,
630629 RegionStatisticsArray & stats,
631 SRGType srgType,
630 SRGType srgType,
632631 Neighborhood n,
633632 double max_cost = NumericTraits<double>::max())
634633 {
680679 MultiArrayView<2, TS, AS> const & img3,
681680 MultiArrayView<2, T2, S2> img4,
682681 RegionStatisticsArray & stats,
683 SRGType srgType,
682 SRGType srgType,
684683 Neighborhood n,
685684 double max_cost = NumericTraits<double>::max())
686685 {
738737 template <class SrcIterator, class SrcAccessor,
739738 class DestIterator, class DestAccessor,
740739 class RegionStatisticsArray, class Neighborhood>
741 typename DestAccessor::value_type
740 typename DestAccessor::value_type
742741 fastSeededRegionGrowing(SrcIterator srcul, SrcIterator srclr, SrcAccessor as,
743742 DestIterator destul, DestAccessor ad,
744743 RegionStatisticsArray & stats,
751750
752751 vigra_precondition((srgType & KeepContours) == 0,
753752 "fastSeededRegionGrowing(): the turbo algorithm doesn't support 'KeepContours', sorry.");
754
753
755754 int w = srclr.x - srcul.x;
756755 int h = srclr.y - srcul.y;
757756
760759
761760 BucketQueue<Point2D, true> pqueue(bucket_count);
762761 LabelType maxRegionLabel = 0;
763
762
764763 Point2D pos(0,0);
765764 for(isy=srcul, idy = destul, pos.y=0; pos.y<h; ++pos.y, ++isy.y, ++idy.y)
766765 {
774773
775774 if(maxRegionLabel < label)
776775 maxRegionLabel = label;
777
776
778777 AtImageBorder atBorder = isAtImageBorder(pos.x, pos.y, w, h);
779778 if(atBorder == NotAtBorder)
780779 {
792791 }
793792 else
794793 {
795 RestrictedNeighborhoodCirculator<DestIterator, Neighborhood>
794 RestrictedNeighborhoodCirculator<DestIterator, Neighborhood>
796795 c(idx, atBorder), cend(c);
797796 do
798797 {
808807 }
809808 }
810809 }
811
810
812811 // perform region growing
813812 while(!pqueue.empty())
814813 {
815814 Point2D pos = pqueue.top();
816815 std::ptrdiff_t cost = pqueue.topPriority();
817816 pqueue.pop();
818
817
819818 if((srgType & StopAtThreshold) != 0 && cost > max_cost)
820819 break;
821820
822821 idx = destul + pos;
823822 isx = srcul + pos;
824
823
825824 std::ptrdiff_t label = ad(idx);
826825
827826 AtImageBorder atBorder = isAtImageBorder(pos.x, pos.y, w, h);
828827 if(atBorder == NotAtBorder)
829828 {
830829 NeighborhoodCirculator<DestIterator, Neighborhood> c(idx), cend(c);
831
830
832831 do
833832 {
834833 std::ptrdiff_t nlabel = ad(c);
835834 if(nlabel == 0)
836835 {
837836 ad.set(label, idx, c.diff());
838 std::ptrdiff_t priority =
837 std::ptrdiff_t priority =
839838 std::max((std::ptrdiff_t)stats[label].cost(as(isx, c.diff())), cost);
840839 pqueue.push(pos+c.diff(), priority);
841840 }
844843 }
845844 else
846845 {
847 RestrictedNeighborhoodCirculator<DestIterator, Neighborhood>
846 RestrictedNeighborhoodCirculator<DestIterator, Neighborhood>
848847 c(idx, atBorder), cend(c);
849848 do
850849 {
852851 if(nlabel == 0)
853852 {
854853 ad.set(label, idx, c.diff());
855 std::ptrdiff_t priority =
854 std::ptrdiff_t priority =
856855 std::max((std::ptrdiff_t)stats[label].cost(as(isx, c.diff())), cost);
857856 pqueue.push(pos+c.diff(), priority);
858857 }
860859 while(++c != cend);
861860 }
862861 }
863
862
864863 return maxRegionLabel;
865864 }
866865
867866 template <class SrcIterator, class SrcAccessor,
868867 class DestIterator, class DestAccessor,
869868 class RegionStatisticsArray, class Neighborhood>
870 inline typename DestAccessor::value_type
869 inline typename DestAccessor::value_type
871870 fastSeededRegionGrowing(SrcIterator srcul, SrcIterator srclr, SrcAccessor as,
872871 DestIterator destul, DestAccessor ad,
873872 RegionStatisticsArray & stats,
882881 template <class SrcIterator, class SrcAccessor,
883882 class DestIterator, class DestAccessor,
884883 class RegionStatisticsArray>
885 inline typename DestAccessor::value_type
884 inline typename DestAccessor::value_type
886885 fastSeededRegionGrowing(SrcIterator srcul, SrcIterator srclr, SrcAccessor as,
887886 DestIterator destul, DestAccessor ad,
888887 RegionStatisticsArray & stats,
896895 template <class SrcIterator, class SrcAccessor,
897896 class DestIterator, class DestAccessor,
898897 class RegionStatisticsArray>
899 inline typename DestAccessor::value_type
898 inline typename DestAccessor::value_type
900899 fastSeededRegionGrowing(SrcIterator srcul, SrcIterator srclr, SrcAccessor as,
901900 DestIterator destul, DestAccessor ad,
902901 RegionStatisticsArray & stats)
909908 template <class SrcIterator, class SrcAccessor,
910909 class DestIterator, class DestAccessor,
911910 class RegionStatisticsArray, class Neighborhood>
912 inline typename DestAccessor::value_type
911 inline typename DestAccessor::value_type
913912 fastSeededRegionGrowing(triple<SrcIterator, SrcIterator, SrcAccessor> src,
914913 pair<DestIterator, DestAccessor> dest,
915914 RegionStatisticsArray & stats,
916 SRGType srgType,
915 SRGType srgType,
917916 Neighborhood n,
918917 double max_cost,
919918 std::ptrdiff_t bucket_count = 256)
930929 fastSeededRegionGrowing(MultiArrayView<2, T1, S1> const & src,
931930 MultiArrayView<2, T2, S2> dest,
932931 RegionStatisticsArray & stats,
933 SRGType srgType,
932 SRGType srgType,
934933 Neighborhood n,
935934 double max_cost,
936935 std::ptrdiff_t bucket_count = 256)
162162
163163 } // namespace detail
164164
165 /** \addtogroup SeededRegionGrowing
165 /** \addtogroup Superpixels
166166 */
167167 //@{
168168
182182 there are no candidates, the algorithm will simply copy the seed array
183183 into the output array. Otherwise it will aggregate the candidates into
184184 the existing regions so that a cost function is minimized.
185 Candidates are taken from the neighborhood of the already assigned pixels,
185 Candidates are taken from the neighborhood of the already assigned pixels,
186186 where the type of neighborhood is determined by parameter <tt>neighborhood</tt>
187 which can take the values <tt>NeighborCode3DSix()</tt> (the default)
188 or <tt>NeighborCode3DTwentySix()</tt>. The algorithm basically works as follows
187 which can take the values <tt>NeighborCode3DSix()</tt> (the default)
188 or <tt>NeighborCode3DTwentySix()</tt>. The algorithm basically works as follows
189189 (illustrated for 6-neighborhood, but 26-neighborhood works in the same way):
190190
191191 <ol>
209209 </ol>
210210
211211 <tt>SRGType</tt> can take the following values:
212
212
213213 <DL>
214214 <DT><tt>CompleteGrow</tt> <DD> produce a complete tesselation of the volume (default).
215215 <DT><tt>KeepContours</tt> <DD> keep a 1-voxel wide unlabeled contour between all regions.
216 <DT><tt>StopAtThreshold</tt> <DD> stop when the boundary indicator values exceed the
216 <DT><tt>StopAtThreshold</tt> <DD> stop when the boundary indicator values exceed the
217217 threshold given by parameter <tt>max_cost</tt>.
218218 <DT><tt>KeepContours | StopAtThreshold</tt> <DD> keep 1-voxel wide contour and stop at given <tt>max_cost</tt>.
219219 </DL>
238238 the original statistics.
239239
240240 If a candidate could be merged into more than one regions with identical
241 cost, the algorithm will favour the nearest region. If <tt>StopAtThreshold</tt> is active,
242 and the cost of the current candidate at any point in the algorithm exceeds the optional
243 <tt>max_cost</tt> value (which defaults to <tt>NumericTraits<double>::max()</tt>),
241 cost, the algorithm will favour the nearest region. If <tt>StopAtThreshold</tt> is active,
242 and the cost of the current candidate at any point in the algorithm exceeds the optional
243 <tt>max_cost</tt> value (which defaults to <tt>NumericTraits<double>::max()</tt>),
244244 region growing is aborted, and all voxels not yet assigned to a region remain unlabeled.
245245
246246 In some cases, the cost only depends on the feature value of the current
261261 seededRegionGrowing3D(MultiArrayView<3, T1, S1> const & src,
262262 MultiArrayView<3, TS, AS> const & seeds,
263263 MultiArrayView<3, T2, S2> labels,
264 RegionStatisticsArray & stats,
264 RegionStatisticsArray & stats,
265265 SRGType srgType = CompleteGrow,
266266 Neighborhood neighborhood = NeighborCode3DSix(),
267267 double max_cost = NumericTraits<double>::max());
276276 class SeedImageIterator, class SeedAccessor,
277277 class DestImageIterator, class DestAccessor,
278278 class RegionStatisticsArray, class Neighborhood>
279 void
279 void
280280 seededRegionGrowing3D(SrcImageIterator srcul, Shape shape, SrcAccessor as,
281281 SeedImageIterator seedsul, SeedAccessor aseeds,
282282 DestImageIterator destul, DestAccessor ad,
283 RegionStatisticsArray & stats,
283 RegionStatisticsArray & stats,
284284 SRGType srgType = CompleteGrow,
285285 Neighborhood neighborhood = NeighborCode3DSix(),
286286 double max_cost = NumericTraits<double>::max());
297297 seededRegionGrowing3D(triple<SrcImageIterator, Shape, SrcAccessor> src,
298298 pair<SeedImageIterator, SeedAccessor> seeds,
299299 pair<DestImageIterator, DestAccessor> dest,
300 RegionStatisticsArray & stats,
300 RegionStatisticsArray & stats,
301301 SRGType srgType = CompleteGrow,
302 Neighborhood neighborhood = NeighborCode3DSix(),
302 Neighborhood neighborhood = NeighborCode3DSix(),
303303 double max_cost = NumericTraits<double>::max());
304304 }
305305 \endcode
306306 \deprecatedEnd
307307
308308 <b> Usage:</b>
309
309
310310 <b>\#include</b> \<vigra/seededregiongrowing3d.hxx\><br>
311311 Namespace: vigra
312
312
313313 See \ref seededRegionGrowing() for an example
314314 */
315315 doxygen_overloaded_function(template <...> void seededRegionGrowing3D)
318318 class SeedImageIterator, class SeedAccessor,
319319 class DestImageIterator, class DestAccessor,
320320 class RegionStatisticsArray, class Neighborhood>
321 void
321 void
322322 seededRegionGrowing3D(SrcImageIterator srcul, Diff_type shape, SrcAccessor as,
323323 SeedImageIterator seedsul, SeedAccessor aseeds,
324324 DestImageIterator destul, DestAccessor ad,
325 RegionStatisticsArray & stats,
325 RegionStatisticsArray & stats,
326326 SRGType srgType,
327327 Neighborhood,
328328 double max_cost)
355355 IVolume regions(regionshape);
356356 Traverser ir = regions.traverser_begin();
357357 ir = ir + Diff_type(1,1,1);
358
358
359359 //IVolume::Iterator iry, irx, irz;
360360 Traverser iry, irx, irz;
361361
362362 //initImageBorder(destImageRange(regions), 1, SRGWatershedLabel);
363 initMultiArrayBorder(destMultiArrayRange(regions), 1, SRGWatershedLabel);
364
363 initMultiArrayBorder(destMultiArrayRange(regions), 1, SRGWatershedLabel);
364
365365 copyMultiArray(seedsul, Diff_type(w,h,d), aseeds, ir, AccessorTraits<int>::default_accessor());
366366
367367 // allocate and init memory for the results
383383 pos[1]++, isy.dim1()++, iry.dim1()++)
384384 {
385385 //std::cerr << "Y = " << pos[1] << std::endl;
386
386
387387 for(isx=isy, irx=iry, pos[0]=0; pos[0]<w;
388388 pos[0]++, isx.dim0()++, irx.dim0()++)
389389 {
390390 //std::cerr << "X = " << pos[0] << std::endl;
391
391
392392 if(*irx == 0)
393393 {
394394 // find candidate pixels for growing and fill heap
408408 }
409409 }
410410 }
411
411
412412 // perform region growing
413413 while(pheap.size() != 0)
414414 {
466466 }
467467 }
468468 }
469
469
470470 // free temporary memory
471471 while(pheap.size() != 0)
472472 {
475475 }
476476
477477 // write result
478 transformMultiArray(ir, Diff_type(w,h,d), AccessorTraits<int>::default_accessor(),
478 transformMultiArray(ir, Diff_type(w,h,d), AccessorTraits<int>::default_accessor(),
479479 destul, ad, detail::UnlabelWatersheds());
480480 }
481481
489489 DestImageIterator destul, DestAccessor ad,
490490 RegionStatisticsArray & stats, SRGType srgType, Neighborhood n)
491491 {
492 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds,
492 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds,
493493 destul, ad, stats, srgType, n, NumericTraits<double>::max());
494494 }
495495
503503 DestImageIterator destul, DestAccessor ad,
504504 RegionStatisticsArray & stats, SRGType srgType)
505505 {
506 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds,
506 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds,
507507 destul, ad, stats, srgType, NeighborCode3DSix());
508508 }
509509
517517 DestImageIterator destul, DestAccessor ad,
518518 RegionStatisticsArray & stats)
519519 {
520 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds, destul, ad,
520 seededRegionGrowing3D( srcul, shape, as, seedsul, aseeds, destul, ad,
521521 stats, CompleteGrow);
522522 }
523523
529529 seededRegionGrowing3D(triple<SrcImageIterator, Shape, SrcAccessor> img1,
530530 pair<SeedImageIterator, SeedAccessor> img3,
531531 pair<DestImageIterator, DestAccessor> img4,
532 RegionStatisticsArray & stats,
532 RegionStatisticsArray & stats,
533533 SRGType srgType, Neighborhood n, double max_cost)
534534 {
535535 seededRegionGrowing3D(img1.first, img1.second, img1.third,
546546 seededRegionGrowing3D(triple<SrcImageIterator, Shape, SrcAccessor> img1,
547547 pair<SeedImageIterator, SeedAccessor> img3,
548548 pair<DestImageIterator, DestAccessor> img4,
549 RegionStatisticsArray & stats,
549 RegionStatisticsArray & stats,
550550 SRGType srgType, Neighborhood n)
551551 {
552552 seededRegionGrowing3D(img1.first, img1.second, img1.third,
595595 seededRegionGrowing3D(MultiArrayView<3, T1, S1> const & img1,
596596 MultiArrayView<3, TS, AS> const & img3,
597597 MultiArrayView<3, T2, S2> img4,
598 RegionStatisticsArray & stats,
598 RegionStatisticsArray & stats,
599599 SRGType srgType, Neighborhood n, double max_cost)
600600 {
601601 vigra_precondition(img1.shape() == img3.shape(),
614614 seededRegionGrowing3D(MultiArrayView<3, T1, S1> const & img1,
615615 MultiArrayView<3, TS, AS> const & img3,
616616 MultiArrayView<3, T2, S2> img4,
617 RegionStatisticsArray & stats,
617 RegionStatisticsArray & stats,
618618 SRGType srgType, Neighborhood n)
619619 {
620620 vigra_precondition(img1.shape() == img3.shape(),
23992399 // first calculate required kernel sizes
24002400 int radius;
24012401 if(windowRatio == 0.0)
2402 radius = (int)(3.0 * std_dev + 0.5 * order + 0.5);
2402 radius = (int)((3.0 + 0.5 * order) * std_dev + 0.5);
24032403 else
24042404 radius = (int)(windowRatio * std_dev + 0.5);
24052405 if(radius == 0)
201201 VIGRA_EXPORT void readSIF(const SIFImportInfo &info, MultiArrayView<3, float> array);
202202
203203 template <unsigned int N, class T, class S>
204 void readSIF(const SIFImportInfo &info, MultiArrayView<N, T, S> array)
204 void readSIF(const SIFImportInfo &, MultiArrayView<N, T, S>)
205205 {
206206 vigra_precondition(false, "readSIF(): Destination array must be MultiArrayView<3, float>.");
207207 }
228228 VIGRA_EXPORT void readSIFBlock(const SIFImportInfo &info, Shape3 offset, Shape3 shape, MultiArrayView<3, float> array);
229229
230230 template <unsigned int N, class T, class S>
231 void readSIFBlock(const SIFImportInfo &info, Shape3 offset, Shape3 shape, MultiArrayView<N, T, S> array)
231 void readSIFBlock(const SIFImportInfo &, Shape3, Shape3, MultiArrayView<N, T, S>)
232232 {
233233 vigra_precondition(false, "readSIFBlock(): Destination array must be MultiArrayView<3, float>.");
234234 }
5555 double length, salience;
5656 MultiArrayIndex partial_area;
5757 bool is_loop;
58
58
5959 SkeletonNode()
6060 : parent(lemon::INVALID)
6161 , principal_child(lemon::INVALID)
6464 , partial_area(0)
6565 , is_loop(false)
6666 {}
67
67
6868 SkeletonNode(Node const & s)
6969 : parent(s)
7070 , principal_child(lemon::INVALID)
8080 {
8181 typedef SkeletonNode<Node> SNode;
8282 typedef std::map<Node, SNode> Skeleton;
83
83
8484 Node anchor, lower, upper;
8585 Skeleton skeleton;
86
86
8787 SkeletonRegion()
8888 : anchor(lemon::INVALID)
8989 , lower(NumericTraits<MultiArrayIndex>::max())
9090 , upper(NumericTraits<MultiArrayIndex>::min())
9191 {}
92
92
9393 void addNode(Node const & n)
9494 {
9595 skeleton[n] = SNode(n);
100100 };
101101
102102 template <class Graph, class Node, class NodeMap>
103 inline unsigned int
103 inline unsigned int
104104 neighborhoodConfiguration(Graph const & g, Node const & n, NodeMap const & labels)
105105 {
106106 typedef typename Graph::OutArcIt ArcIt;
107107 typedef typename NodeMap::value_type LabelType;
108
108
109109 LabelType label = labels[n];
110110 unsigned int v = 0;
111111 for(ArcIt arc(g, n); arc != lemon::INVALID; ++arc)
151151 // use std::greater because we need the smallest distances at the top of the queue
152152 // (std::priority_queue is a max queue by default)
153153 std::priority_queue<SP, std::vector<SP>, std::greater<SP> > pqueue;
154
154
155155 bool isSimpleStrong[256] = {
156 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
157 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
158 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
159 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
160 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
161 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
162 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
163 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
164 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
165 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
166 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
156 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
157 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
158 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
159 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
160 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
161 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
162 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
163 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
164 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
165 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
166 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
167167 };
168
168
169169 bool isSimplePreserveEndPoints[256] = {
170 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,
172 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
175 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
176 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
177 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
178 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
179 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
170 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
171 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1,
172 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
173 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
174 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
175 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
176 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
177 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
178 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
179 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
180180 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
181181 };
182
182
183183 bool * isSimplePoint = preserve_endpoints
184184 ? isSimplePreserveEndPoints
185185 : isSimpleStrong;
186
186
187187 int max_degree = g.maxDegree();
188188 double epsilon = 0.5/labels.size(), offset = 0;
189189 for (NodeIt node(g); node != lemon::INVALID; ++node)
209209 {
210210 continue; // point already deleted or no longer simple
211211 }
212
212
213213 labels[p] = 0; // delete simple point
214214
215215 for (ArcIt arc(g, p); arc != lemon::INVALID; ++arc)
231231 {
232232 Label label;
233233 Labels const & labels;
234
234
235235 CheckForHole(Label l, Labels const & ls)
236236 : label(l)
237237 , labels(ls)
238238 {}
239
239
240240 template <class Node>
241241 bool operator()(Node const & n) const
242242 {
244244 }
245245 };
246246
247 } // namespace detail
248
249 /** \addtogroup MultiArrayDistanceTransform
247 } // namespace detail
248
249 /** \addtogroup DistanceTransform
250250 */
251251 //@{
252252
268268 PruneSalienceRelative = PruneSalience + Relative,
269269 PruneTopology = PreserveTopology + Prune
270270 };
271
271
272272 SkeletonMode mode;
273273 double pruning_threshold;
274
274
275275 /** \brief construct with default settings
276
276
277277 (default: <tt>pruneSalienceRelative(0.2, true)</tt>)
278278 */
279279 SkeletonOptions()
280280 : mode(SkeletonMode(PruneSalienceRelative | PreserveTopology))
281281 , pruning_threshold(0.2)
282282 {}
283
283
284284 /** \brief return the un-pruned skeletong
285285 */
286286 SkeletonOptions & dontPrune()
288288 mode = DontPrune;
289289 return *this;
290290 }
291
291
292292 /** \brief return only the region's center line (i.e. skeleton graph diameter)
293293 */
294294 SkeletonOptions & pruneCenterLine()
296296 mode = PruneCenterLine;
297297 return *this;
298298 }
299
299
300300 /** \brief Don't prune and return the length of each skeleton segment.
301301 */
302302 SkeletonOptions & returnLength()
304304 mode = Length;
305305 return *this;
306306 }
307
307
308308 /** \brief prune skeleton segments whose length is below the given threshold
309
309
310310 If \a preserve_topology is <tt>true</tt> (default), skeleton loops
311 (i.e. parts enclosing a hole in the region) are preserved even if their
311 (i.e. parts enclosing a hole in the region) are preserved even if their
312312 length is below the threshold. Otherwise, loops are pruned as well.
313313 */
314314 SkeletonOptions & pruneLength(double threshold, bool preserve_topology=true)
319319 pruning_threshold = threshold;
320320 return *this;
321321 }
322
322
323323 /** \brief prune skeleton segments whose relative length is below the given threshold
324
325 This works like <tt>pruneLength()</tt>, but the threshold is specified as a
324
325 This works like <tt>pruneLength()</tt>, but the threshold is specified as a
326326 fraction of the maximum segment length in the skeleton.
327327 */
328328 SkeletonOptions & pruneLengthRelative(double threshold, bool preserve_topology=true)
333333 pruning_threshold = threshold;
334334 return *this;
335335 }
336
336
337337 /** \brief Don't prune and return the salience of each skeleton segment.
338338 */
339339 SkeletonOptions & returnSalience()
341341 mode = Salience;
342342 return *this;
343343 }
344
344
345345 /** \brief prune skeleton segments whose salience is below the given threshold
346
346
347347 If \a preserve_topology is <tt>true</tt> (default), skeleton loops
348 (i.e. parts enclosing a hole in the region) are preserved even if their
348 (i.e. parts enclosing a hole in the region) are preserved even if their
349349 salience is below the threshold. Otherwise, loops are pruned as well.
350350 */
351351 SkeletonOptions & pruneSalience(double threshold, bool preserve_topology=true)
356356 pruning_threshold = threshold;
357357 return *this;
358358 }
359
359
360360 /** \brief prune skeleton segments whose relative salience is below the given threshold
361
362 This works like <tt>pruneSalience()</tt>, but the threshold is specified as a
361
362 This works like <tt>pruneSalience()</tt>, but the threshold is specified as a
363363 fraction of the maximum segment salience in the skeleton.
364364 */
365365 SkeletonOptions & pruneSalienceRelative(double threshold, bool preserve_topology=true)
370370 pruning_threshold = threshold;
371371 return *this;
372372 }
373
373
374374 /** \brief prune such that only the topology is preserved
375
375
376376 If \a preserve_center is <tt>true</tt> (default), the eccentricity center
377377 of the skeleton will not be pruned, even if it is not essential for the topology.
378 Otherwise, the center is only preserved if it is essential. The center is always
378 Otherwise, the center is only preserved if it is essential. The center is always
379379 preserved (and is the only remaining point) when the region has no holes.
380380 */
381381 SkeletonOptions & pruneTopology(bool preserve_center=true)
408408 typedef double WeightType;
409409 typedef detail::SkeletonNode<Node> SNode;
410410 typedef std::map<Node, SNode> Skeleton;
411
411
412412 vigra_precondition(labels.shape() == dest.shape(),
413413 "skeleton(): shape mismatch between input and output.");
414
414
415415 MultiArray<N, MultiArrayIndex> squared_distance;
416416 dest = 0;
417417 T1 maxLabel = 0;
418418 // find skeleton points
419419 {
420420 using namespace multi_math;
421
421
422422 MultiArray<N, Shape> vectors(labels.shape());
423423 boundaryVectorDistance(labels, vectors, false, OuterBoundary);
424424 squared_distance = squaredNorm(vectors);
425
425
426426 ArrayVector<Node> ends_to_be_checked;
427427 Graph g(labels.shape());
428428 for (EdgeIt edge(g); edge != lemon::INVALID; ++edge)
429429 {
430430 Node p1 = g.u(*edge),
431431 p2 = g.v(*edge);
432 T1 l1 = labels[p1],
432 T1 l1 = labels[p1],
433433 l2 = labels[p2];
434434 maxLabel = max(maxLabel, max(l1, l2));
435435 if(l1 == l2)
436436 {
437437 if(l1 <= 0) // only consider positive labels
438438 continue;
439
439
440440 const Node v1 = vectors[p1],
441441 v2 = vectors[p2],
442442 dp = p2 - p1,
443443 dv = v2 - v1 + dp;
444444 if(max(abs(dv)) <= 1) // points whose support points coincide or are adjacent
445445 continue; // don't belong to the skeleton
446
447 // among p1 and p2, the point which is closer to the bisector
446
447 // among p1 and p2, the point which is closer to the bisector
448448 // of the support points p1 + v1 and p2 + v2 belongs to the skeleton
449449 const MultiArrayIndex d1 = dot(dv, dp),
450450 d2 = dot(dv, v1+v2);
469469 dest[p2] = l2;
470470 }
471471 }
472
473
472
473
474474 // add a point when a skeleton line stops short of the shape boundary
475475 // FIXME: can this be solved during the initial detection above?
476476 Graph g8(labels.shape(), IndirectNeighborhood);
477477 for (unsigned k=0; k<ends_to_be_checked.size(); ++k)
478478 {
479479 // The phenomenon only occurs at points whose distance from the background is 2.
480 // We've put these points into ends_to_be_checked.
480 // We've put these points into ends_to_be_checked.
481481 Node p1 = ends_to_be_checked[k];
482482 T2 label = dest[p1];
483483 int count = 0;
493493
494494 // from here on, we only need the squared DT, not the vector DT
495495 }
496
496
497497 // The true skeleton is defined by the interpixel edges between the
498498 // Voronoi regions of the DT. Our skeleton detection algorithm affectively
499499 // rounds the interpixel edges to the nearest pixel such that the result
500 // is mainly 8-connected and thin. However, thick skeleton pieces may still
500 // is mainly 8-connected and thin. However, thick skeleton pieces may still
501501 // arise when two interpixel contours are only one pixel apart, i.e. a
502 // Voronoi region is only one pixel wide. Since this happens rarely, we
502 // Voronoi region is only one pixel wide. Since this happens rarely, we
503503 // can simply remove these cases by thinning.
504504 detail::skeletonThinning(squared_distance, dest);
505
505
506506 if(options.mode == SkeletonOptions::PruneCenterLine)
507507 dest = 0;
508
508
509509 // Reduce the full grid graph to a skeleton graph by inserting infinite
510510 // edge weights between skeleton pixels and non-skeleton pixels.
511511 if(features)
521521 T2 label = dest[p1];
522522 if(label <= 0)
523523 continue;
524
524
525525 // FIXME: consider using an AdjacencyListGraph from here on
526526 regions[(size_t)label].addNode(p1);
527527
534534 weights[*arc] = infiniteWeight;
535535 }
536536 }
537
537
538538 ShortestPathDijkstra<Graph, WeightType> pathFinder(g);
539539 // Handle the skeleton of each region individually.
540540 for(std::size_t label=1; label < regions.size(); ++label)
542542 Skeleton & skeleton = regions[label].skeleton;
543543 if(skeleton.size() == 0) // label doesn't exist
544544 continue;
545
545
546546 // Find a diameter (longest path) in the skeleton.
547547 Node anchor = regions[label].anchor,
548548 lower = regions[label].lower,
549549 upper = regions[label].upper + Shape(1);
550
550
551551 pathFinder.run(lower, upper, weights, anchor, lemon::INVALID, maxWeight);
552552 anchor = pathFinder.target();
553553 pathFinder.reRun(weights, anchor, lemon::INVALID, maxWeight);
554554 anchor = pathFinder.target();
555
555
556556 Polygon<Shape> center_line;
557557 center_line.push_back_unsafe(anchor);
558558 while(pathFinder.predecessors()[center_line.back()] != center_line.back())
559559 center_line.push_back_unsafe(pathFinder.predecessors()[center_line.back()]);
560
560
561561 if(options.mode == SkeletonOptions::PruneCenterLine)
562562 {
563563 for(unsigned int k=0; k<center_line.size(); ++k)
564564 dest[center_line[k]] = (T2)label;
565565 continue; // to next label
566566 }
567
567
568568 // Perform the eccentricity transform of the skeleton
569569 Node center = center_line[roundi(center_line.arcLengthQuantile(0.5))];
570570 pathFinder.reRun(weights, center, lemon::INVALID, maxWeight);
571
571
572572 bool compute_salience = (options.mode & SkeletonOptions::Salience) != 0;
573573 ArrayVector<Node> raw_skeleton(pathFinder.discoveryOrder());
574574 // from periphery to center: create skeleton tree and compute salience
579579 SNode & n1 = skeleton[p1];
580580 SNode & n2 = skeleton[p2];
581581 n1.parent = p2;
582
582
583583
584584 // remove non-skeleton edges (i.e. set weight = infiniteWeight)
585585 for (BackArcIt arc(g, p1); arc != lemon::INVALID; ++arc)
591591 continue; // edge belongs to the skeleton
592592 if(pathFinder.predecessors()[p] == p1)
593593 continue; // edge belongs to the skeleton
594 if(n1.principal_child == lemon::INVALID ||
594 if(n1.principal_child == lemon::INVALID ||
595595 skeleton[p].principal_child == lemon::INVALID)
596596 continue; // edge may belong to a loop => test later
597597 weights[*arc] = infiniteWeight;
604604 n2.length = l;
605605 n2.principal_child = p1;
606606 }
607
607
608608 if(compute_salience)
609609 {
610610 const double min_length = 4.0; // salience is meaningless for shorter segments due
612612 if(n1.length >= min_length)
613613 {
614614 n1.salience = max(n1.salience, (n1.length + 0.5) / sqrt(squared_distance[p1]));
615
615
616616 // propagate salience to parent if this is the most salient subtree
617617 if(n2.salience < n1.salience)
618618 n2.salience = n1.salience;
623623 else
624624 n1.salience = n1.length;
625625 }
626
626
627627 // from center to periphery: propagate salience and compute twice the partial area
628628 for(int k=0; k < (int)raw_skeleton.size(); ++k)
629629 {
631631 SNode & n1 = skeleton[p1];
632632 Node p2 = n1.parent;
633633 SNode & n2 = skeleton[p2];
634
634
635635 if(p1 == n2.principal_child)
636636 {
637637 n1.length = n2.length;
644644 n1.partial_area = n2.partial_area + (p1[0]*p2[1] - p1[1]*p2[0]);
645645 }
646646
647 // always treat eccentricity center as a loop, so that it cannot be pruned
647 // always treat eccentricity center as a loop, so that it cannot be pruned
648648 // away unless (options.mode & PreserveTopology) is false.
649649 skeleton[center].is_loop = true;
650
650
651651 // from periphery to center: * find and propagate loops
652652 // * delete branches not reaching the boundary
653653 detail::CheckForHole<std::size_t, MultiArrayView<2, T1, S1> > hasNoHole(label, labels);
664664 {
665665 Node p2 = g.target(*arc);
666666 SNode * n2 = &skeleton[p2];
667
667
668668 if(n1.parent == p2)
669669 continue; // going back to the parent can't result in a loop
670670 if(weights[*arc] == infiniteWeight)
673673 MultiArrayIndex area2 = abs(n1.partial_area - (p1[0]*p2[1] - p1[1]*p2[0]) - n2->partial_area);
674674 if(area2 <= 3) // area is too small to enclose a hole => loop is a discretization artifact
675675 continue;
676
676
677677 // use Dijkstra to find the loop
678 WeightType edge_length = weights[*arc];
679678 weights[*arc] = infiniteWeight;
680679 pathFinder.reRun(weights, p1, p2);
681680 Polygon<Shape2> poly;
683682 poly.push_back_unsafe(p1);
684683 poly.push_back_unsafe(p2);
685684 Node p = p2;
686 do
685 do
687686 {
688687 p = pathFinder.predecessors()[p];
689688 poly.push_back_unsafe(p);
697696 ++hole_count;
698697 total_length += n1.length + n2->length;
699698 double max_salience = max(n1.salience, n2->salience);
700 for(int p=1; p<poly.size(); ++p)
699 for(decltype(poly.size()) p=1; p<poly.size(); ++p)
701700 {
702701 SNode & n = skeleton[poly[p]];
703702 n.is_loop = true;
725724 }
726725 }
727726 }
728
727
729728 if(n1.is_loop)
730729 skeleton[n1.parent].is_loop = true;
731730 }
732
731
733732 bool dont_prune = (options.mode & SkeletonOptions::Prune) == 0;
734733 bool preserve_topology = (options.mode & SkeletonOptions::PreserveTopology) != 0 ||
735734 options.mode == SkeletonOptions::Prune;
748747 Node p1 = raw_skeleton[k];
749748 SNode & n1 = skeleton[p1];
750749 Node p2 = n1.parent;
751 SNode & n2 = skeleton[p2];
752 if(n1.principal_child == lemon::INVALID &&
753 n1.salience >= threshold &&
750 if(n1.principal_child == lemon::INVALID &&
751 n1.salience >= threshold &&
754752 !n1.is_loop)
755753 {
756754 ++branch_count;
769767 }
770768 if(branch_count > 0)
771769 average_length /= branch_count;
772
770
773771 if(features)
774772 {
775773 (*features)[label].diameter = center_line.length();
783781 (*features)[label].euclidean_diameter = norm(center_line.back()-center_line.front());
784782 }
785783 }
786
784
787785 if(options.mode == SkeletonOptions::Prune)
788786 detail::skeletonThinning(squared_distance, dest, false);
789787 }
794792 double diameter, total_length, average_length, euclidean_diameter;
795793 UInt32 branch_count, hole_count;
796794 Shape2 center, terminal1, terminal2;
797
795
798796 SkeletonFeatures()
799797 : diameter(0)
800798 , total_length(0)
812810 /********************************************************/
813811
814812 /*
815 To compute the skeleton reliably in higher dimensions, we have to work on
816 a topological grid. The tricks to work with rounded skeletons on the
813 To compute the skeleton reliably in higher dimensions, we have to work on
814 a topological grid. The tricks to work with rounded skeletons on the
817815 pixel grid probably don't generalize from 2D to 3D and higher. Specifically:
818
816
819817 * Compute Voronoi regions of the vector distance transformation according to
820818 identical support point to make sure that disconnected Voronoi regions
821819 still get only a single label.
822820 * Merge Voronoi regions whose support points are adjacent.
823821 * Mark skeleton candidates on the interpixel grid after the basic merge.
824822 * Detect skeleton segments simply by connected components labeling in the interpixel grid.
825 * Skeleton segments form hyperplanes => use this property to compute segment
823 * Skeleton segments form hyperplanes => use this property to compute segment
826824 attributes.
827825 * Detect holes (and therefore, skeleton segments that are critical for topology)
828826 by computing the depth of each region/surface in the homotopy tree.
829 * Add a pruning mode where holes are only preserved if their size exceeds a threshold.
830
827 * Add a pruning mode where holes are only preserved if their size exceeds a threshold.
828
831829 To implement this cleanly, we first need a good implementation of the topological grid graph.
832830 */
833831 // template <unsigned int N, class T1, class S1,
853851 }
854852 \endcode
855853
856 This function computes the skeleton for each region in the 2D label image \a labels
857 and paints the results into the result image \a dest. Input label
858 <tt>0</tt> is interpreted as background and always ignored. Skeletons will be
859 marked with the same label as the corresponding region (unless options
860 <tt>returnLength()</tt> or <tt>returnSalience()</tt> are selected, see below).
854 This function computes the skeleton for each region in the 2D label image \a labels
855 and paints the results into the result image \a dest. Input label
856 <tt>0</tt> is interpreted as background and always ignored. Skeletons will be
857 marked with the same label as the corresponding region (unless options
858 <tt>returnLength()</tt> or <tt>returnSalience()</tt> are selected, see below).
861859 Non-skeleton pixels will receive label <tt>0</tt>.
862860
863861 For each region, the algorithm proceeds in the following steps:
864862 <ol>
865863 <li>Compute the \ref boundaryVectorDistance() relative to the \ref OuterBoundary of the region.</li>
866 <li>Mark the raw skeleton: find 4-adjacent pixel pairs whose nearest boundary points are neither equal
864 <li>Mark the raw skeleton: find 4-adjacent pixel pairs whose nearest boundary points are neither equal
867865 nor adjacent and mark one pixel of the pair as a skeleton candidate. The resulting raw skeleton
868866 is 8-connected and thin. Skip the remaining steps when option <tt>dontPrune()</tt> is selected.</li>
869 <li>Compute the eccentricity transform of the raw skeleton and turn the skeleton into a tree
867 <li>Compute the eccentricity transform of the raw skeleton and turn the skeleton into a tree
870868 whose root is the eccentricity center. When option <tt>pruneCenterLine()</tt> is selected,
871 delete all skeleton points that do not belong to the two longest tree branches and
869 delete all skeleton points that do not belong to the two longest tree branches and
872870 skip the remaining steps.</li>
873871 <li>For each pixel on the skeleton, compute its <tt>length</tt> attribute as the depth of the
874 pixel's longest subtree. Compute its <tt>salience</tt> attribute as the ratio between
875 <tt>length</tt> and <tt>distance</tt>, where <tt>distance</tt> is the pixel's distance to
872 pixel's longest subtree. Compute its <tt>salience</tt> attribute as the ratio between
873 <tt>length</tt> and <tt>distance</tt>, where <tt>distance</tt> is the pixel's distance to
876874 the nearest boundary point according to the distance transform. It holds that <tt>length >= 0.5</tt>
877875 and <tt>salience >= 1.0</tt>.</li>
878 <li>Detect skeleton branching points and define <i>skeleton segments</i> as maximal connected pieces
876 <li>Detect skeleton branching points and define <i>skeleton segments</i> as maximal connected pieces
879877 without branching points.</li>
880878 <li>Compute <tt>length</tt> and <tt>salience</tt> of each segment as the maximum of these
881 attributes among the pixels in the segment. When options <tt>returnLength()</tt> or
882 <tt>returnSalience()</tt> are selected, skip the remaining steps and return the
883 requested segment attribute in <tt>dest</tt>. In this case, <tt>dest</tt>'s
884 <tt>value_type</tt> should be a floating point type to exactly accomodate the
879 attributes among the pixels in the segment. When options <tt>returnLength()</tt> or
880 <tt>returnSalience()</tt> are selected, skip the remaining steps and return the
881 requested segment attribute in <tt>dest</tt>. In this case, <tt>dest</tt>'s
882 <tt>value_type</tt> should be a floating point type to exactly accomodate the
885883 attribute values.</li>
886884 <li>Detect minimal cycles in the raw skeleton that enclose holes in the region (if any) and mark
887885 the corresponding pixels as critical for skeleton topology.</li>
888 <li>Prune skeleton segments according to the selected pruning strategy and return the result.
886 <li>Prune skeleton segments according to the selected pruning strategy and return the result.
889887 The following pruning strategies are available:
890888 <ul>
891889 <li><tt>pruneLength(threshold, preserve_topology)</tt>: Retain only segments whose length attribute
893891 (the defult), cycles around holes are preserved regardless of their length.
894892 Otherwise, they are pruned as well.</li>
895893 <li><tt>pruneLengthRelative(threshold, preserve_topology)</tt>: Like <tt>pruneLength()</tt>,
896 but the threshold is specified as a fraction of the maximum segment length in
894 but the threshold is specified as a fraction of the maximum segment length in
897895 the present region.</li>
898896 <li><tt>pruneSalience(threshold, preserve_topology)</tt>: Retain only segments whose salience attribute
899897 exceeds the given <tt>threshold</tt>. When <tt>preserve_topology</tt> is true
900898 (the defult), cycles around holes are preserved regardless of their salience.
901899 Otherwise, they are pruned as well.</li>
902900 <li><tt>pruneSalienceRelative(threshold, preserve_topology)</tt>: Like <tt>pruneSalience()</tt>,
903 but the threshold is specified as a fraction of the maximum segment salience in
901 but the threshold is specified as a fraction of the maximum segment salience in
904902 the present region.</li>
905903 <li><tt>pruneTopology(preserve_center)</tt>: Retain only segments that are essential for the region's
906904 topology. If <tt>preserve_center</tt> is true (the default), the eccentricity
907 center is also preserved, even if it is not essential. Otherwise, it might be
905 center is also preserved, even if it is not essential. Otherwise, it might be
908906 removed. The eccentricity center is always the only remaining point when
909907 the region has no holes.</li>
910908 </ul></li>
911909 </ol>
912
910
913911 The skeleton has the following properties:
914912 <ul>
915 <li>It is 8-connected and thin (except when two independent branches happen to run alongside
913 <li>It is 8-connected and thin (except when two independent branches happen to run alongside
916914 before they divert). Skeleton points are defined by rounding the exact Euclidean skeleton
917915 locations to the nearest pixel.</li>
918 <li>Skeleton branches terminate either at the region boundary or at a cycle. There are no branch
916 <li>Skeleton branches terminate either at the region boundary or at a cycle. There are no branch
919917 end points in the region interior.</li>
920 <li>The salience threshold acts as a scale parameter: Large thresholds only retain skeleton
918 <li>The salience threshold acts as a scale parameter: Large thresholds only retain skeleton
921919 branches characterizing the general region shape. When the threshold gets smaller, ever
922920 more detailed boundary bulges will be represented by a skeleton branch.</li>
923921 </ul>
924
922
925923 Remark: If you have an application where a skeleton graph would be more useful
926924 than a skeleton image, function <tt>skeletonizeImage()</tt> can be changed/extended easily.
927925
958956
959957 template <class T, class S>
960958 void
961 extractSkeletonFeatures(MultiArrayView<2, T, S> const & labels,
959 extractSkeletonFeatures(MultiArrayView<2, T, S> const & labels,
962960 ArrayVector<SkeletonFeatures> & features,
963961 SkeletonOptions const & options = SkeletonOptions())
964962 {
4444
4545 namespace vigra {
4646
47 /** \addtogroup SeededRegionGrowing
47 /** \addtogroup Superpixels
4848 */
4949 //@{
5050
5656
5757 /** \brief Generate seeds for SLIC superpixel computation in arbitrary dimensions.
5858
59 The source array \a src must be a scalar boundary indicator such as the gradient
59 The source array \a src must be a scalar boundary indicator such as the gradient
6060 magnitude. Seeds are initially placed on a regular Cartesian grid with spacing
6161 \a seedDist und then moved to the point with smallest boundary indicator within
6262 a search region of radius \a searchRadius around the initial position. The resulting
6363 points are then marked in the output array \a seeds by consecutive labels.
64
65 The function returns the number of selected seeds, which equals the largest seed label
64
65 The function returns the number of selected seeds, which equals the largest seed label
6666 because labeling starts at 1.
6767
6868 <b> Declaration:</b>
7272 namespace vigra {
7373 template <unsigned int N, class T, class S1,
7474 class Label, class S2>
75 unsigned int
75 unsigned int
7676 generateSlicSeeds(MultiArrayView<N, T, S1> const & src,
7777 MultiArrayView<N, Label, S2> seeds,
7878 unsigned int seedDist,
8888 \code
8989 MultiArray<2, RGBValue<float> > src(Shape2(w, h));
9090 ... // fill src image
91
91
9292 // transform image to Lab color space
9393 transformImage(srcImageRange(src), destImage(src), RGBPrime2LabFunctor<float>());
94
94
9595 // compute image gradient magnitude at scale 1.0 as a boundary indicator
9696 MultiArray<2, float> grad(src.shape());
9797 gaussianGradientMagnitude(srcImageRange(src), destImage(grad), 1.0);
98
98
9999 MultiArray<2, unsigned int> seeds(src.shape());
100100 int seedDistance = 15;
101
101
102102 // place seeds on a grid with distance 15, but then move it to the lowest gradient
103103 // poistion in a 3x3 window
104104 generateSlicSeeds(grad, seeds, seedDistance);
110110
111111 template <unsigned int N, class T, class S1,
112112 class Label, class S2>
113 unsigned int
113 unsigned int
114114 generateSlicSeeds(MultiArrayView<N, T, S1> const & boundaryIndicatorImage,
115115 MultiArrayView<N, Label, S2> seeds,
116116 unsigned int seedDist,
122122 Shape shape(boundaryIndicatorImage.shape()),
123123 seedShape(floor(shape / double(seedDist))),
124124 offset((shape - (seedShape - Shape(1))*seedDist) / 2);
125
125
126126 unsigned int label = 0;
127127 MultiCoordinateIterator<N> iter(seedShape),
128128 end = iter.getEndIterator();
132132 Shape center = (*iter)*seedDist + offset;
133133 Shape startCoord = max(Shape(0), center-Shape(searchRadius));
134134 Shape endCoord = min(center+Shape(searchRadius+1), shape);
135
135
136136 // find the coordinate of minimum boundary indicator in window
137137 using namespace acc;
138138 AccumulatorChain<CoupledArrays<N, T>,
163163 : iter(10),
164164 sizeLimit(0)
165165 {}
166
166
167167 /** \brief Number of iterations.
168168
169169 Default: 10
173173 iter = i;
174174 return *this;
175175 }
176
176
177177 /** \brief Minimum superpixel size.
178
178
179179 If you set this to 1, no size filtering will be performed.
180180
181181 Default: 0 (determine size limit automatically as <tt>average size / 4</tt>)
185185 sizeLimit = s;
186186 return *this;
187187 }
188
188
189189 unsigned int iter;
190190 unsigned int sizeLimit;
191191 };
195195 template <unsigned int N, class T, class Label>
196196 class Slic
197197 {
198 public:
199 //
198 public:
199 //
200200 typedef MultiArrayView<N, T> DataImageType;
201 typedef MultiArrayView<N, Label> LabelImageType;
201 typedef MultiArrayView<N, Label> LabelImageType;
202202 typedef typename DataImageType::difference_type ShapeType;
203203 typedef typename PromoteTraits<
204204 typename NormTraits<T>::NormType,
205205 typename NormTraits<MultiArrayIndex>::NormType
206206 >::Promote DistanceType;
207207
208 Slic(DataImageType dataImage,
209 LabelImageType labelImage,
210 DistanceType intensityScaling,
211 int maxRadius,
208 Slic(DataImageType dataImage,
209 LabelImageType labelImage,
210 DistanceType intensityScaling,
211 int maxRadius,
212212 SlicOptions const & options = SlicOptions());
213
213
214214 unsigned int execute();
215215
216216 private:
217217 void updateAssigments();
218218 unsigned int postProcessing();
219
219
220220 typedef MultiArray<N,DistanceType> DistanceImageType;
221221
222222 ShapeType shape_;
226226 int max_radius_;
227227 DistanceType normalization_;
228228 SlicOptions options_;
229
229
230230 typedef acc::Select<acc::DataArg<1>, acc::LabelArg<2>, acc::Mean, acc::RegionCenter> Statistics;
231231 typedef acc::AccumulatorChainArray<CoupledArrays<N, T, Label>, Statistics> RegionFeatures;
232232 RegionFeatures clusters_;
236236
237237 template <unsigned int N, class T, class Label>
238238 Slic<N, T, Label>::Slic(
239 DataImageType dataImage,
239 DataImageType dataImage,
240240 LabelImageType labelImage,
241241 DistanceType intensityScaling,
242242 int maxRadius,
261261 // update mean for each cluster
262262 clusters_.reset();
263263 extractFeatures(dataImage_, labelImage_, clusters_);
264
264
265265 // update which pixels get assigned to which cluster
266266 updateAssigments();
267267 }
279279 {
280280 if(get<Count>(clusters_, c) == 0) // label doesn't exist
281281 continue;
282
282
283283 typedef typename LookupTag<RegionCenter, RegionFeatures>::value_type CenterType;
284284 CenterType center = get<RegionCenter>(clusters_, c);
285285
286286 // get ROI limits around region center
287 ShapeType pixelCenter(round(center)),
288 startCoord(max(ShapeType(0), pixelCenter - ShapeType(max_radius_))),
287 ShapeType pixelCenter(round(center)),
288 startCoord(max(ShapeType(0), pixelCenter - ShapeType(max_radius_))),
289289 endCoord(min(shape_, pixelCenter + ShapeType(max_radius_+1)));
290290 center -= startCoord; // need center relative to ROI
291
291
292292 // setup iterators for ROI
293293 typedef typename CoupledArrays<N, T, Label, DistanceType>::IteratorType Iterator;
294294 Iterator iter = createCoupledIterator(dataImage_, labelImage_, distance_).
295295 restrictToSubarray(startCoord, endCoord),
296296 end = iter.getEndIterator();
297
297
298298 // only pixels within the ROI can be assigned to a cluster
299299 for(; iter != end; ++iter)
300300 {
313313 }
314314
315315 template <unsigned int N, class T, class Label>
316 unsigned int
316 unsigned int
317317 Slic<N, T, Label>::postProcessing()
318318 {
319319 // get rid of regions below a size limit
320320 MultiArray<N,Label> tmpLabelImage(labelImage_);
321321 unsigned int maxLabel = labelMultiArray(tmpLabelImage, labelImage_, DirectNeighborhood);
322
322
323323 unsigned int sizeLimit = options_.sizeLimit == 0
324324 ? (unsigned int)(0.25 * labelImage_.size() / maxLabel)
325325 : options_.sizeLimit;
326326 if(sizeLimit == 1)
327327 return maxLabel;
328
328
329329 // determine region size
330330 using namespace acc;
331331 AccumulatorChainArray<CoupledArrays<N, Label>, Select<LabelArg<1>, Count> > sizes;
332332 extractFeatures(labelImage_, sizes);
333
333
334334 typedef GridGraph<N, undirected_tag> Graph;
335335 Graph graph(labelImage_.shape(), DirectNeighborhood);
336
336
337337 typedef typename Graph::NodeIt graph_scanner;
338338 typedef typename Graph::OutBackArcIt neighbor_iterator;
339339
340 ArrayVector<Label> regions(maxLabel+1);
340 vigra::UnionFindArray<Label> regions(maxLabel+1);
341 ArrayVector<unsigned char> done(maxLabel+1, false);
341342
342343 // make sure that all regions exceed the sizeLimit
343 for (graph_scanner node(graph); node != lemon::INVALID; ++node)
344 for (graph_scanner node(graph); node != lemon::INVALID; ++node)
344345 {
345346 Label label = labelImage_[*node];
346
347 if(regions[label] > 0)
347
348 if(done[label])
348349 continue; // already processed
349
350 regions[label] = label;
351
350
352351 if(get<Count>(sizes, label) < sizeLimit)
353352 {
354 // region is too small => merge into an existing neighbor
353 // region is too small => merge into a neighbor
355354 for (neighbor_iterator arc(graph, node); arc != lemon::INVALID; ++arc)
356355 {
357 regions[label] = regions[labelImage_[graph.target(*arc)]];
358 break;
356 Label other = labelImage_[graph.target(*arc)];
357 if(label != other)
358 {
359 regions.makeUnion(label, other);
360 done[label] = true;
361 break;
362 }
359363 }
360 }
361 }
362
363 // make labels contiguous after possible merging
364 unsigned int newMaxLabel = 0;
365 for(unsigned int i=1; i<=maxLabel; ++i)
366 {
367 if(regions[i] == i)
368 {
369 regions[i] = (Label)++newMaxLabel;
370364 }
371365 else
372366 {
373 regions[i] = regions[regions[i]];
367 done[label] = true;
374368 }
375369 }
376370
377 // update labels
378 for (graph_scanner node(graph); node != lemon::INVALID; ++node)
379 {
380 labelImage_[*node] = regions[labelImage_[*node]];
381 }
382
383 return newMaxLabel;
371 // make labels contiguous after possible merging
372 Label newMaxLabel = regions.makeContiguous();
373 for (graph_scanner node(graph); node != lemon::INVALID; ++node)
374 {
375 labelImage_[*node] = regions.findLabel(labelImage_[*node]);
376 }
377
378 return (unsigned int)newMaxLabel;
384379 }
385380
386381 } // namespace detail
388383
389384 /** \brief Compute SLIC superpixels in arbitrary dimensions.
390385
391 This function implements the algorithm described in
392
393 R. Achanta et al.: <em>"SLIC Superpixels Compared to State-of-the-Art
386 This function implements the algorithm described in
387
388 R. Achanta et al.: <em>"SLIC Superpixels Compared to State-of-the-Art
394389 Superpixel Methods"</em>, IEEE Trans. Patt. Analysis Mach. Intell. 34(11):2274-2281, 2012
395
396 The value type <tt>T</tt> of the source array \a src must provide the necessary functionality
397 to compute averages and squared distances (i.e. it must fulfill the requirements of a
390
391 The value type <tt>T</tt> of the source array \a src must provide the necessary functionality
392 to compute averages and squared distances (i.e. it must fulfill the requirements of a
398393 \ref LinearSpace and support squaredNorm(T const &)). This is true for all scalar types as well as
399394 \ref vigra::TinyVector and \ref vigra::RGBValue. The output array \a labels will be filled
400395 with labels designating membership of each point in one of the superpixel regions.
401
402 The output array can optionally contain seeds (which will be overwritten by the output)
396
397 The output array can optionally contain seeds (which will be overwritten by the output)
403398 to give you full control over seed placement. If \a labels is empty, seeds will be created
404 automatically by an internal call to generateSlicSeeds().
405
399 automatically by an internal call to generateSlicSeeds().
400
406401 The parameter \a seedDistance specifies the radius of the window around each seed (or, more
407 precisely, around the present regions centers) where the algorithm looks for potential members
402 precisely, around the present regions centers) where the algorithm looks for potential members
408403 of the corresponding superpixel. It thus places an upper limit on the superpixel size. When seeds
409404 are computed automatically, this parameter also determines the grid spacing for seed placement.
410
411 The parameter \a intensityScaling is used to normalize (i.e. divide) the color/intensity difference
405
406 The parameter \a intensityScaling is used to normalize (i.e. divide) the color/intensity difference
412407 before it is compared with the spatial distance. This corresponds to parameter <i>m</i> in equation
413408 (2) of the paper.
414
409
415410 The options object can be used to specify the number of iterations (<tt>SlicOptions::iterations()</tt>)
416 and an explicit minimal superpixel size (<tt>SlicOptions::minSize()</tt>). By default, the algorithm
411 and an explicit minimal superpixel size (<tt>SlicOptions::minSize()</tt>). By default, the algorithm
417412 merges all regions that are smaller than 1/4 of the average superpixel size.
418
419 The function returns the number of superpixels, which equals the largest label
413
414 The function returns the number of superpixels, which equals the largest label
420415 because labeling starts at 1.
421416
422417 <b> Declaration:</b>
427422 template <unsigned int N, class T, class S1,
428423 class Label, class S2,
429424 class DistanceType>
430 unsigned int
425 unsigned int
431426 slicSuperpixels(MultiArrayView<N, T, S1> const & src,
432427 MultiArrayView<N, Label, S2> labels,
433428 DistanceType intensityScaling,
434 unsigned int seedDistance,
429 unsigned int seedDistance,
435430 SlicOptions const & options = SlicOptions());
436431 }
437432 \endcode
444439 \code
445440 MultiArray<2, RGBValue<float> > src(Shape2(w, h));
446441 ... // fill src image
447
442
448443 // transform image to Lab color space
449 transformMultiArray(srcMultiArrayRange(src), destMultiArray(src), RGBPrime2LabFunctor<float>());
450
444 transformMultiArray(srcMultiArrayRange(src), destMultiArray(src), RGBPrime2LabFunctor<float>());
445
451446 MultiArray<2, unsigned int> labels(src.shape());
452447 int seedDistance = 15;
453448 double intensityScaling = 20.0;
454
449
455450 // compute seeds automatically, perform 40 iterations, and scale intensity differences
456451 // down to 1/20 before comparing with spatial distances
457452 slicSuperpixels(src, labels, intensityScaling, seedDistance, SlicOptions().iterations(40));
458453 \endcode
459
454
460455 This works for arbitrary-dimensional arrays.
461456 */
462457 doxygen_overloaded_function(template <...> unsigned int slicSuperpixels)
464459 template <unsigned int N, class T, class S1,
465460 class Label, class S2,
466461 class DistanceType>
467 unsigned int
462 unsigned int
468463 slicSuperpixels(MultiArrayView<N, T, S1> const & src,
469464 MultiArrayView<N, Label, S2> labels,
470465 DistanceType intensityScaling,
471 unsigned int seedDistance,
466 unsigned int seedDistance,
472467 SlicOptions const & options = SlicOptions())
473468 {
474469 if(!labels.any())
163163 sum_m = 0.0,
164164 sum_pm = 0.0,
165165 m = 0.0,
166 dist = 0.0,
166 // dist = 0.0,
167167 p = 0.0;
168168
169169 SrcIterator ys = s_ul;
371371 sum_m = 0.0,
372372 sum_pm = 0.0,
373373 m = 0.0,
374 dist = 0.0,
374 // dist = 0.0,
375375 p = 0.0;
376376
377377 SrcIterator ys = s_ul;
11931193
11941194 } //end of namespace vigra
11951195
1196 #endif //VIGRA_SPECKLEFILTERS_HXX
1196 #endif //VIGRA_SPECKLEFILTERS_HXX
880880 return operator()(x, y);
881881 }
882882
883 value_type dx(double x, double y) const
884 { return NumericTraits<VALUETYPE>::zero(); }
885
886 value_type dy(double x, double y) const
887 { return NumericTraits<VALUETYPE>::zero(); }
888
889 value_type dxx(double x, double y) const
890 { return NumericTraits<VALUETYPE>::zero(); }
891
892 value_type dxy(double x, double y) const
893 { return NumericTraits<VALUETYPE>::zero(); }
894
895 value_type dyy(double x, double y) const
896 { return NumericTraits<VALUETYPE>::zero(); }
897
898 value_type dx3(double x, double y) const
899 { return NumericTraits<VALUETYPE>::zero(); }
900
901 value_type dy3(double x, double y) const
902 { return NumericTraits<VALUETYPE>::zero(); }
903
904 value_type dxxy(double x, double y) const
905 { return NumericTraits<VALUETYPE>::zero(); }
906
907 value_type dxyy(double x, double y) const
883 value_type dx(double /*x*/, double /*y*/) const
884 { return NumericTraits<VALUETYPE>::zero(); }
885
886 value_type dy(double /*x*/, double /*y*/) const
887 { return NumericTraits<VALUETYPE>::zero(); }
888
889 value_type dxx(double /*x*/, double /*y*/) const
890 { return NumericTraits<VALUETYPE>::zero(); }
891
892 value_type dxy(double /*x*/, double /*y*/) const
893 { return NumericTraits<VALUETYPE>::zero(); }
894
895 value_type dyy(double /*x*/, double /*y*/) const
896 { return NumericTraits<VALUETYPE>::zero(); }
897
898 value_type dx3(double /*x*/, double /*y*/) const
899 { return NumericTraits<VALUETYPE>::zero(); }
900
901 value_type dy3(double /*x*/, double /*y*/) const
902 { return NumericTraits<VALUETYPE>::zero(); }
903
904 value_type dxxy(double /*x*/, double /*y*/) const
905 { return NumericTraits<VALUETYPE>::zero(); }
906
907 value_type dxyy(double /*x*/, double /*y*/) const
908908 { return NumericTraits<VALUETYPE>::zero(); }
909909
910910 value_type operator()(difference_type const & d) const
913913 value_type operator()(difference_type const & d, unsigned int dx, unsigned int dy) const
914914 { return operator()(d[0], d[1], dx, dy); }
915915
916 value_type dx(difference_type const & d) const
917 { return NumericTraits<VALUETYPE>::zero(); }
918
919 value_type dy(difference_type const & d) const
920 { return NumericTraits<VALUETYPE>::zero(); }
921
922 value_type dxx(difference_type const & d) const
923 { return NumericTraits<VALUETYPE>::zero(); }
924
925 value_type dxy(difference_type const & d) const
926 { return NumericTraits<VALUETYPE>::zero(); }
927
928 value_type dyy(difference_type const & d) const
929 { return NumericTraits<VALUETYPE>::zero(); }
930
931 value_type dx3(difference_type const & d) const
932 { return NumericTraits<VALUETYPE>::zero(); }
933
934 value_type dy3(difference_type const & d) const
935 { return NumericTraits<VALUETYPE>::zero(); }
936
937 value_type dxxy(difference_type const & d) const
938 { return NumericTraits<VALUETYPE>::zero(); }
939
940 value_type dxyy(difference_type const & d) const
941 { return NumericTraits<VALUETYPE>::zero(); }
942
943 SquaredNormType g2(double x, double y) const
944 { return NumericTraits<SquaredNormType>::zero(); }
945
946 SquaredNormType g2x(double x, double y) const
947 { return NumericTraits<SquaredNormType>::zero(); }
948
949 SquaredNormType g2y(double x, double y) const
950 { return NumericTraits<SquaredNormType>::zero(); }
951
952 SquaredNormType g2xx(double x, double y) const
953 { return NumericTraits<SquaredNormType>::zero(); }
954
955 SquaredNormType g2xy(double x, double y) const
956 { return NumericTraits<SquaredNormType>::zero(); }
957
958 SquaredNormType g2yy(double x, double y) const
959 { return NumericTraits<SquaredNormType>::zero(); }
960
961 SquaredNormType g2(difference_type const & d) const
962 { return NumericTraits<SquaredNormType>::zero(); }
963
964 SquaredNormType g2x(difference_type const & d) const
965 { return NumericTraits<SquaredNormType>::zero(); }
966
967 SquaredNormType g2y(difference_type const & d) const
968 { return NumericTraits<SquaredNormType>::zero(); }
969
970 SquaredNormType g2xx(difference_type const & d) const
971 { return NumericTraits<SquaredNormType>::zero(); }
972
973 SquaredNormType g2xy(difference_type const & d) const
974 { return NumericTraits<SquaredNormType>::zero(); }
975
976 SquaredNormType g2yy(difference_type const & d) const
916 value_type dx(difference_type const & /*d*/) const
917 { return NumericTraits<VALUETYPE>::zero(); }
918
919 value_type dy(difference_type const & /*d*/) const
920 { return NumericTraits<VALUETYPE>::zero(); }
921
922 value_type dxx(difference_type const & /*d*/) const
923 { return NumericTraits<VALUETYPE>::zero(); }
924
925 value_type dxy(difference_type const & /*d*/) const
926 { return NumericTraits<VALUETYPE>::zero(); }
927
928 value_type dyy(difference_type const & /*d*/) const
929 { return NumericTraits<VALUETYPE>::zero(); }
930
931 value_type dx3(difference_type const & /*d*/) const
932 { return NumericTraits<VALUETYPE>::zero(); }
933
934 value_type dy3(difference_type const & /*d*/) const
935 { return NumericTraits<VALUETYPE>::zero(); }
936
937 value_type dxxy(difference_type const & /*d*/) const
938 { return NumericTraits<VALUETYPE>::zero(); }
939
940 value_type dxyy(difference_type const & /*d*/) const
941 { return NumericTraits<VALUETYPE>::zero(); }
942
943 SquaredNormType g2(double /*x*/, double /*y*/) const
944 { return NumericTraits<SquaredNormType>::zero(); }
945
946 SquaredNormType g2x(double /*x*/, double /*y*/) const
947 { return NumericTraits<SquaredNormType>::zero(); }
948
949 SquaredNormType g2y(double /*x*/, double /*y*/) const
950 { return NumericTraits<SquaredNormType>::zero(); }
951
952 SquaredNormType g2xx(double /*x*/, double /*y*/) const
953 { return NumericTraits<SquaredNormType>::zero(); }
954
955 SquaredNormType g2xy(double /*x*/, double /*y*/) const
956 { return NumericTraits<SquaredNormType>::zero(); }
957
958 SquaredNormType g2yy(double /*x*/, double /*y*/) const
959 { return NumericTraits<SquaredNormType>::zero(); }
960
961 SquaredNormType g2(difference_type const & /*d*/) const
962 { return NumericTraits<SquaredNormType>::zero(); }
963
964 SquaredNormType g2x(difference_type const & /*d*/) const
965 { return NumericTraits<SquaredNormType>::zero(); }
966
967 SquaredNormType g2y(difference_type const & /*d*/) const
968 { return NumericTraits<SquaredNormType>::zero(); }
969
970 SquaredNormType g2xx(difference_type const & /*d*/) const
971 { return NumericTraits<SquaredNormType>::zero(); }
972
973 SquaredNormType g2xy(difference_type const & /*d*/) const
974 { return NumericTraits<SquaredNormType>::zero(); }
975
976 SquaredNormType g2yy(difference_type const & /*d*/) const
977977 { return NumericTraits<SquaredNormType>::zero(); }
978978
979979 unsigned int width() const
10611061 /* when traverser and accessor types passed to the constructor are the same as the corresponding
10621062 internal types, we need not copy the image (speed up)
10631063 */
1064 SplineImageView0(InternalTraverser is, InternalTraverser iend, InternalAccessor sa)
1064 SplineImageView0(InternalTraverser is, InternalTraverser iend, InternalAccessor /*sa*/)
10651065 : Base(iend.x - is.x, iend.y - is.y, is)
10661066 {}
10671067
10691069 : Base(s.second.x - s.first.x, s.second.y - s.first.y, s.first)
10701070 {}
10711071
1072 SplineImageView0(InternalConstTraverser is, InternalConstTraverser iend, InternalConstAccessor sa)
1072 SplineImageView0(InternalConstTraverser is, InternalConstTraverser iend, InternalConstAccessor /*sa*/)
10731073 : Base(iend.x - is.x, iend.y - is.y, is)
10741074 {}
10751075
14581458 value_type dy(double x, double y) const
14591459 { return operator()(x, y, 0, 1); }
14601460
1461 value_type dxx(double x, double y) const
1461 value_type dxx(double /*x*/, double /*y*/) const
14621462 { return NumericTraits<VALUETYPE>::zero(); }
14631463
14641464 value_type dxy(double x, double y) const
14651465 { return operator()(x, y, 1, 1); }
14661466
1467 value_type dyy(double x, double y) const
1468 { return NumericTraits<VALUETYPE>::zero(); }
1469
1470 value_type dx3(double x, double y) const
1471 { return NumericTraits<VALUETYPE>::zero(); }
1472
1473 value_type dy3(double x, double y) const
1474 { return NumericTraits<VALUETYPE>::zero(); }
1475
1476 value_type dxxy(double x, double y) const
1477 { return NumericTraits<VALUETYPE>::zero(); }
1478
1479 value_type dxyy(double x, double y) const
1467 value_type dyy(double /*x*/, double /*y*/) const
1468 { return NumericTraits<VALUETYPE>::zero(); }
1469
1470 value_type dx3(double /*x*/, double /*y*/) const
1471 { return NumericTraits<VALUETYPE>::zero(); }
1472
1473 value_type dy3(double /*x*/, double /*y*/) const
1474 { return NumericTraits<VALUETYPE>::zero(); }
1475
1476 value_type dxxy(double /*x*/, double /*y*/) const
1477 { return NumericTraits<VALUETYPE>::zero(); }
1478
1479 value_type dxyy(double /*x*/, double /*y*/) const
14801480 { return NumericTraits<VALUETYPE>::zero(); }
14811481
14821482 value_type operator()(difference_type const & d) const
14911491 value_type dy(difference_type const & d) const
14921492 { return operator()(d[0], d[1], 0, 1); }
14931493
1494 value_type dxx(difference_type const & d) const
1494 value_type dxx(difference_type const & /*d*/) const
14951495 { return NumericTraits<VALUETYPE>::zero(); }
14961496
14971497 value_type dxy(difference_type const & d) const
14981498 { return operator()(d[0], d[1], 1, 1); }
14991499
1500 value_type dyy(difference_type const & d) const
1501 { return NumericTraits<VALUETYPE>::zero(); }
1502
1503 value_type dx3(difference_type const & d) const
1504 { return NumericTraits<VALUETYPE>::zero(); }
1505
1506 value_type dy3(difference_type const & d) const
1507 { return NumericTraits<VALUETYPE>::zero(); }
1508
1509 value_type dxxy(difference_type const & d) const
1510 { return NumericTraits<VALUETYPE>::zero(); }
1511
1512 value_type dxyy(difference_type const & d) const
1500 value_type dyy(difference_type const & /*d*/) const
1501 { return NumericTraits<VALUETYPE>::zero(); }
1502
1503 value_type dx3(difference_type const & /*d*/) const
1504 { return NumericTraits<VALUETYPE>::zero(); }
1505
1506 value_type dy3(difference_type const & /*d*/) const
1507 { return NumericTraits<VALUETYPE>::zero(); }
1508
1509 value_type dxxy(difference_type const & /*d*/) const
1510 { return NumericTraits<VALUETYPE>::zero(); }
1511
1512 value_type dxyy(difference_type const & /*d*/) const
15131513 { return NumericTraits<VALUETYPE>::zero(); }
15141514
15151515 SquaredNormType g2(double x, double y) const
15161516 { return squaredNorm(dx(x,y)) + squaredNorm(dy(x,y)); }
15171517
1518 SquaredNormType g2x(double x, double y) const
1519 { return NumericTraits<SquaredNormType>::zero(); }
1520
1521 SquaredNormType g2y(double x, double y) const
1522 { return NumericTraits<SquaredNormType>::zero(); }
1523
1524 SquaredNormType g2xx(double x, double y) const
1525 { return NumericTraits<SquaredNormType>::zero(); }
1526
1527 SquaredNormType g2xy(double x, double y) const
1528 { return NumericTraits<SquaredNormType>::zero(); }
1529
1530 SquaredNormType g2yy(double x, double y) const
1518 SquaredNormType g2x(double /*x*/, double /*y*/) const
1519 { return NumericTraits<SquaredNormType>::zero(); }
1520
1521 SquaredNormType g2y(double /*x*/, double /*y*/) const
1522 { return NumericTraits<SquaredNormType>::zero(); }
1523
1524 SquaredNormType g2xx(double /*x*/, double /*y*/) const
1525 { return NumericTraits<SquaredNormType>::zero(); }
1526
1527 SquaredNormType g2xy(double /*x*/, double /*y*/) const
1528 { return NumericTraits<SquaredNormType>::zero(); }
1529
1530 SquaredNormType g2yy(double /*x*/, double /*y*/) const
15311531 { return NumericTraits<SquaredNormType>::zero(); }
15321532
15331533 SquaredNormType g2(difference_type const & d) const
15341534 { return g2(d[0], d[1]); }
15351535
1536 SquaredNormType g2x(difference_type const & d) const
1537 { return NumericTraits<SquaredNormType>::zero(); }
1538
1539 SquaredNormType g2y(difference_type const & d) const
1540 { return NumericTraits<SquaredNormType>::zero(); }
1541
1542 SquaredNormType g2xx(difference_type const & d) const
1543 { return NumericTraits<SquaredNormType>::zero(); }
1544
1545 SquaredNormType g2xy(difference_type const & d) const
1546 { return NumericTraits<SquaredNormType>::zero(); }
1547
1548 SquaredNormType g2yy(difference_type const & d) const
1536 SquaredNormType g2x(difference_type const & /*d*/) const
1537 { return NumericTraits<SquaredNormType>::zero(); }
1538
1539 SquaredNormType g2y(difference_type const & /*d*/) const
1540 { return NumericTraits<SquaredNormType>::zero(); }
1541
1542 SquaredNormType g2xx(difference_type const & /*d*/) const
1543 { return NumericTraits<SquaredNormType>::zero(); }
1544
1545 SquaredNormType g2xy(difference_type const & /*d*/) const
1546 { return NumericTraits<SquaredNormType>::zero(); }
1547
1548 SquaredNormType g2yy(difference_type const & /*d*/) const
15491549 { return NumericTraits<SquaredNormType>::zero(); }
15501550
15511551 unsigned int width() const
16981698 /* when traverser and accessor types passed to the constructor are the same as the corresponding
16991699 internal types, we need not copy the image (speed up)
17001700 */
1701 SplineImageView1(InternalTraverser is, InternalTraverser iend, InternalAccessor sa)
1701 SplineImageView1(InternalTraverser is, InternalTraverser iend, InternalAccessor /*sa*/)
17021702 : Base(iend.x - is.x, iend.y - is.y, is)
17031703 {}
17041704
17061706 : Base(s.second.x - s.first.x, s.second.y - s.first.y, s.first)
17071707 {}
17081708
1709 SplineImageView1(InternalConstTraverser is, InternalConstTraverser iend, InternalConstAccessor sa)
1709 SplineImageView1(InternalConstTraverser is, InternalConstTraverser iend, InternalConstAccessor /*sa*/)
17101710 : Base(iend.x - is.x, iend.y - is.y, is)
17111711 {}
17121712
3838 #include <cmath>
3939 #include "config.hxx"
4040 #include "mathutil.hxx"
41 #include "polynomial.hxx"
4241 #include "array_vector.hxx"
4342 #include "fixedpoint.hxx"
4443
7675 \f[ B_n(x) = B_0(x) * B_{n-1}(x)
7776 \f]
7877
79 where * denotes convolution, and <i>n</i> is the spline order given by the
80 template parameter <tt>ORDER</tt>. These spline classes can be used as
78 where * denotes convolution, and <i>n</i> is the spline order given by the template
79 parameter <tt>ORDER</tt> with <tt>ORDER < 18</tt>. These spline classes can be used as
8180 unary and binary functors, as kernels for \ref resamplingConvolveImage(),
8281 and as arguments for \ref vigra::SplineImageView. Note that the spline order
8382 is given as a template argument.
8988 class BSplineBase
9089 {
9190 public:
91
92 static_assert (ORDER < 18 , "BSpline: ORDER must be less than 18." );
9293
9394 /** the value type if used as a kernel in \ref resamplingConvolveImage().
9495 */
177178 return weightMatrix_;
178179 }
179180
181 static ArrayVector<double> getPrefilterCoefficients();
182
180183 protected:
181184 result_type exec(first_argument_type x, second_argument_type derivative_order) const;
182
183 // factory function for the prefilter coefficients array
184 static ArrayVector<double> calculatePrefilterCoefficients();
185185
186186 // factory function for the weight matrix
187187 static WeightMatrix calculateWeightMatrix();
192192 };
193193
194194 template <int ORDER, class T>
195 ArrayVector<double> BSplineBase<ORDER, T>::prefilterCoefficients_(BSplineBase<ORDER, T>::calculatePrefilterCoefficients());
195 ArrayVector<double> BSplineBase<ORDER, T>::prefilterCoefficients_(getPrefilterCoefficients());
196196
197197 template <int ORDER, class T>
198198 typename BSplineBase<ORDER, T>::WeightMatrix BSplineBase<ORDER, T>::weightMatrix_(calculateWeightMatrix());
214214 }
215215
216216 template <int ORDER, class T>
217 ArrayVector<double>
218 BSplineBase<ORDER, T>::calculatePrefilterCoefficients()
219 {
220 ArrayVector<double> res;
221 if(ORDER > 1)
222 {
223 const int r = ORDER / 2;
224 StaticPolynomial<2*r, double> p(2*r);
225 BSplineBase spline;
226 for(int i = 0; i <= 2*r; ++i)
227 p[i] = spline(T(i-r));
228 ArrayVector<double> roots;
229 polynomialRealRoots(p, roots);
230 for(unsigned int i = 0; i < roots.size(); ++i)
231 if(VIGRA_CSTD::fabs(roots[i]) < 1.0)
232 res.push_back(roots[i]);
233 }
234 return res;
217 ArrayVector<double>
218 BSplineBase<ORDER, T>::getPrefilterCoefficients()
219 {
220 static const double coeffs[18][8] = {
221 { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
222 { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
223 { -0.17157287525380971, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
224 { -0.26794919243112281, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
225 { -0.36134122590022018, -0.01372542929733912, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
226 { -0.43057534709997379, -0.04309628820326465, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
227 { -0.48829458930304398, -0.081679271076237972, -0.0014141518083258175, 0.0, 0.0, 0.0, 0.0, 0.0 },
228 { -0.53528043079643672, -0.1225546151923274, -0.0091486948096082786, 0.0, 0.0, 0.0, 0.0, 0.0 },
229 { -0.57468690924876631, -0.16303526929728085, -0.023632294694844857, -0.00015382131064169087, 0.0, 0.0, 0.0, 0.0 },
230 { -0.60799738916862989, -0.20175052019315337, -0.043222608540481752, -0.0021213069031808186, 0.0, 0.0, 0.0, 0.0 },
231 { -0.63655066396942439, -0.2381827983775629, -0.065727033228308585, -0.0075281946755486927, -1.6982762823274658e-5, 0.0, 0.0, 0.0 },
232 { -0.66126606890072925, -0.27218034929478602, -0.089759599793713341, -0.016669627366234657, -0.00051055753444650205, 0.0, 0.0, 0.0 },
233 { -0.68286488419772362, -0.30378079328825425, -0.11435052002713579, -0.028836190198663809, -0.0025161662172613372, -1.8833056450639017e-6, 0.0, 0.0 },
234 { -0.70189425181681642, -0.33310723293062366, -0.13890111319431958, -0.043213866740363663, -0.0067380314152449142, -0.00012510011321441875, 0.0, 0.0 },
235 { -0.71878378723997516, -0.3603190719169625, -0.1630335147992984, -0.059089482194831018, -0.013246756734847919, -0.00086402404095333829, -2.0913096775275374e-7, 0.0 },
236 { -0.73387257168487741, -0.38558573427843323, -0.18652010845096478, -0.075907592047668185, -0.02175206579654047, -0.0028011514820764556, -3.093568045147443e-5, 0.0 },
237 { -0.747432387772212103, -0.409073604757528353, -0.29228719338953817, -9.32547189803214355e-2 -3.18677061204386616e-2, -6.25840678512839046e-3, -3.01565363306955866e-4, -2.32324863642097035e-8 },
238 { -0.75968322407189071, -0.43093965318039579, -0.23108984359927232, -0.1108289933162471, -0.043213911456684129, -0.011258183689471605, -0.0011859331251521767, -7.6875625812546846e-6 }
239 };
240 return ArrayVector<double>(coeffs[ORDER], coeffs[ORDER]+ORDER/2);
235241 }
236242
237243 template <int ORDER, class T>
314320 template <class U, int N>
315321 autodiff::DualVector<U, N> operator()(autodiff::DualVector<U, N> const & x) const
316322 {
317 return x < 0.5 && -0.5 <= x
323 return x < 0.5 && -0.5 <= x
318324 ? autodiff::DualVector<U, N>(1.0)
319325 : autodiff::DualVector<U, N>(0.0);
320326 }
339345 }
340346
341347 typedef T WeightMatrix[1][1];
342
348
343349 static WeightMatrix const & weights()
344350 {
345351 return weightMatrix_;
408414 autodiff::DualVector<U, N> operator()(autodiff::DualVector<U, N> x) const
409415 {
410416 x = abs(x);
411 return x < 1.0
417 return x < 1.0
412418 ? 1.0 - x
413419 : autodiff::DualVector<U, N>(0.0);
414420 }
531537 autodiff::DualVector<U, N> operator()(autodiff::DualVector<U, N> x) const
532538 {
533539 x = abs(x);
534 return x < 0.5
540 return x < 0.5
535541 ? 0.75 - x*x
536 : x < 1.5
542 : x < 1.5
537543 ? 0.5 * sq(1.5 - x)
538544 : autodiff::DualVector<U, N>(0.0);
539545 }
579585 ArrayVector<double> BSpline<2, T>::prefilterCoefficients_(1, 2.0*M_SQRT2 - 3.0);
580586
581587 template <class T>
582 typename BSpline<2, T>::WeightMatrix BSpline<2, T>::weightMatrix_ =
588 typename BSpline<2, T>::WeightMatrix BSpline<2, T>::weightMatrix_ =
583589 {{ 0.125, 0.75, 0.125},
584590 {-0.5, 0.0, 0.5},
585591 { 0.5, -1.0, 0.5}};
736742 ArrayVector<double> BSpline<3, T>::prefilterCoefficients_(1, VIGRA_CSTD::sqrt(3.0) - 2.0);
737743
738744 template <class T>
739 typename BSpline<3, T>::WeightMatrix BSpline<3, T>::weightMatrix_ =
745 typename BSpline<3, T>::WeightMatrix BSpline<3, T>::weightMatrix_ =
740746 {{ 1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0, 0.0},
741747 {-0.5, 0.0, 0.5, 0.0},
742748 { 0.5, -1.0, 0.5, 0.0},
891897 protected:
892898 result_type exec(first_argument_type x, second_argument_type derivative_order) const;
893899
894 static ArrayVector<double> calculatePrefilterCoefficients()
895 {
896 ArrayVector<double> b(2);
897 // -19 + 4*sqrt(19) + 2*sqrt(2*(83 - 19*sqrt(19)))
898 b[0] = -0.361341225900220177092212841325;
899 // -19 - 4*sqrt(19) + 2*sqrt(2*(83 + 19*sqrt(19)))
900 b[1] = -0.013725429297339121360331226939;
901 return b;
902 }
903
904900 unsigned int derivativeOrder_;
905901 static ArrayVector<double> prefilterCoefficients_;
906902 static WeightMatrix weightMatrix_;
907903 };
908904
909905 template <class T>
910 ArrayVector<double> BSpline<4, T>::prefilterCoefficients_(calculatePrefilterCoefficients());
911
912 template <class T>
913 typename BSpline<4, T>::WeightMatrix BSpline<4, T>::weightMatrix_ =
906 ArrayVector<double> BSpline<4, T>::prefilterCoefficients_(BSplineBase<4, T>::getPrefilterCoefficients());
907
908 template <class T>
909 typename BSpline<4, T>::WeightMatrix BSpline<4, T>::weightMatrix_ =
914910 {{ 1.0/384.0, 19.0/96.0, 115.0/192.0, 19.0/96.0, 1.0/384.0},
915911 {-1.0/48.0, -11.0/24.0, 0.0, 11.0/24.0, 1.0/48.0},
916912 { 1.0/16.0, 1.0/4.0, -5.0/8.0, 1.0/4.0, 1.0/16.0},
10141010 : x < -0.5
10151011 ? -4.0
10161012 : 6.0
1017 : x < 0.5
1013 : x < 0.5
10181014 ? 6.0
10191015 : x < 1.5
10201016 ? -4.0
11181114 protected:
11191115 result_type exec(first_argument_type x, second_argument_type derivative_order) const;
11201116
1121 static ArrayVector<double> calculatePrefilterCoefficients()
1122 {
1123 ArrayVector<double> b(2);
1124 // -(13/2) + sqrt(105)/2 + sqrt(1/2*((135 - 13*sqrt(105))))
1125 b[0] = -0.430575347099973791851434783493;
1126 // (1/2)*((-13) - sqrt(105) + sqrt(2*((135 + 13*sqrt(105)))))
1127 b[1] = -0.043096288203264653822712376822;
1128 return b;
1129 }
1130
11311117 unsigned int derivativeOrder_;
11321118 static ArrayVector<double> prefilterCoefficients_;
11331119 static WeightMatrix weightMatrix_;
11341120 };
11351121
11361122 template <class T>
1137 ArrayVector<double> BSpline<5, T>::prefilterCoefficients_(calculatePrefilterCoefficients());
1138
1139 template <class T>
1140 typename BSpline<5, T>::WeightMatrix BSpline<5, T>::weightMatrix_ =
1123 ArrayVector<double> BSpline<5, T>::prefilterCoefficients_(BSplineBase<5, T>::getPrefilterCoefficients());
1124
1125 template <class T>
1126 typename BSpline<5, T>::WeightMatrix BSpline<5, T>::weightMatrix_ =
11411127 {{ 1.0/120.0, 13.0/60.0, 11.0/20.0, 13.0/60.0, 1.0/120.0, 0.0},
11421128 {-1.0/24.0, -5.0/12.0, 0.0, 5.0/12.0, 1.0/24.0, 0.0},
11431129 { 1.0/12.0, 1.0/6.0, -0.5, 1.0/6.0, 1.0/12.0, 0.0},
13481334 {
13491335 return prefilterCoefficients_;
13501336 }
1351
1337
13521338 protected:
13531339 static ArrayVector<double> prefilterCoefficients_;
13541340 };
5050 template <class ARITHTYPE>
5151 class Kernel2D;
5252
53 /** \addtogroup CommonConvolutionFilters
53 /** \addtogroup ConvolutionFilters
5454 */
5555 //@{
5656
854854 {
855855 if(this != &k)
856856 {
857 kernel_ = k.kernel_;
857 kernel_ = k.kernel_;
858858 left_ = k.left_;
859859 right_ = k.right_;
860860 norm_ = k.norm_;
861 border_treatment_ = k.border_treatment_;
861 border_treatment_ = k.border_treatment_;
862862 }
863863 return *this;
864864 }
11461146 Kernel2D & initExplicitly(Diff2D const & upperleft, Diff2D const & lowerright)
11471147 {
11481148 return initExplicitly(Shape2(upperleft), Shape2(lowerright));
1149 }
1150
1151 /** Init the kernel by providing a BasicImage with the kernel values.
1152
1153 The kernel's origin is placed at the center of the given image.
1154 The norm is set to the sum of the image values.
1155
1156 <b> Preconditions:</b>
1157
1158 odd image width and height;
1159 */
1160 Kernel2D & initExplicitly(BasicImage<value_type> const & image)
1161 {
1162 vigra_precondition(image.width() % 2 != 0 && image.height() % 2 != 0,
1163 "Kernel2D::initExplicitly(): kernel sizes must be odd.");
1164
1165 left_ = Point2D((image.width() - 1) / -2, (image.height() - 1) / -2);
1166 right_ = Point2D((image.width() - 1) / 2, (image.height() - 1) / 2);
1167
1168 norm_ = 0;
1169 for (auto iter = image.begin(); iter != image.end(); ++iter)
1170 {
1171 norm_ += *iter;
1172 }
1173
1174 kernel_ = image;
1175
1176 return *this;
11491177 }
11501178
11511179 /** Coordinates of the upper left corner of the kernel.
4747 namespace vigra
4848 {
4949
50 /** \addtogroup ParallelProcessing Functions and classes for parallel processing.
50 /** \addtogroup ParallelProcessing
5151 */
5252
5353 //@{
381381 /********************************************************/
382382
383383 // nItems must be either zero or std::distance(iter, end).
384 // NOTE: the redundancy of nItems and iter,end here is due to the fact that, for forward iterators,
385 // computing the distance from iterators is costly, and, for input iterators, we might not know in advance
386 // how many items there are (e.g., stream iterators).
384387 template<class ITER, class F>
385388 inline void parallel_foreach_impl(
386389 ThreadPool & pool,
482485 F && f,
483486 std::input_iterator_tag
484487 ){
485 size_t num_items = 0;
488 std::ptrdiff_t num_items = 0;
486489 std::vector<threading::future<void> > futures;
487490 for (; iter != end; ++iter)
488491 {
510513 F && f,
511514 const std::ptrdiff_t nItems = 0
512515 ){
513 size_t n = 0;
516 std::ptrdiff_t n = 0;
514517 for (; begin != end; ++begin)
515518 {
516519 f(0, *begin);
520523 }
521524
522525 /** \brief Apply a functor to all items in a range in parallel.
523
524 Create a thread pool (or use an existing one) to apply the functor \arg f
525 to all items in the range <tt>[begin, end)</tt> in parallel. \arg f must
526 be callable with two arguments of type <tt>size_t</tt> and <tt>T</tt>, where
527 the first argument is the thread index (starting at 0) and T is convertible
528 from the iterator's <tt>reference_type</tt> (i.e. the result of <tt>*begin</tt>).
529
530 If the iterators are forward iterators (<tt>std::forward_iterator_tag</tt>), you
531 can provide the optional argument <tt>nItems</tt> to avoid the a
532 <tt>std::distance(begin, end)</tt> call to compute the range's length.
533
534 Parameter <tt>nThreads</tt> controls the number of threads. <tt>parallel_foreach</tt>
535 will split the work into about three times as many parallel tasks.
536 If <tt>nThreads = ParallelOptions::Auto</tt>, the number of threads is set to
537 the machine default (<tt>std::thread::hardware_concurrency()</tt>).
538
539 If <tt>nThreads = 0</tt>, the function will not use threads,
540 but will call the functor sequentially. This can also be enforced by setting the
541 preprocessor flag <tt>VIGRA_SINGLE_THREADED</tt>, ignoring the value of
542 <tt>nThreads</tt> (useful for debugging).
543526
544527 <b> Declarations:</b>
545528
574557 F && f);
575558 }
576559 \endcode
560
561 Create a thread pool (or use an existing one) to apply the functor \arg f
562 to all items in the range <tt>[begin, end)</tt> in parallel. \arg f must
563 be callable with two arguments of type <tt>size_t</tt> and <tt>T</tt>, where
564 the first argument is the thread index (starting at 0) and T is convertible
565 from the iterator's <tt>reference_type</tt> (i.e. the result of <tt>*begin</tt>).
566
567 If the iterators are forward iterators (<tt>std::forward_iterator_tag</tt>), you
568 can provide the optional argument <tt>nItems</tt> to avoid the a
569 <tt>std::distance(begin, end)</tt> call to compute the range's length.
570
571 Parameter <tt>nThreads</tt> controls the number of threads. <tt>parallel_foreach</tt>
572 will split the work into about three times as many parallel tasks.
573 If <tt>nThreads = ParallelOptions::Auto</tt>, the number of threads is set to
574 the machine default (<tt>std::thread::hardware_concurrency()</tt>).
575
576 If <tt>nThreads = 0</tt>, the function will not use threads,
577 but will call the functor sequentially. This can also be enforced by setting the
578 preprocessor flag <tt>VIGRA_SINGLE_THREADED</tt>, ignoring the value of
579 <tt>nThreads</tt> (useful for debugging).
577580
578581 <b>Usage:</b>
579582
23362336 }
23372337 return res;
23382338 }
2339
2340 /** \brief Round up values to the nearest power of 2.
2341 Implemented only for UInt32
2342 */
2343 template<int SIZE>
2344 inline TinyVector<UInt32,SIZE> ceilPower2(vigra::TinyVector<UInt32,SIZE> const & t)
2345 {
2346 TinyVector<UInt32,SIZE> res(SkipInitialization);
2347 for( size_t k = 0; k < SIZE; k++)
2348 res[k] = ceilPower2(t[k]);
2349 return res;
2350 }
2351
2352 /** \brief Round down values to the nearest power of 2.
2353 Implemented only for UInt32
2354 */
2355 template<int SIZE>
2356 inline TinyVector<UInt32,SIZE> floorPower2(vigra::TinyVector<UInt32,SIZE> const & t)
2357 {
2358 TinyVector<UInt32, SIZE> res(SkipInitialization);
2359 for( size_t k = 0; k < SIZE; k++)
2360 res[k] = floorPower2(t[k]);
2361 return res;
2362 }
23392363
23402364 template<class V,int SIZE>
23412365 inline
9595 /** \brief Apply unary point transformation to each pixel.
9696
9797 After the introduction of arithmetic and algebraic \ref MultiMathModule "array expressions",
98 this function is rarely needed. Moreover, \ref transformMultiArray() provides the
98 this function is rarely needed. Moreover, \ref transformMultiArray() provides the
9999 same functionality for arbitrary dimensional arrays.
100100
101101 The transformation given by the functor is applied to every source
154154 MultiArray<2, float> src(100, 200),
155155 dest(100, 200);
156156 ...
157
157
158158 transformImage(src, dest, &std::sqrt );
159159 \endcode
160160
185185
186186 \endcode
187187 \deprecatedEnd
188
188
189189 \see TransformFunctor, MultiMathModule, \ref FunctorExpressions
190190 */
191191 doxygen_overloaded_function(template <...> void transformImage)
242242 (i.e., where the mask is non-zero).
243243
244244 After the introduction of arithmetic and algebraic \ref MultiMathModule "array expressions",
245 this function is rarely needed. Moreover, \ref combineTwoMultiArrays() provides the
245 this function is rarely needed. Moreover, \ref combineTwoMultiArrays() provides the
246246 same functionality for arbitrary dimensional arrays.
247247
248248 The transformation given by the functor is applied to every source
310310
311311 \code
312312 #include <cmath> // for sqrt()
313
313
314314 MultiArray<2, unsigned char> mask(100, 200),
315315 MultiArray<2, float> src(100, 200),
316316 dest(100, 200);
317317 ... // fill src and mask
318
318
319319 transformImageIf(src, mask, dest, &std::sqrt );
320320 \endcode
321321
348348
349349 \endcode
350350 \deprecatedEnd
351
351
352352 \see TransformFunctor, MultiMathModule, \ref FunctorExpressions
353353 */
354354 doxygen_overloaded_function(template <...> void transformImageIf)
430430 \code
431431 namespace vigra {
432432 template <class T1, class S1,
433 class T2, class S2,
433 class T2, class S2,
434434 class Functor>
435435 void
436436 gradientBasedTransform(MultiArrayView<2, T1, S1> const & src,
437 MultiArrayView<2, T2, S2> dest,
437 MultiArrayView<2, T2, S2> dest,
438438 Functor const & grad);
439439 }
440440 \endcode
503503
504504 \endcode
505505 \deprecatedEnd
506
506
507507 \see TransformFunctor, MultiMathModule, \ref FunctorExpressions
508508 */
509509 doxygen_overloaded_function(template <...> void gradientBasedTransform)
823823 and <tt>offset = dest_min / scale - src_min</tt>. As a result,
824824 the pixel values <tt>src_max</tt>, <tt>src_min</tt> in the source image
825825 are mapped onto <tt>dest_max</tt>, <tt>dest_min</tt> respectively.
826 This works for scalar as well as vector pixel types. Instead of
826 This works for scalar as well as vector pixel types. Instead of
827827 <tt>src_min</tt> and <tt>src_max</tt>, you may also pass a functor
828 \ref FindMinMax.
828 \ref FindMinMax.
829829
830830 <b> Declaration:</b>
831831
938938 /********************************************************/
939939
940940 /** \brief Threshold an image.
941
942 <b>Note:</b> Nowadays, it is probably easier to perform thresholding by means of
943 C++ 11 lambda functions or \ref MultiMathModule "array expressions".
941944
942945 If a source pixel is above or equal the lower and below
943946 or equal the higher threshold (i.e. within the closed interval
342342 double alpha_par, double beta_par, double sigma_par, double rho_par, double K_par){
343343
344344 using namespace multi_math;
345 int width=data.shape(0),height=data.shape(1);
346345
347346 MultiArray<2,double> smooth(data.shape()),tmp(data.shape());
348347 vigra::Kernel1D<double> gauss;
763762 //@}
764763 } // closing namespace vigra
765764
766 #endif // VIGRA_TV_FILTER_HXX
765 #endif // VIGRA_TV_FILTER_HXX
4444 #include <exception> // for exception, bad_exception
4545 #include <stdexcept>
4646 #include <iostream>
47 #include <iomanip>
4748 #include <limits>
4849 #include <cfloat>
4950 #include <cmath>
146147
147148 #define failTest VIGRA_ERROR
148149
150 #ifdef __GNUC__
151 #pragma GCC diagnostic push
152 #pragma GCC diagnostic ignored "-Wsign-compare"
153 #endif
154
149155 namespace vigra {
150156
151157 class test_suite;
152158
153159 namespace detail {
160
161 typedef std::pair<std::string, int> CheckpointType;
154162
155163 struct errstream
156164 {
160168 errstream & operator<<(T t) { buf << t; return *this; }
161169 };
162170
163 inline std::string & exception_checkpoint()
164 {
165 static std::string test_checkpoint_;
171 inline CheckpointType & exception_checkpoint()
172 {
173 static CheckpointType test_checkpoint_;
166174 return test_checkpoint_;
167175 }
168176
171179 const char * name, const char * info )
172180 {
173181 os << "Unexpected " << name << " " << info << "\n";
174 if(exception_checkpoint().size() > 0)
175 {
176 os << "Last checkpoint: " << exception_checkpoint() << "\n";
182 if(exception_checkpoint().first.size() > 0)
183 {
184 os << " (occured after line " << exception_checkpoint().second << " in file '" << exception_checkpoint().first << "')\n";
177185 }
178186 }
179187
446454 };
447455
448456 inline void
449 checkpoint_impl(const char * message, const char * file, int line)
450 {
451 detail::errstream buf;
452 buf << message << " (" << file <<":" << line << ")";
453 exception_checkpoint() = buf.str();
457 checkpoint_impl(const char * file, int line)
458 {
459 exception_checkpoint().first = file;
460 exception_checkpoint().second = line;
454461 }
455462
456463 inline void
457464 should_impl(bool predicate, const char * message, const char * file, int line)
458465 {
459 checkpoint_impl(message, file, line);
466 checkpoint_impl(file, line);
460467 if(!predicate)
461468 {
462469 detail::errstream buf;
475482 void
476483 sequence_equal_impl(Iter1 i1, Iter1 end1, Iter2 i2, const char * file, int line)
477484 {
485 checkpoint_impl(file, line);
478486 for(int counter = 0; i1 != end1; ++i1, ++i2, ++counter)
479487 {
480488 if(*i1 != *i2)
481489 {
482490 detail::errstream buf;
483 buf << "Sequence items differ at index " << counter <<
491 buf << "Sequences differ at index " << counter <<
484492 " ["<< *i1 << " != " << *i2 << "]";
485493 should_impl(false, buf.str().c_str(), file, line);
486494 }
605613 template <class T1, class T2, class T3>
606614 void
607615 tolerance_equal_impl(T1 left, T2 right, T3 epsilon,
608 const char * message, const char * file, int line, ScalarType)
609 {
610 detail::errstream buf;
611 buf << message << " [" << left << " != " << right << "]";
612
616 const char * message, const char * file, int line, ScalarType, std::ptrdiff_t index = -1)
617 {
618 checkpoint_impl(file, line);
613619 close_at_tolerance<T3> fcomparator( epsilon );
614 bool compare = fcomparator ( (T3)left , (T3)right );
615 should_impl(compare, buf.str().c_str(), file, line);
616
620 if (!fcomparator((T3)left, (T3)right))
621 {
622 detail::errstream buf;
623 if(index >= 0)
624 buf << "Sequences differ at index " << index;
625 buf << message << " [" << std::setprecision(17) << left << " != " << right << " at tolerance " << epsilon << "]";
626 should_impl(false, buf.str().c_str(), file, line);
627 }
617628 }
618629
619630 template <class T1, class T2, class T3>
620631 void
621632 tolerance_equal_impl(T1 left, T2 right, T3 epsilon,
622 const char * message, const char * file, int line, VectorType)
623 {
624 detail::errstream buf;
625 buf << message << " [" << left << " != " << right << "]";
626
627 bool compare = true;
633 const char * message, const char * file, int line, VectorType, std::ptrdiff_t index = -1)
634 {
635 checkpoint_impl(file, line);
628636 for(unsigned int i=0; i<epsilon.size(); ++i)
629637 {
630638 close_at_tolerance<typename T3::value_type> fcomparator( epsilon[i] );
631 compare = compare && fcomparator ( left[i] , right[i] );
632 }
633 should_impl(compare, buf.str().c_str(), file, line);
639 if (!fcomparator(left[i], right[i]))
640 {
641 detail::errstream buf;
642 if(index >= 0)
643 {
644 buf << "Sequences differ at index " << index << ", element " << i;
645 }
646 else
647 {
648 buf << "Vectors differ at element " << i;
649 }
650 buf << message << " [" << std::setprecision(17) << left << " != " << right << " at tolerance " << epsilon << "]";
651 should_impl(false, buf.str().c_str(), file, line);
652 }
653 }
634654 }
635655
636656 template <class T1, class T2, class T3>
647667 {
648668 for(int counter = 0; i1 != end1; ++i1, ++i2, ++counter)
649669 {
650 detail::errstream buf;
651 buf << "Sequence items differ at index " << counter;
652 tolerance_equal_impl(*i1, *i2, epsilon, buf.str().c_str(), file, line, typename FloatTraits<T>::ScalarOrVector());
670 tolerance_equal_impl(*i1, *i2, epsilon, "", file, line, typename FloatTraits<T>::ScalarOrVector(), counter);
653671 }
654672 }
655673
657675 void
658676 equal_impl(Left left, Right right, const char * message, const char * file, int line)
659677 {
660 detail::errstream buf;
661 buf << message << " [" << left << " != " << right << "]";
662 should_impl(left == right, buf.str().c_str(), file, line);
678 checkpoint_impl(file, line);
679 if (left != right)
680 {
681 detail::errstream buf;
682 buf << message << " [" << left << " != " << right << "]";
683 should_impl(false, buf.str().c_str(), file, line);
684 }
663685 }
664686
665687 template <class Left, class Right>
666688 void
667689 equal_impl(Left * left, Right * right, const char * message, const char * file, int line)
668690 {
669 detail::errstream buf;
670 buf << message << " [" << (void*)left << " != " << (void*)right << "]";
671 should_impl(left == right, buf.str().c_str(), file, line);
691 checkpoint_impl(file, line);
692 if (left != right)
693 {
694 detail::errstream buf;
695 buf << message << " [" << (void*)left << " != " << (void*)right << "]";
696 should_impl(false, buf.str().c_str(), file, line);
697 }
672698 }
673699
674700 inline void
928954
929955 int init()
930956 {
931 exception_checkpoint() = "";
957 exception_checkpoint().first = "";
932958 report_ = "";
933959 int failed = 0;
934960
10371063 return 0;
10381064
10391065 report_ = "";
1040 exception_checkpoint() = "";
1066 exception_checkpoint().first = "";
10411067
10421068 detail::errstream buf;
10431069 buf << "\nFailure in " << name() << "\n";
10881114 return 0;
10891115
10901116 report_ = "";
1091 exception_checkpoint() = "";
1117 exception_checkpoint().first = "";
10921118
10931119 detail::errstream buf;
10941120 buf << "\nFailure in " << name() << "\n";
11741200
11751201 #endif
11761202
1203 #ifdef __GNUC__
1204 #pragma GCC diagnostic pop
1205 #endif
1206
11771207
11781208 #endif /* VIGRA_UNIT_TEST_HPP */
4545 #include <string>
4646 #include <sstream>
4747 #include <cctype>
48 #include <tuple>
4849
4950 /*! \file */
5051
155156 /** Emulate the 'finally' keyword as known from Python and other languages.
156157
157158 This macro improves upon the famous
158 <a href="http://en.wikipedia.org/wiki/Resource_Acquisition_Is_Initialization">Resource Acquisition Is Initialization</a> idiom, where a resource (e.g. heap memory or a mutex) is automatically free'ed when program execution leaves the current scope. Normally, this is implemented by calling a suitable function in the destructor of a dedicated helper class (e.g. <tt>std::unique_ptr</tt> or <tt>std::lock_guard<std::mutex></tt>).
159
160 Traditionally, a separate helper class has to be implemented for each kind of resource to be handled. In contrast, the macro <tt>VIGRA_FINALLY</tt> creates such a class on the fly by means of an embedded lambda expression.
159 <a href="http://en.wikipedia.org/wiki/Resource_Acquisition_Is_Initialization">Resource Acquisition Is Initialization</a> idiom, where a resource (e.g. heap memory or a mutex) is automatically free'ed when program execution leaves the current scope. This is normally achieved by placing a call which releases the resource into the destructor of a dedicated helper class (e.g. <tt>std::unique_ptr</tt> or <tt>std::lock_guard<std::mutex></tt>).
160
161 Traditionally, a separate helper class is needed for every type of resource to be handled. In contrast, the macro <tt>VIGRA_FINALLY</tt> creates such a class on the fly by means of an embedded lambda expression.
161162
162163 <b>Usage:</b>
163164
200201 assert(i == 5); // 'finally' code was executed in reversed order at end-of-scope
201202 \endcode
202203
203 This idea was popularized by Marko Tintor in "<a href="http://blog.memsql.com/c-error-handling-with-auto/">The Auto Macro: A Clean Approach to C++ Error Handling</a>".
204 This idea was popularized by Marko Tintor in <a href="http://blog.memsql.com/c-error-handling-with-auto/">The Auto Macro: A Clean Approach to C++ Error Handling</a>.
204205 */
205206 #define VIGRA_FINALLY(destructor) \
206207 VIGRA_FINALLY_IMPL(destructor, __COUNTER__)
215216 }
216217
217218 }
219
220 namespace vigra
221 {
222 namespace detail
223 {
224 template <typename TPL, size_t N, typename FUNCTOR>
225 struct for_each_in_tuple_impl
226 {
227 typedef for_each_in_tuple_impl<TPL, N-1, FUNCTOR> ForEachRecursion;
228
229 void operator()(TPL && t, FUNCTOR && f) const
230 {
231 ForEachRecursion()(std::forward<TPL>(t), std::forward<FUNCTOR>(f));
232 f(std::get<N-1>(std::forward<TPL>(t)));
233 }
234 };
235
236 template <typename TPL, typename FUNCTOR>
237 struct for_each_in_tuple_impl<TPL, 0, FUNCTOR>
238 {
239 void operator()(TPL && t, FUNCTOR && f) const
240 {}
241 };
242 } // namespace detail
243
244 /**
245 * The for_each_in_tuple function calls the functor f on all elements of the tuple t.
246 * For each element type in the tuple, the functor must have an appropriate overload of operator().
247 *
248 * Example:
249 * \code
250 * #include <iostream>
251 * #include <tuple>
252 * #include <vigra/utilities.hxx>
253 *
254 * struct print
255 * {
256 * template <typename T>
257 * void operator()(T const & t) const
258 * {
259 * std::cout << t << std::endl;
260 * }
261 * };
262 *
263 * struct add_one
264 * {
265 * template <typename T>
266 * void operator()(T & t) const
267 * {
268 * t += 1;
269 * }
270 * };
271 *
272 * int main()
273 * {
274 * std::tuple<int, double, size_t> tpl(-5, 0.4, 10);
275 * vigra::for_each_in_tuple(tpl, add_one());
276 * vigra::for_each_in_tuple(tpl, print());
277 * }
278 * \endcode
279 */
280 template <typename TPL, typename FUNCTOR>
281 void for_each_in_tuple(TPL && t, FUNCTOR && f)
282 {
283 typedef typename std::decay<TPL>::type UNQUALIFIED_TPL;
284 typedef detail::for_each_in_tuple_impl<TPL, std::tuple_size<UNQUALIFIED_TPL>::value, FUNCTOR> ForEachImpl;
285
286 ForEachImpl()(std::forward<TPL>(t), std::forward<FUNCTOR>(f));
287 }
288
289 } // namespace vigra
218290
219291 /** \page Utilities Utilities
220292 Basic helper functionality needed throughout.
307307
308308 } // namespace detail
309309
310 /** \addtogroup MultiArrayDistanceTransform
310 /** \addtogroup DistanceTransform
311311 */
312312 //@{
313313
386386 transformMultiArray( source, dest,
387387 ifThenElse( Arg1() != Param(0), Param(maxDist), Param(rzero) ));
388388
389 for(int d = 0; d < N; ++d )
389 for(unsigned d = 0; d < N; ++d )
390390 {
391391 Navigator nav( dest.traverser_begin(), dest.shape(), d);
392392 for( ; nav.hasMore(); nav++ )
495495
496496 T2 maxDist(2*sum(labels.shape()*pixelPitch));
497497 dest = maxDist;
498 for( int d = 0; d < N; ++d )
498 for( unsigned d = 0; d < N; ++d )
499499 {
500500 LabelNavigator lnav( labels.traverser_begin(), labels.shape(), d );
501501 DNavigator dnav( dest.traverser_begin(), dest.shape(), d );
8989 class Shape, class Visitor>
9090 static void exec(const MultiArrayView<0, Data, S1>& u_data, MultiArrayView<0, Label, S2> u_labels,
9191 const MultiArrayView<0, Data, S1>& v_data, MultiArrayView<0, Label, S2> v_labels,
92 const Shape& block_difference, NeighborhoodType neighborhood, Visitor visitor)
92 const Shape& block_difference, NeighborhoodType, Visitor visitor)
9393 {
9494 visitor(u_data(0), u_labels(0), v_data(0), v_labels(0), block_difference);
9595 }
193193 static unsigned int b[];
194194 static unsigned int c[];
195195 static Direction bd[43][6];
196 static Direction bc[43][3];
196 static Direction bc[43][4];
197197 static Diff3D d[];
198198 static Diff3D rd[][6];
199199 };
415415 };
416416
417417 template <int DUMMY>
418 Direction NeighborCode3D::StaticData<DUMMY>::bc[43][3] = {
419 { InFront, North, West}, // 0 - NotAtBorder
420 { InFront, North, West}, // 1 - AtRightBorder
421 { InFront, North, Error}, // 2 - AtLeftBorder
422 { Error, Error, Error},
423 { InFront, West, Error}, // 4 - AtTopBorder
424 { InFront, West, Error}, // 5 - AtTopRightBorder
425 { InFront, Error,Error}, // 6 - AtTopLeftBorder
426 { Error, Error, Error},
427 { InFront, North, West}, // 8 - AtBottomBorder
428 { InFront, North, West}, // 9 - AtBottomRightBorder
429 { InFront, North, Error}, //10- AtBottomLeftBorder
430 { Error, Error, Error},
431 { Error, Error, Error},
432 { Error, Error, Error},
433 { Error, Error, Error},
434 { Error, Error, Error},
435 { North, West, Error}, //16 - AtFrontBorder
436 { North, West, Error}, //17 - AtFrontRightBorder
437 { North, Error, Error}, //18 - AtFrontLeftBorder
438 { Error, Error, Error},
439 { West, Error, Error}, //20 - AtTopFrontBorder
440 { West, Error, Error}, //21 - AtTopRightFrontBorder
441 { Error, Error, Error}, //22 - AtTopLeftFrontBorder
442 { Error, Error, Error},
443 { North, West, Error}, //24 - AtBottomFrontBorder
444 { North, West, Error}, //25 - AtBottomRightFrontBorder
445 { North, Error, Error}, //26 - AtBottomLeftFrontBorder
446 { Error, Error, Error},
447 { Error, Error, Error},
448 { Error, Error, Error},
449 { Error, Error, Error},
450 { Error, Error, Error},
451 { InFront, North, West}, //32 - AtRearBorder
452 { InFront, North, West}, //33 - AtRearRightBorder
453 { InFront, North, Error}, //34 - AtRearLeftBorder
454 { Error, Error, Error},
455 { InFront, West, Error}, //36 - AtTopRearBorder
456 { InFront, West, Error}, //37 - AtTopRightRearBorder
457 { InFront, Error, Error}, //38 - AtTopLeftRearBorder
458 { Error, Error, Error},
459 { InFront, North, West}, //40 - AtBottomRearBorder
460 { InFront, North, West}, //41 - AtBottomRightRearBorder
461 { InFront, North, Error} //42 - AtBottomLeftRearBorder
418 Direction NeighborCode3D::StaticData<DUMMY>::bc[43][4] = {
419 { InFront, North, West, Error}, // 0 - NotAtBorder
420 { InFront, North, West, Error}, // 1 - AtRightBorder
421 { InFront, North, Error, Error}, // 2 - AtLeftBorder
422 { Error, Error, Error, Error},
423 { InFront, West, Error, Error}, // 4 - AtTopBorder
424 { InFront, West, Error, Error}, // 5 - AtTopRightBorder
425 { InFront, Error,Error, Error}, // 6 - AtTopLeftBorder
426 { Error, Error, Error, Error},
427 { InFront, North, West, Error}, // 8 - AtBottomBorder
428 { InFront, North, West, Error}, // 9 - AtBottomRightBorder
429 { InFront, North, Error, Error}, //10- AtBottomLeftBorder
430 { Error, Error, Error, Error},
431 { Error, Error, Error, Error},
432 { Error, Error, Error, Error},
433 { Error, Error, Error, Error},
434 { Error, Error, Error, Error},
435 { North, West, Error, Error}, //16 - AtFrontBorder
436 { North, West, Error, Error}, //17 - AtFrontRightBorder
437 { North, Error, Error, Error}, //18 - AtFrontLeftBorder
438 { Error, Error, Error, Error},
439 { West, Error, Error, Error}, //20 - AtTopFrontBorder
440 { West, Error, Error, Error}, //21 - AtTopRightFrontBorder
441 { Error, Error, Error, Error}, //22 - AtTopLeftFrontBorder
442 { Error, Error, Error, Error},
443 { North, West, Error, Error}, //24 - AtBottomFrontBorder
444 { North, West, Error, Error}, //25 - AtBottomRightFrontBorder
445 { North, Error, Error, Error}, //26 - AtBottomLeftFrontBorder
446 { Error, Error, Error, Error},
447 { Error, Error, Error, Error},
448 { Error, Error, Error, Error},
449 { Error, Error, Error, Error},
450 { Error, Error, Error, Error},
451 { InFront, North, West, Error}, //32 - AtRearBorder
452 { InFront, North, West, Error}, //33 - AtRearRightBorder
453 { InFront, North, Error, Error}, //34 - AtRearLeftBorder
454 { Error, Error, Error, Error},
455 { InFront, West, Error, Error}, //36 - AtTopRearBorder
456 { InFront, West, Error, Error}, //37 - AtTopRightRearBorder
457 { InFront, Error, Error, Error}, //38 - AtTopLeftRearBorder
458 { Error, Error, Error, Error},
459 { InFront, North, West, Error}, //40 - AtBottomRearBorder
460 { InFront, North, West, Error}, //41 - AtBottomRightRearBorder
461 { InFront, North, Error, Error} //42 - AtBottomLeftRearBorder
462462 };
463463
464464 template <int DUMMY>
305305 }
306306 }
307307
308 /** \addtogroup SeededRegionGrowing Region Segmentation Algorithms
309 Region growing, watersheds, and voronoi tesselation
308 /** \addtogroup Superpixels
310309 */
311310 //@{
312311
316315
317316 <b>\#include</b> \<vigra/watersheds.hxx\><br>
318317 Namespace: vigra
319
318
320319 \code
321320 MultiArray<2, float> boundary_indicator(w, h);
322321 MultiArray<2, int> seeds(boundary_indicator.shape());
323
322
324323 // detect all minima in 'boundary_indicator' that are below gray level 22
325324 generateWatershedSeeds(boundary_indicator, seeds,
326325 SeedOptions().minima().threshold(22.0));
330329 {
331330 public:
332331 enum DetectMinima { LevelSets, Minima, ExtendedMinima, Unspecified };
333
332
334333 double thresh;
335334 DetectMinima mini;
336
335
337336 /**\brief Construct default options object.
338337 *
339338 Defaults are: detect minima without thresholding (i.e. all minima).
342341 : thresh(NumericTraits<double>::max()),
343342 mini(Minima)
344343 {}
345
344
346345 /** Generate seeds at minima.
347
346
348347 Default: true
349348 */
350349 SeedOptions & minima()
352351 mini = Minima;
353352 return *this;
354353 }
355
354
356355 /** Generate seeds at minima and minimal plateaus.
357
356
358357 Default: false
359358 */
360359 SeedOptions & extendedMinima()
362361 mini = ExtendedMinima;
363362 return *this;
364363 }
365
364
366365 /** Generate seeds as level sets.
367
366
368367 Note that you must also set a threshold to define which level set is to be used.<br>
369368 Default: false
370369 */
373372 mini = LevelSets;
374373 return *this;
375374 }
376
375
377376 /** Generate seeds as level sets at given threshold.
378
377
379378 Equivalent to <tt>SeedOptions().levelSet().threshold(threshold)</tt><br>
380379 Default: false
381380 */
385384 thresh = threshold;
386385 return *this;
387386 }
388
387
389388 /** Set threshold.
390
389
391390 The threshold will be used by both the minima and level set variants
392391 of seed generation.<br>
393392 Default: no thresholding
397396 thresh = threshold;
398397 return *this;
399398 }
400
399
401400 // check whether the threshold has been set for the target type T
402401 template <class T>
403402 bool thresholdIsValid() const
404403 {
405404 return thresh < double(NumericTraits<T>::max());
406405 }
407
406
408407 // indicate that this option object is invalid (for internal use in watersheds)
409408 SeedOptions & unspecified()
410409 {
422421 looking for local minima (possibly including minimal plateaus) of the boundaryness,
423422 of by looking at level sets (i.e. regions where the boundaryness is below a threshold).
424423 Both methods can also be combined, so that only minima below a threshold are returned.
425 The particular seeding strategy is specified by the <tt>options</tt> object
424 The particular seeding strategy is specified by the <tt>options</tt> object
426425 (see \ref SeedOptions).
427
426
428427 The pixel type of the input image must be <tt>LessThanComparable</tt>.
429428 The pixel type of the output image must be large enough to hold the labels for all seeds.
430429 (typically, you will use <tt>UInt32</tt>). The function will label seeds by consecutive integers
431430 (starting from 1) and returns the largest label it used.
432
433 Pass \ref vigra::NeighborhoodType "IndirectNeighborhood" or \ref vigra::NeighborhoodType "DirectNeighborhood"
434 (first form of the function)
435 or \ref vigra::EightNeighborCode or \ref vigra::FourNeighborCode (second and third forms) to determine the
436 neighborhood where pixel values are compared.
431
432 Pass \ref vigra::NeighborhoodType "IndirectNeighborhood" or \ref vigra::NeighborhoodType "DirectNeighborhood"
433 (first form of the function)
434 or \ref vigra::EightNeighborCode or \ref vigra::FourNeighborCode (second and third forms) to determine the
435 neighborhood where pixel values are compared.
437436
438437 <b> Declarations:</b>
439438
459458 class Neighborhood = EightNeighborCode>
460459 unsigned int
461460 generateWatershedSeeds(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
462 DestIterator upperleftd, DestAccessor da,
461 DestIterator upperleftd, DestAccessor da,
463462 Neighborhood neighborhood = EightNeighborCode(),
464463 SeedOptions const & options = SeedOptions());
465464 }
472471 class Neighborhood = EightNeighborCode>
473472 unsigned int
474473 generateWatershedSeeds(triple<SrcIterator, SrcIterator, SrcAccessor> src,
475 pair<DestIterator, DestAccessor> dest,
474 pair<DestIterator, DestAccessor> dest,
476475 Neighborhood neighborhood = EightNeighborCode(),
477476 SeedOptions const & options = SeedOptions());
478477 }
494493 class Neighborhood>
495494 unsigned int
496495 generateWatershedSeeds(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
497 DestIterator upperleftd, DestAccessor da,
498 Neighborhood neighborhood,
496 DestIterator upperleftd, DestAccessor da,
497 Neighborhood,
499498 SeedOptions const & options = SeedOptions())
500499 {
501500 using namespace functor;
502501 typedef typename SrcAccessor::value_type SrcType;
503
504 vigra_precondition(options.mini != SeedOptions::LevelSets ||
502
503 vigra_precondition(options.mini != SeedOptions::LevelSets ||
505504 options.thresholdIsValid<SrcType>(),
506505 "generateWatershedSeeds(): SeedOptions.levelSets() must be specified with threshold.");
507
506
508507 Diff2D shape = lowerrights - upperlefts;
509508 BImage seeds(shape);
510
509
511510 if(options.mini == SeedOptions::LevelSets)
512511 {
513512 transformImage(srcIterRange(upperlefts, lowerrights, sa),
523522 .allowPlateaus(options.mini == SeedOptions::ExtendedMinima);
524523 if(options.thresholdIsValid<SrcType>())
525524 lm_options.threshold(options.thresh);
526
525
527526 localMinima(srcIterRange(upperlefts, lowerrights, sa), destImage(seeds),
528527 lm_options);
529528 }
530
531 return labelImageWithBackground(srcImageRange(seeds), destIter(upperleftd, da),
529
530 return labelImageWithBackground(srcImageRange(seeds), destIter(upperleftd, da),
532531 Neighborhood::DirectionCount == 8, 0);
533532 }
534533
536535 class DestIterator, class DestAccessor>
537536 inline unsigned int
538537 generateWatershedSeeds(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
539 DestIterator upperleftd, DestAccessor da,
538 DestIterator upperleftd, DestAccessor da,
540539 SeedOptions const & options = SeedOptions())
541540 {
542 return generateWatershedSeeds(upperlefts, lowerrights, sa, upperleftd, da,
541 return generateWatershedSeeds(upperlefts, lowerrights, sa, upperleftd, da,
543542 EightNeighborCode(), options);
544543 }
545544
548547 class Neighborhood>
549548 inline unsigned int
550549 generateWatershedSeeds(triple<SrcIterator, SrcIterator, SrcAccessor> src,
551 pair<DestIterator, DestAccessor> dest,
550 pair<DestIterator, DestAccessor> dest,
552551 Neighborhood neighborhood,
553552 SeedOptions const & options = SeedOptions())
554553 {
555554 return generateWatershedSeeds(src.first, src.second, src.third,
556 dest.first, dest.second,
555 dest.first, dest.second,
557556 neighborhood, options);
558557 }
559558
561560 class DestIterator, class DestAccessor>
562561 inline unsigned int
563562 generateWatershedSeeds(triple<SrcIterator, SrcIterator, SrcAccessor> src,
564 pair<DestIterator, DestAccessor> dest,
563 pair<DestIterator, DestAccessor> dest,
565564 SeedOptions const & options = SeedOptions())
566565 {
567566 return generateWatershedSeeds(src.first, src.second, src.third,
568 dest.first, dest.second,
567 dest.first, dest.second,
569568 EightNeighborCode(), options);
570569 }
571570
579578
580579 Note: This function is largely obsolete, \ref watershedsMultiArray() should be
581580 preferred unless top speed is required.
582
581
583582 This function implements the union-find version of the watershed algorithms
584583 described as algorithm 4.7 in
585584
607606 class Neighborhood>
608607 unsigned int
609608 watershedsUnionFind(MultiArrayView<2, T1, S1> const & src,
610 MultiArrayView<2, T2, S2> dest,
609 MultiArrayView<2, T2, S2> dest,
611610 Neighborhood neighborhood = EightNeighborCode());
612611 }
613612 \endcode
701700 class Neighborhood>
702701 unsigned int
703702 watershedsUnionFind(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
704 DestIterator upperleftd, DestAccessor da,
703 DestIterator upperleftd, DestAccessor da,
705704 Neighborhood neighborhood)
706705 {
707706 SImage orientationImage(lowerrights - upperlefts);
728727 watershedsUnionFind(triple<SrcIterator, SrcIterator, SrcAccessor> src,
729728 pair<DestIterator, DestAccessor> dest, Neighborhood neighborhood)
730729 {
731 return watershedsUnionFind(src.first, src.second, src.third,
730 return watershedsUnionFind(src.first, src.second, src.third,
732731 dest.first, dest.second, neighborhood);
733732 }
734733
738737 watershedsUnionFind(triple<SrcIterator, SrcIterator, SrcAccessor> src,
739738 pair<DestIterator, DestAccessor> dest)
740739 {
741 return watershedsUnionFind(src.first, src.second, src.third,
740 return watershedsUnionFind(src.first, src.second, src.third,
742741 dest.first, dest.second);
743742 }
744743
749748 watershedsUnionFind(MultiArrayView<2, T1, S1> const & src,
750749 MultiArrayView<2, T2, S2> dest, Neighborhood neighborhood)
751750 {
752 return watershedsUnionFind(srcImageRange(src),
751 return watershedsUnionFind(srcImageRange(src),
753752 destImage(dest), neighborhood);
754753 }
755754
761760 {
762761 vigra_precondition(src.shape() == dest.shape(),
763762 "watershedsUnionFind(): shape mismatch between input and output.");
764 return watershedsUnionFind(srcImageRange(src),
763 return watershedsUnionFind(srcImageRange(src),
765764 destImage(dest));
766765 }
767766
775774 {
776775 public:
777776 enum Method { RegionGrowing, UnionFind };
778
777
779778 double max_cost, bias;
780779 SRGType terminate;
781780 Method method;
782781 unsigned int biased_label, bucket_count;
783782 SeedOptions seed_options;
784
785
786
783
784
785
787786 /** \brief Create options object with default settings.
788787
789788 Defaults are: perform complete grow (all pixels are assigned to regions),
790 use standard algorithm, assume that the destination image already contains
789 use standard algorithm, assume that the destination image already contains
791790 region seeds.
792791 */
793792 WatershedOptions()
798797 biased_label(0),
799798 bucket_count(0),
800799 seed_options(SeedOptions().unspecified())
801 {}
802
800 {}
801
803802 /** \brief Perform complete grow.
804803
805804 That is, all pixels are assigned to regions, without explicit contours
806805 in between.
807
806
808807 Default: true
809808 */
810809 WatershedOptions & completeGrow()
812811 terminate = SRGType(CompleteGrow | (terminate & StopAtThreshold));
813812 return *this;
814813 }
815
814
816815 /** \brief Keep one-pixel wide contour between regions.
817
816
818817 Note that this option is unsupported by the turbo algorithm.
819818
820819 Default: false
824823 terminate = SRGType(KeepContours | (terminate & StopAtThreshold));
825824 return *this;
826825 }
827
826
828827 /** \brief Set \ref SRGType explicitly.
829
828
830829 Default: CompleteGrow
831830 */
832831 WatershedOptions & srgType(SRGType type)
834833 terminate = type;
835834 return *this;
836835 }
837
836
838837 /** \brief Stop region growing when the boundaryness exceeds the threshold.
839
838
840839 This option may be combined with completeGrow() and keepContours().
841
840
842841 Default: no early stopping
843842 */
844843 WatershedOptions & stopAtThreshold(double threshold)
847846 max_cost = threshold;
848847 return *this;
849848 }
850
849
851850 /** \brief Use a simpler, but faster region growing algorithm.
852
851
853852 The algorithm internally uses a \ref BucketQueue to determine
854853 the processing order of the pixels. This is only useful,
855854 when the input boundary indicator image contains integers
856855 in the range <tt>[0, ..., bucket_count-1]</tt>. Since
857856 these boundary indicators are typically represented as
858857 UInt8 images, the default <tt>bucket_count</tt> is 256.
859
858
860859 Default: don't use the turbo algorithm
861860 */
862861 WatershedOptions & turboAlgorithm(unsigned int bucket_count = 256)
865864 method = RegionGrowing;
866865 return *this;
867866 }
868
867
869868 /** \brief Specify seed options.
870
869
871870 In this case, watershedsRegionGrowing() assumes that the destination
872 image does not yet contain seeds. It will therefore call
871 image does not yet contain seeds. It will therefore call
873872 generateWatershedSeeds() and pass on the seed options.
874
873
875874 Default: don't compute seeds (i.e. assume that destination image already
876875 contains seeds).
877876 */
880879 seed_options = s;
881880 return *this;
882881 }
883
882
884883 /** \brief Bias the cost of the specified region by the given factor.
885
884
886885 In certain applications, one region (typically the background) should
887886 be preferred in region growing. This is most easily achieved
888887 by adjusting the assignment cost for that region as <tt>factor*cost</tt>,
889 with a factor slightly below 1.
890
888 with a factor slightly below 1. (Accordingly, factors above 1 would
889 correspond to a discouragement of the bias label.)
890
891891 Default: don't bias any region.
892892 */
893893 WatershedOptions & biasLabel(unsigned int label, double factor)
896896 bias = factor;
897897 return *this;
898898 }
899
899
900900 /** \brief Specify the algorithm to be used.
901
901
902902 Possible values are <tt>WatershedOptions::RegionGrowing</tt> and
903903 <tt>WatershedOptions::UnionFind</tt>. The latter algorithm is fastest
904904 but doesn't support seeds and any of the other options.
905
905
906906 Default: RegionGrowing.
907907 */
908908 WatershedOptions & useMethod(Method method)
910910 this->method = method;
911911 return *this;
912912 }
913
913
914914 /** \brief Use region-growing watershed.
915
916 Use this method when you want to specify seeds explicitly (seeded watersheds)
915
916 Use this method when you want to specify seeds explicitly (seeded watersheds)
917917 or use any of the other options.
918
918
919919 Default: true.
920920 */
921921 WatershedOptions & regionGrowing()
923923 method = RegionGrowing;
924924 return *this;
925925 }
926
926
927927 /** \brief Use union-find watershed.
928
929 This is the fasted method, but it doesn't support seeds and any of the other
928
929 This is the fasted method, but it doesn't support seeds and any of the other
930930 options (they will be silently ignored).
931
931
932932 Default: false.
933933 */
934934 WatershedOptions & unionFind()
944944 class WatershedStatistics
945945 {
946946 public:
947
947
948948 typedef SeedRgDirectValueFunctor<CostType> value_type;
949949 typedef value_type & reference;
950950 typedef value_type const & const_reference;
951
951
952952 typedef CostType first_argument_type;
953953 typedef LabelType second_argument_type;
954954 typedef LabelType argument_type;
955
955
956956 WatershedStatistics()
957957 {}
958958
965965 /** update regions statistics (do nothing in the watershed algorithm)
966966 */
967967 template <class T1, class T2>
968 void operator()(first_argument_type const &, second_argument_type const &)
968 void operator()(first_argument_type const &, second_argument_type const &)
969969 {}
970970
971971 /** ask for maximal index (label) allowed
980980
981981 /** read the statistics functor for a region via its label
982982 */
983 const_reference operator[](argument_type label) const
983 const_reference operator[](argument_type) const
984984 { return stats; }
985985
986986 /** access the statistics functor for a region via its label
987987 */
988 reference operator[](argument_type label)
988 reference operator[](argument_type)
989989 { return stats; }
990990
991991 value_type stats;
10091009 /* the return type of the cost() function
10101010 */
10111011 typedef Value cost_type;
1012
1012
10131013 SeedRgBiasedValueFunctor(double b = 1.0)
10141014 : bias(b)
10151015 {}
10301030 class BiasedWatershedStatistics
10311031 {
10321032 public:
1033
1033
10341034 typedef SeedRgBiasedValueFunctor<CostType> value_type;
10351035 typedef value_type & reference;
10361036 typedef value_type const & const_reference;
1037
1037
10381038 typedef CostType first_argument_type;
10391039 typedef LabelType second_argument_type;
10401040 typedef LabelType argument_type;
1041
1041
10421042 BiasedWatershedStatistics(LabelType biasedLabel, double bias)
10431043 : biased_label(biasedLabel),
10441044 biased_stats(bias)
10531053 /** update regions statistics (do nothing in the watershed algorithm)
10541054 */
10551055 template <class T1, class T2>
1056 void operator()(first_argument_type const &, second_argument_type const &)
1056 void operator()(first_argument_type const &, second_argument_type const &)
10571057 {}
10581058
10591059 /** ask for maximal index (label) allowed
10691069 /** read the statistics functor for a region via its label
10701070 */
10711071 const_reference operator[](argument_type label) const
1072 {
1072 {
10731073 return (label == biased_label)
10741074 ? biased_stats
1075 : stats;
1075 : stats;
10761076 }
10771077
10781078 /** access the statistics functor for a region via its label
10791079 */
10801080 reference operator[](argument_type label)
1081 {
1081 {
10821082 return (label == biased_label)
10831083 ? biased_stats
1084 : stats;
1084 : stats;
10851085 }
10861086
10871087 LabelType biased_label;
10941094
10951095 Note: This function is largely obsolete, \ref watershedsMultiArray() should be
10961096 preferred unless top speed is required.
1097
1097
10981098 This function implements variants of the watershed algorithm
10991099 described in
11001100
11061106 designating membership of each point in one of the regions. Plateaus in the boundary
11071107 indicator (i.e. regions of constant gray value) are handled via a Euclidean distance
11081108 transform by default.
1109
1110 By default, the destination image is assumed to hold seeds for a seeded watershed
1111 transform. Seeds may, for example, be created by means of generateWatershedSeeds().
1109
1110 By default, the destination image is assumed to hold seeds for a seeded watershed
1111 transform. Seeds may, for example, be created by means of generateWatershedSeeds().
11121112 Note that the seeds will be overridden with the final watershed segmentation.
1113
1114 Alternatively, you may provide \ref SeedOptions in order to instruct
1113
1114 Alternatively, you may provide \ref SeedOptions in order to instruct
11151115 watershedsRegionGrowing() to generate its own seeds (it will call generateWatershedSeeds()
11161116 internally). In that case, the destination image should be zero-initialized.
1117
1118 You can specify the neighborhood system to be used by passing \ref FourNeighborCode
1117
1118 You can specify the neighborhood system to be used by passing \ref FourNeighborCode
11191119 or \ref EightNeighborCode (default).
1120
1120
11211121 Further options to be specified via \ref WatershedOptions are:
1122
1122
11231123 <ul>
1124 <li> Whether to keep a 1-pixel-wide contour (with label 0) between regions or
1124 <li> Whether to keep a 1-pixel-wide contour (with label 0) between regions or
11251125 perform complete grow (i.e. all pixels are assigned to a region).
11261126 <li> Whether to stop growing when the boundaryness exceeds a threshold (remaining
11271127 pixels keep label 0).
11281128 <li> Whether to use a faster, but less powerful algorithm ("turbo algorithm"). It
11291129 is faster because it orders pixels by means of a \ref BucketQueue (therefore,
1130 the boundary indicator must contain integers in the range
1130 the boundary indicator must contain integers in the range
11311131 <tt>[0, ..., bucket_count-1]</tt>, where <tt>bucket_count</tt> is specified in
11321132 the options object), it only supports complete growing (no contour between regions
11331133 is possible), and it handles plateaus in a simplistic way. It also saves some
11341134 memory because it allocates less temporary storage.
1135 <li> Whether one region (label) is to be preferred or discouraged by biasing its cost
1135 <li> Whether one region (label) is to be preferred or discouraged by biasing its cost
11361136 with a given factor (smaller than 1 for preference, larger than 1 for discouragement).
11371137 </ul>
11381138
11391139 Note that VIGRA provides an alternative implementation of the watershed transform via
1140 \ref watershedsUnionFind().
1140 \ref watershedsUnionFind().
11411141
11421142 <b> Declarations:</b>
11431143
11491149 class Neighborhood = EightNeighborCode>
11501150 unsigned int
11511151 watershedsRegionGrowing(MultiArrayView<2, T1, S1> const & src,
1152 MultiArrayView<2, T2, S2> dest,
1152 MultiArrayView<2, T2, S2> dest,
11531153 Neighborhood neighborhood = EightNeighborCode(),
11541154 WatershedOptions const & options = WatershedOptions());
11551155
11571157 class T2, class S2>
11581158 unsigned int
11591159 watershedsRegionGrowing(MultiArrayView<2, T1, S1> const & src,
1160 MultiArrayView<2, T2, S2> dest,
1160 MultiArrayView<2, T2, S2> dest,
11611161 WatershedOptions const & options = WatershedOptions());
11621162 }
11631163 \endcode
11711171 class Neighborhood = EightNeighborCode>
11721172 unsigned int
11731173 watershedsRegionGrowing(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
1174 DestIterator upperleftd, DestAccessor da,
1174 DestIterator upperleftd, DestAccessor da,
11751175 Neighborhood neighborhood = EightNeighborCode(),
11761176 WatershedOptions const & options = WatershedOptions());
11771177
11791179 class DestIterator, class DestAccessor>
11801180 unsigned int
11811181 watershedsRegionGrowing(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
1182 DestIterator upperleftd, DestAccessor da,
1182 DestIterator upperleftd, DestAccessor da,
11831183 WatershedOptions const & options = WatershedOptions());
11841184 }
11851185 \endcode
11911191 class Neighborhood = EightNeighborCode>
11921192 unsigned int
11931193 watershedsRegionGrowing(triple<SrcIterator, SrcIterator, SrcAccessor> src,
1194 pair<DestIterator, DestAccessor> dest,
1194 pair<DestIterator, DestAccessor> dest,
11951195 Neighborhood neighborhood = EightNeighborCode(),
11961196 WatershedOptions const & options = WatershedOptions());
1197
1197
11981198 template <class SrcIterator, class SrcAccessor,
11991199 class DestIterator, class DestAccessor>
12001200 unsigned int
12011201 watershedsRegionGrowing(triple<SrcIterator, SrcIterator, SrcAccessor> src,
1202 pair<DestIterator, DestAccessor> dest,
1202 pair<DestIterator, DestAccessor> dest,
12031203 WatershedOptions const & options = WatershedOptions());
12041204 }
12051205 \endcode
12061206 \deprecatedEnd
1207
1207
12081208 <b> Usage:</b>
12091209
12101210 <b>\#include</b> \<vigra/watersheds.hxx\><br>
12151215 \code
12161216 MultiArray<2, float> src(w, h);
12171217 ... // read input data
1218
1218
12191219 // compute gradient magnitude at scale 1.0 as a boundary indicator
12201220 MultiArray<2, float> gradMag(w, h);
12211221 gaussianGradientMagnitude(src, gradMag, 1.0);
12251225 // the pixel type of the destination image must be large enough to hold
12261226 // numbers up to 'max_region_label' to prevent overflow
12271227 MultiArray<2, unsigned int> labeling(w, h);
1228
1228
12291229 // call watershed algorithm for 4-neighborhood, leave a 1-pixel boundary between regions,
12301230 // and autogenerate seeds from all gradient minima where the magnitude is below 2.0
1231 unsigned int max_region_label =
1231 unsigned int max_region_label =
12321232 watershedsRegionGrowing(gradMag, labeling,
12331233 FourNeighborCode(),
12341234 WatershedOptions().keepContours()
12351235 .seedOptions(SeedOptions().minima().threshold(2.0)));
12361236 }
1237
1237
12381238 // example 2
12391239 {
12401240 MultiArray<2, unsigned int> labeling(w, h);
1241
1242 // compute seeds beforehand (use connected components of all pixels
1241
1242 // compute seeds beforehand (use connected components of all pixels
12431243 // where the gradient is below 4.0)
1244 unsigned int max_region_label =
1244 unsigned int max_region_label =
12451245 generateWatershedSeeds(gradMag, labeling,
12461246 SeedOptions().levelSets(4.0));
1247
1247
12481248 // quantize the gradient image to 256 gray levels
12491249 MultiArray<2, unsigned char> gradMag256(w, h);
1250 FindMinMax<float> minmax;
1250 FindMinMax<float> minmax;
12511251 inspectImage(gradMag, minmax); // find original range
12521252 transformImage(gradMag, gradMag256,
12531253 linearRangeMapping(minmax, 0, 255));
1254
1254
12551255 // call the turbo algorithm with 256 bins, using 8-neighborhood
12561256 watershedsRegionGrowing(gradMag256, labeling,
12571257 WatershedOptions().turboAlgorithm(256));
12581258 }
1259
1259
12601260 // example 3
12611261 {
12621262 MultiArray<2, unsigned int> labeling(w, h);
1263
1263
12641264 .. // get seeds from somewhere, e.g. an interactive labeling program,
12651265 // make sure that label 1 corresponds to the background
1266
1266
12671267 // bias the watershed algorithm so that the background is preferred
12681268 // by reducing the cost for label 1 to 90%
12691269 watershedsRegionGrowing(gradMag, labeling,
12751275 \code
12761276 vigra::BImage src(w, h);
12771277 ... // read input data
1278
1278
12791279 // compute gradient magnitude at scale 1.0 as a boundary indicator
12801280 vigra::FImage gradMag(w, h);
12811281 gaussianGradientMagnitude(srcImageRange(src), destImage(gradMag), 1.0);
12851285 // the pixel type of the destination image must be large enough to hold
12861286 // numbers up to 'max_region_label' to prevent overflow
12871287 vigra::IImage labeling(w, h);
1288
1288
12891289 // call watershed algorithm for 4-neighborhood, leave a 1-pixel boundary between regions,
12901290 // and autogenerate seeds from all gradient minima where the magnitude is below 2.0
1291 unsigned int max_region_label =
1291 unsigned int max_region_label =
12921292 watershedsRegionGrowing(srcImageRange(gradMag), destImage(labeling),
12931293 FourNeighborCode(),
12941294 WatershedOptions().keepContours()
12951295 .seedOptions(SeedOptions().minima().threshold(2.0)));
12961296 }
1297
1297
12981298 // example 2
12991299 {
13001300 vigra::IImage labeling(w, h);
1301
1302 // compute seeds beforehand (use connected components of all pixels
1301
1302 // compute seeds beforehand (use connected components of all pixels
13031303 // where the gradient is below 4.0)
1304 unsigned int max_region_label =
1304 unsigned int max_region_label =
13051305 generateWatershedSeeds(srcImageRange(gradMag), destImage(labeling),
13061306 SeedOptions().levelSets(4.0));
1307
1307
13081308 // quantize the gradient image to 256 gray levels
13091309 vigra::BImage gradMag256(w, h);
1310 vigra::FindMinMax<float> minmax;
1310 vigra::FindMinMax<float> minmax;
13111311 inspectImage(srcImageRange(gradMag), minmax); // find original range
13121312 transformImage(srcImageRange(gradMag), destImage(gradMag256),
13131313 linearRangeMapping(minmax, 0, 255));
1314
1314
13151315 // call the turbo algorithm with 256 bins, using 8-neighborhood
13161316 watershedsRegionGrowing(srcImageRange(gradMag256), destImage(labeling),
13171317 WatershedOptions().turboAlgorithm(256));
13181318 }
1319
1319
13201320 // example 3
13211321 {
13221322 vigra::IImage labeling(w, h);
1323
1323
13241324 .. // get seeds from somewhere, e.g. an interactive labeling program,
13251325 // make sure that label 1 corresponds to the background
1326
1326
13271327 // bias the watershed algorithm so that the background is preferred
13281328 // by reducing the cost for label 1 to 90%
13291329 watershedsRegionGrowing(srcImageRange(gradMag), destImage(labeling),
13541354 class Neighborhood>
13551355 unsigned int
13561356 watershedsRegionGrowing(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
1357 DestIterator upperleftd, DestAccessor da,
1357 DestIterator upperleftd, DestAccessor da,
13581358 Neighborhood neighborhood,
13591359 WatershedOptions const & options = WatershedOptions())
13601360 {
1361 typedef typename SrcAccessor::value_type ValueType;
1362 typedef typename DestAccessor::value_type LabelType;
1363
1361 typedef typename SrcAccessor::value_type ValueType;
1362 typedef typename DestAccessor::value_type LabelType;
1363
13641364 unsigned int max_region_label = 0;
1365
1365
13661366 if(options.seed_options.mini != SeedOptions::Unspecified)
13671367 {
13681368 // we are supposed to compute seeds
1369 max_region_label =
1370 generateWatershedSeeds(srcIterRange(upperlefts, lowerrights, sa),
1369 max_region_label =
1370 generateWatershedSeeds(srcIterRange(upperlefts, lowerrights, sa),
13711371 destIter(upperleftd, da),
13721372 neighborhood, options.seed_options);
13731373 }
1374
1374
13751375 if(options.biased_label != 0)
13761376 {
13771377 // create a statistics functor for biased region growing
1378 detail::BiasedWatershedStatistics<ValueType, LabelType>
1378 detail::BiasedWatershedStatistics<ValueType, LabelType>
13791379 regionstats(options.biased_label, options.bias);
13801380
13811381 // perform region growing, starting from the seeds computed above
13821382 if(options.bucket_count == 0)
13831383 {
1384 max_region_label =
1384 max_region_label =
13851385 seededRegionGrowing(srcIterRange(upperlefts, lowerrights, sa),
13861386 srcIter(upperleftd, da),
1387 destIter(upperleftd, da),
1387 destIter(upperleftd, da),
13881388 regionstats, options.terminate, neighborhood, options.max_cost);
13891389 }
13901390 else
13911391 {
1392 max_region_label =
1392 max_region_label =
13931393 fastSeededRegionGrowing(srcIterRange(upperlefts, lowerrights, sa),
1394 destIter(upperleftd, da),
1395 regionstats, options.terminate,
1394 destIter(upperleftd, da),
1395 regionstats, options.terminate,
13961396 neighborhood, options.max_cost, options.bucket_count);
13971397 }
13981398 }
14041404 // perform region growing, starting from the seeds computed above
14051405 if(options.bucket_count == 0)
14061406 {
1407 max_region_label =
1407 max_region_label =
14081408 seededRegionGrowing(srcIterRange(upperlefts, lowerrights, sa),
14091409 srcIter(upperleftd, da),
1410 destIter(upperleftd, da),
1410 destIter(upperleftd, da),
14111411 regionstats, options.terminate, neighborhood, options.max_cost);
14121412 }
14131413 else
14141414 {
1415 max_region_label =
1415 max_region_label =
14161416 fastSeededRegionGrowing(srcIterRange(upperlefts, lowerrights, sa),
1417 destIter(upperleftd, da),
1418 regionstats, options.terminate,
1417 destIter(upperleftd, da),
1418 regionstats, options.terminate,
14191419 neighborhood, options.max_cost, options.bucket_count);
14201420 }
14211421 }
1422
1422
14231423 return max_region_label;
14241424 }
14251425
14271427 class DestIterator, class DestAccessor>
14281428 inline unsigned int
14291429 watershedsRegionGrowing(SrcIterator upperlefts, SrcIterator lowerrights, SrcAccessor sa,
1430 DestIterator upperleftd, DestAccessor da,
1430 DestIterator upperleftd, DestAccessor da,
14311431 WatershedOptions const & options = WatershedOptions())
14321432 {
14331433 return watershedsRegionGrowing(upperlefts, lowerrights, sa, upperleftd, da,
14391439 class Neighborhood>
14401440 inline unsigned int
14411441 watershedsRegionGrowing(triple<SrcIterator, SrcIterator, SrcAccessor> src,
1442 pair<DestIterator, DestAccessor> dest,
1442 pair<DestIterator, DestAccessor> dest,
14431443 Neighborhood neighborhood,
14441444 WatershedOptions const & options = WatershedOptions())
14451445 {
14461446 return watershedsRegionGrowing(src.first, src.second, src.third,
1447 dest.first, dest.second,
1447 dest.first, dest.second,
14481448 neighborhood, options);
14491449 }
14501450
14521452 class DestIterator, class DestAccessor>
14531453 inline unsigned int
14541454 watershedsRegionGrowing(triple<SrcIterator, SrcIterator, SrcAccessor> src,
1455 pair<DestIterator, DestAccessor> dest,
1455 pair<DestIterator, DestAccessor> dest,
14561456 WatershedOptions const & options = WatershedOptions())
14571457 {
14581458 return watershedsRegionGrowing(src.first, src.second, src.third,
1459 dest.first, dest.second,
1459 dest.first, dest.second,
14601460 EightNeighborCode(), options);
14611461 }
14621462
14651465 class Neighborhood>
14661466 inline unsigned int
14671467 watershedsRegionGrowing(MultiArrayView<2, T1, S1> const & src,
1468 MultiArrayView<2, T2, S2> dest,
1468 MultiArrayView<2, T2, S2> dest,
14691469 Neighborhood neighborhood,
14701470 WatershedOptions const & options = WatershedOptions())
14711471 {
14721472 vigra_precondition(src.shape() == dest.shape(),
14731473 "watershedsRegionGrowing(): shape mismatch between input and output.");
14741474 return watershedsRegionGrowing(srcImageRange(src),
1475 destImage(dest),
1475 destImage(dest),
14761476 neighborhood, options);
14771477 }
14781478
14801480 class T2, class S2>
14811481 inline unsigned int
14821482 watershedsRegionGrowing(MultiArrayView<2, T1, S1> const & src,
1483 MultiArrayView<2, T2, S2> dest,
1483 MultiArrayView<2, T2, S2> dest,
14841484 WatershedOptions const & options = WatershedOptions())
14851485 {
14861486 vigra_precondition(src.shape() == dest.shape(),
14871487 "watershedsRegionGrowing(): shape mismatch between input and output.");
14881488 return watershedsRegionGrowing(srcImageRange(src),
1489 destImage(dest),
1489 destImage(dest),
14901490 EightNeighborCode(), options);
14911491 }
14921492
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
3434
5353 //basically needed for iteration and border-checks
5454 int w = srcShape[0], h = srcShape[1], d = srcShape[2];
5555 int x,y,z, local_min_count=0;
56
56
5757 //declare and define Iterators for all three dims at src
5858 SrcIterator zs = s_Iter;
5959 SrcIterator ys(zs);
6060 SrcIterator xs(ys);
61
61
6262 //Declare Iterators for all three dims at dest
6363 DestIterator zd = d_Iter;
64
64
6565 for(z = 0; z != d; ++z, ++zs.dim2(), ++zd.dim2())
6666 {
6767 ys = zs;
6868 DestIterator yd(zd);
69
69
7070 for(y = 0; y != h; ++y, ++ys.dim1(), ++yd.dim1())
7171 {
7272 xs = ys;
8484 if(atBorder == NotAtBorder)
8585 {
8686 NeighborhoodCirculator<SrcIterator, Neighborhood3D> c(xs), cend(c);
87
87
8888 do {
8989 if(sa(c) < v)
90 {
90 {
9191 v = sa(c);
9292 o = c.directionBit();
9393 }
103103 RestrictedNeighborhoodCirculator<SrcIterator, Neighborhood3D> c(xs, atBorder), cend(c);
104104 do {
105105 if(sa(c) < v)
106 {
106 {
107107 v = sa(c);
108108 o = c.directionBit();
109109 }
114114 }
115115 while(++c != cend);
116116 }
117 if (o==0) local_min_count++;
117 if (o==0) local_min_count++;
118118 da.set(o, xd);
119119 }//end x-iteration
120120 }//end y-iteration
130130 Neighborhood3D)
131131 {
132132 typedef typename DestAccessor::value_type LabelType;
133
133
134134 //basically needed for iteration and border-checks
135135 int w = srcShape[0], h = srcShape[1], d = srcShape[2];
136136 int x,y,z;
137
137
138138 //declare and define Iterators for all three dims at src
139139 SrcIterator zs = s_Iter;
140140 DestIterator zd = d_Iter;
141
141
142142 // temporary image to store region labels
143143 UnionFindArray<LabelType> labels;
144
144
145145 // initialize the neighborhood traversers
146146 NeighborOffsetCirculator<Neighborhood3D> nc(Neighborhood3D::CausalFirst);
147147 NeighborOffsetCirculator<Neighborhood3D> nce(Neighborhood3D::CausalLast);
171171
172172 for(x = 0; x != w; ++x, ++xs.dim0(), ++xd.dim0())
173173 {
174 LabelType currentIndex = labels.nextFreeIndex(); // default: new region
174 LabelType currentIndex = labels.nextFreeIndex(); // default: new region
175175
176176 //check whether there is a special border treatment to be used or not
177177 AtVolumeBorder atBorder = isAtVolumeBorderCausal(x,y,z,w,h,d);
178
178
179179 //We are not at the border!
180180 if(atBorder == NotAtBorder)
181181 {
182182
183183 nc = NeighborOffsetCirculator<Neighborhood3D>(Neighborhood3D::CausalFirst);
184
184
185185 do
186 {
186 {
187187 // Direction of NTraversr Neighbor's direction bit is pointing
188188 // = Direction of voxel towards us?
189189 if((sa(xs) & nc.directionBit()) || (sa(xs,*nc) & nc.oppositeDirectionBit()))
200200 int j=0;
201201 while(nc.direction() != Neighborhood3D::Error)
202202 {
203 int dummy = x+(*nc)[0]; // prevents an apparently incorrect optimization in gcc 4.8
204 if (dummy<0)
205 {
206 std::cerr << "internal error " << dummy << std::endl;
207 }
208203 // Direction of NTraversr Neighbor's direction bit is pointing
209204 // = Direction of voxel towards us?
210205 if((sa(xs) & nc.directionBit()) || (sa(xs,*nc) & nc.oppositeDirectionBit()))
220215 }
221216
222217 unsigned int count = labels.makeContiguous();
223
218
224219 // pass 2: assign one label to each region (tree)
225220 // so that labels form a consecutive sequence 1, 2, ...
226221 zd = d_Iter;
242237 }
243238
244239
245 /** \addtogroup SeededRegionGrowing
240 /** \addtogroup Superpixels
246241 */
247242 //@{
248243
255250 /** \brief Region Segmentation by means of the watershed algorithm.
256251
257252 This function is deprecated, use \ref watershedsMultiArray() instead.
258
253
259254 <b> Declarations:</b>
260255
261256 \deprecatedAPI{watersheds3D}
284279
285280 use with 3D-Six-Neighborhood:
286281 \code
287 namespace vigra {
288
282 namespace vigra {
283
289284 template <class SrcIterator, class SrcAccessor,class SrcShape,
290285 class DestIterator, class DestAccessor>
291286 unsigned int watersheds3DSix(triple<SrcIterator, SrcShape, SrcAccessor> src,
292287 pair<DestIterator, DestAccessor> dest);
293
288
294289 }
295290 \endcode
296291
297292 use with 3D-TwentySix-Neighborhood:
298293 \code
299 namespace vigra {
300
294 namespace vigra {
295
301296 template <class SrcIterator, class SrcAccessor,class SrcShape,
302297 class DestIterator, class DestAccessor>
303298 unsigned int watersheds3DTwentySix(triple<SrcIterator, SrcShape, SrcAccessor> src,
304299 pair<DestIterator, DestAccessor> dest);
305
300
306301 }
307302 \endcode
308303 \deprecatedEnd
309
304
310305 This function implements the union-find version of the watershed algorithms
311306 as described in
312307
315310
316311 The source volume is a boundary indicator such as the gradient magnitude
317312 of the trace of the \ref boundaryTensor(). Local minima of the boundary indicator
318 are used as region seeds, and all other voxels are recursively assigned to the same
319 region as their lowest neighbor. Pass \ref vigra::NeighborCode3DSix or
320 \ref vigra::NeighborCode3DTwentySix to determine the neighborhood where voxel values
313 are used as region seeds, and all other voxels are recursively assigned to the same
314 region as their lowest neighbor. Pass \ref vigra::NeighborCode3DSix or
315 \ref vigra::NeighborCode3DTwentySix to determine the neighborhood where voxel values
321316 are compared. The voxel type of the input volume must be <tt>LessThanComparable</tt>.
322
317
323318 <b> Usage:</b>
324319
325320 <b>\#include</b> \<vigra/watersheds3D.hxx\><br>
329324
330325 \code
331326 Shape3 shape(w, h, d);
332
327
333328 MultiArray<3, float> src(shape), grad(shape);
334329 ...
335
330
336331 double scale = 1;
337332 gaussianGradientMagnitude(src, grad, scale);
338
333
339334 MultiArray<3, int> labels(shape);
340
335
341336 // find 6-connected regions
342337 int max_region_label = watersheds3DSix(grad, labels);
343338
360355 IntVolume::iterator temp_iter=temp.begin();
361356 for(DVolume::iterator iter=src.begin(); iter!=src.end(); ++iter, ++temp_iter)
362357 *iter = norm(*temp_iter);
363
358
364359 // find 6-connected regions
365360 int max_region_label = vigra::watersheds3DSix(srcMultiArrayRange(src), destMultiArray(dest));
366361
367362 // find 26-connected regions
368363 max_region_label = vigra::watersheds3DTwentySix(srcMultiArrayRange(src), destMultiArray(dest));
369
364
370365 \endcode
371366 <b> Required Interface:</b>
372367 \code
376371
377372 SrcAccessor src_accessor;
378373 DestAccessor dest_accessor;
379
374
380375 // compare src values
381376 src_accessor(src_begin) <= src_accessor(src_begin)
382377
396391 {
397392 //create temporary volume to store the DAG of directions to minima
398393 if ((int)Neighborhood3D::DirectionCount>7){ //If we have 3D-TwentySix Neighborhood
399
394
400395 vigra::MultiArray<3,int> orientationVolume(srcShape);
401396
402 preparewatersheds3D( s_Iter, srcShape, sa,
397 preparewatersheds3D( s_Iter, srcShape, sa,
403398 destMultiArray(orientationVolume).first, destMultiArray(orientationVolume).second,
404399 neighborhood3D);
405
400
406401 return watershedLabeling3D( srcMultiArray(orientationVolume).first, srcShape, srcMultiArray(orientationVolume).second,
407402 d_Iter, da,
408403 neighborhood3D);
409404 }
410405 else{
411
406
412407 vigra::MultiArray<3,unsigned char> orientationVolume(srcShape);
413408
414 preparewatersheds3D( s_Iter, srcShape, sa,
409 preparewatersheds3D( s_Iter, srcShape, sa,
415410 destMultiArray(orientationVolume).first, destMultiArray(orientationVolume).second,
416411 neighborhood3D);
417
412
418413 return watershedLabeling3D( srcMultiArray(orientationVolume).first, srcShape, srcMultiArray(orientationVolume).second,
419414 d_Iter, da,
420415 neighborhood3D);
423418
424419 template <class SrcIterator, class SrcShape, class SrcAccessor,
425420 class DestIterator, class DestAccessor>
426 inline unsigned int watersheds3DSix( triple<SrcIterator, SrcShape, SrcAccessor> src,
421 inline unsigned int watersheds3DSix( triple<SrcIterator, SrcShape, SrcAccessor> src,
427422 pair<DestIterator, DestAccessor> dest)
428423 {
429424 return watersheds3D(src.first, src.second, src.third, dest.first, dest.second, NeighborCode3DSix());
431426
432427 template <class SrcIterator, class SrcShape, class SrcAccessor,
433428 class DestIterator, class DestAccessor>
434 inline unsigned int watersheds3DTwentySix( triple<SrcIterator, SrcShape, SrcAccessor> src,
429 inline unsigned int watersheds3DTwentySix( triple<SrcIterator, SrcShape, SrcAccessor> src,
435430 pair<DestIterator, DestAccessor> dest)
436431 {
437432 return watersheds3D(src.first, src.second, src.third, dest.first, dest.second, NeighborCode3DTwentySix());
439434
440435 template <unsigned int N, class T1, class S1,
441436 class T2, class S2>
442 inline unsigned int
443 watersheds3DSix(MultiArrayView<N, T1, S1> const & source,
437 inline unsigned int
438 watersheds3DSix(MultiArrayView<N, T1, S1> const & source,
444439 MultiArrayView<N, T2, S2> dest)
445440 {
446441 vigra_precondition(source.shape() == dest.shape(),
451446 template <unsigned int N, class T1, class S1,
452447 class T2, class S2>
453448 inline unsigned int
454 watersheds3DTwentySix(MultiArrayView<N, T1, S1> const & source,
449 watersheds3DTwentySix(MultiArrayView<N, T1, S1> const & source,
455450 MultiArrayView<N, T2, S2> dest)
456451 {
457452 vigra_precondition(source.shape() == dest.shape(),
0 SET(TARGETS
0 SET(TARGETS
11 convert
22 subimage
33 invert
2222 ADD_DEPENDENCIES(examples example_${TARGET})
2323 TARGET_LINK_LIBRARIES(example_${TARGET} vigraimpex)
2424 ENDFOREACH(TARGET)
25
26 ADD_SUBDIRECTORY(tutorial)
# Build rules for the tutorial example programs.
#
# Only the targets listed below are currently enabled. Additional tutorial
# sources present in this directory (not built by default): composite,
# dissolve, imageExportInfo_tutorial, imageIO_tutorial,
# imageImportInfo_tutorial, invert_tutorial, mirror_tutorial,
# smooth_convolve, smooth_explicitly, subimage_tutorial, transpose,
# transpose_image_tutorial.
set(TARGETS
    smooth_blockwise
    graph_agglomerative_clustering
)

foreach(TARGET ${TARGETS})
    # Tutorials are excluded from the default build and compiled on demand
    # via the aggregate 'examples' target.
    add_executable(example_${TARGET} EXCLUDE_FROM_ALL ${TARGET})
    add_dependencies(examples example_${TARGET})
    target_link_libraries(example_${TARGET} vigraimpex)
endforeach()
0 #include <iostream>
1
2 #include <vigra/multi_array.hxx>
3 #include <vigra/impex.hxx>
4 #include <vigra/multi_resize.hxx>
5 #include <vigra/colorconversions.hxx>
6 #include <vigra/multi_convolution.hxx>
7 #include <vigra/multi_watersheds.hxx>
8 #include <vigra/multi_gridgraph.hxx>
9 #include <vigra/accumulator.hxx>
10 #include <vigra/adjacency_list_graph.hxx>
11 #include <vigra/graph_algorithms.hxx>
12 #include <vigra/hierarchical_clustering.hxx>
13 #include <vigra/metrics.hxx>
14
15 using namespace vigra;
16
// Tutorial example: segmentation by watershed superpixels followed by
// hierarchical (agglomerative) clustering of the superpixel region
// adjacency graph (RAG).
//
// Usage: <program> infile outfile
// Reads an RGB image from argv[1] (enforced by the precondition below),
// writes a visualization at doubled resolution to argv[2]: watershed
// boundaries are overlaid in red, the boundaries surviving clustering in
// green. Returns 0 on success, 1 on wrong usage or any processing error.
int main (int argc, char ** argv)
{
    // parameters of the hierarchical clustering algorithm
    float sigmaGradMag = 3.0f;   // scale of the Gaussian gradient
    float beta = 0.5f;           // importance of node features relative to edge weights
    float wardness = 0.8f;       // importance of cluster size
    int numClusters = 30;        // desired number of resulting regions (clusters)

    if(argc != 3)
    {
        std::cout << "Usage: " << argv[0] << " infile outfile" << std::endl;
        std::cout << "(supported formats: " << impexListFormats() << ")" << std::endl;
        std::cout << "(only color images)" << std::endl;

        return 1;
    }
    try
    {
        // read metadata of image file given in argv[1]
        ImageImportInfo info(argv[1]);

        vigra_precondition(info.numBands() == 3, "an RGB image is required.");

        // instantiate image arrays of appropriate size
        MultiArray<2, TinyVector<float, 3> > imageArrayRGB(info.shape()),
                                             imageArrayLab(info.shape());

        // read image data
        importImage(info, imageArrayRGB);

        // convert to Lab color space for better color similarity estimates
        transformMultiArray(imageArrayRGB, imageArrayLab, RGB2LabFunctor<float>());

        // compute gradient magnitude as an indicator of edge strength
        MultiArray<2, float> gradMag(imageArrayLab.shape());
        gaussianGradientMagnitude(imageArrayLab, gradMag, sigmaGradMag);

        // create watershed superpixels with the fast union-find algorithm;
        // we use a NodeMap (a subclass of MultiArray) to store the labels so
        // that they can be passed to hierarchicalClustering() directly
        MultiArray<2, unsigned int> labelArray(gradMag.shape());
        unsigned int max_label =
            watershedsMultiArray(gradMag, labelArray, DirectNeighborhood,
                                 WatershedOptions().unionFind());

        // double the image resolution for better visualization of the results
        MultiArray<2, TinyVector<float, 3> > imageArrayBig(info.shape()*2-Shape2(1));
        resizeMultiArraySplineInterpolation(imageArrayRGB, imageArrayBig);

        // visualize the watersheds as a red overlay over the enlarged image
        regionImageToCrackEdgeImage(labelArray, imageArrayBig,
                                    RGBValue<float>( 255, 0, 0 ), EdgeOverlayOnly);

        // create grid-graph of appropriate size
        typedef GridGraph<2, undirected_tag > ImageGraph;
        ImageGraph imageGraph(labelArray.shape());

        // construct empty region adjacency graph (RAG) for the superpixels
        typedef AdjacencyListGraph RAG;
        RAG rag;

        // create mapping 'affiliatedEdges' from edges in the RAG to
        // corresponding edges in imageGraph and build the RAG
        RAG::EdgeMap<std::vector<ImageGraph::Edge>> affiliatedEdges(rag);
        makeRegionAdjacencyGraph(imageGraph, labelArray, rag, affiliatedEdges);

        // create edge maps for weights and lengths of the RAG edges (zero initialized)
        RAG::EdgeMap<float> edgeWeights(rag),
                            edgeLengths(rag);

        // iterate over all RAG edges (this loop follows a standard LEMON idiom)
        for(RAG::EdgeIt rag_edge(rag); rag_edge != lemon::INVALID; ++rag_edge)
        {
            // iterate over all grid edges that constitute the present RAG edge
            for(unsigned int k = 0; k < affiliatedEdges[*rag_edge].size(); ++k)
            {
                // look up the current grid edge and its end points
                auto const & grid_edge = affiliatedEdges[*rag_edge][k];
                auto start = imageGraph.u(grid_edge),
                     end = imageGraph.v(grid_edge);

                // compute gradient by linear interpolation between end points
                double grid_edge_gradient = 0.5 * (gradMag[start] + gradMag[end]);
                // aggregate the total
                edgeWeights[*rag_edge] += grid_edge_gradient;
            }

            // the length of the RAG edge equals the number of constituent grid edges
            edgeLengths[*rag_edge] = affiliatedEdges[*rag_edge].size();
            // define edge weight by the average gradient
            edgeWeights[*rag_edge] /= edgeLengths[*rag_edge];
        }

        // determine size and average color of each superpixel
        using namespace acc;
        AccumulatorChainArray<CoupledArrays<2, TinyVector<float, 3>, unsigned int>,
                              Select<DataArg<1>, LabelArg<2>, // where to look for data and region labels
                                     Count, Mean> >           // what statistics to compute
            features;
        extractFeatures(imageArrayLab, labelArray, features);

        // copy superpixel features into NodeMaps to be passed to hierarchicalClustering();
        // note the inclusive bound: region labels run from 0 to max_label
        RAG::NodeMap<TinyVector<float, 3>> meanColor(rag);
        RAG::NodeMap<unsigned int> regionSize(rag);
        for(unsigned int k=0; k<=max_label; ++k)
        {
            meanColor[k] = get<Mean>(features, k);
            regionSize[k] = get<Count>(features, k);
        }

        // create a node map for the new (clustered) region labels and perform
        // clustering to remove unimportant watershed edges
        RAG::NodeMap<unsigned int> nodeLabels(rag);
        hierarchicalClustering(rag, // input: the superpixel adjacency graph
                               edgeWeights, edgeLengths, meanColor, regionSize, // features
                               nodeLabels, // output: a cluster labeling of the RAG
                               ClusteringOptions().minRegionCount(numClusters)
                                                  .nodeFeatureImportance(beta)
                                                  .sizeImportance(wardness)
                                                  .nodeFeatureMetric(metrics::L2Norm)
                               );

        // create label image with the new labels (in place: each old label
        // is replaced by the label of the cluster it was merged into)
        transformMultiArray(labelArray, labelArray,
                            [&nodeLabels](unsigned int oldlabel)
                            {
                                return nodeLabels[oldlabel];
                            });

        // visualize the salient edges as a green overlay
        regionImageToCrackEdgeImage(labelArray, imageArrayBig,
                                    RGBValue<float>( 0, 255, 0), EdgeOverlayOnly);

        // write result into image file given by argv[2]
        exportImage(imageArrayBig, argv[2]);

    }
    catch (std::exception & e)
    {
        // catch any errors that might have occurred and print their reason
        std::cout << e.what() << std::endl;
        return 1;
    }
    return 0;
}
0 /************************************************************************/
1 /* */
2 /* Copyright 1998-2002 by Ullrich Koethe */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
/*        http://ukoethe.github.io/vigra/                               */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34
35 #include <vigra/multi_array.hxx>
36 #include <vigra/impex.hxx>
37 #include <vigra/convolution.hxx>
38 #include <vigra/multi_blockwise.hxx>
39 #include <iostream>
40
41 using namespace vigra;
42
43 int main (int argc, char ** argv)
44 {
45 if(argc != 3)
46 {
47 std::cout << "Usage: " << argv[0] << " infile outfile" << std::endl;
48 std::cout << "(supported formats: " << impexListFormats() << ")" << std::endl;
49
50 return 1;
51 }
52 try
53 {
54 // read image given as first argument
55 ImageImportInfo info(argv[1]);
56
57 // instantiate arrays for image data and for smoothed image of appropriate size
58 if (info.isGrayscale())
59 {
60 MultiArray<2, float> imageArray(info.shape()),
61 exportArray(info.shape());
62
63 // copy image data into array
64 importImage(info, imageArray);
65
66 gaussianSmoothMultiArray(imageArray, exportArray,
67 BlockwiseConvolutionOptions<2>().stdDev(2.0));
68
69 // write image data to the file given as second argument
70 exportImage(exportArray, ImageExportInfo(argv[2]));
71
72 }
73 else
74 {
75 MultiArray<2, RGBValue<float> > imageArray(info.shape()),
76 exportArray(info.shape());
77
78 // copy image data into array
79 importImage(info, imageArray);
80
81 gaussianSmoothMultiArray(imageArray, exportArray,
82 BlockwiseConvolutionOptions<2>().stdDev(2.0));
83
84 // write image data to the file given as second argument
85 exportImage(exportArray, ImageExportInfo(argv[2]));
86 }
87
88
89 }
90 catch (std::exception & e)
91 {
92 // catch any errors that might have occurred and print their reason
93 std::cout << e.what() << std::endl;
94 return 1;
95 }
96 return 0;
97 }
Binary diff not shown
00 IF(ZLIB_FOUND)
11 ADD_DEFINITIONS(-DHasZLIB)
2 INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})
2 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${ZLIB_INCLUDE_DIR})
33 ENDIF(ZLIB_FOUND)
44
55 IF(PNG_FOUND)
66 ADD_DEFINITIONS(-DHasPNG)
7 INCLUDE_DIRECTORIES(${PNG_INCLUDE_DIR})
7 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${PNG_INCLUDE_DIR})
88 ENDIF(PNG_FOUND)
99
1010 IF(JPEG_FOUND)
1111 ADD_DEFINITIONS(-DHasJPEG)
12 INCLUDE_DIRECTORIES(${JPEG_INCLUDE_DIR})
12 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${JPEG_INCLUDE_DIR})
1313 ENDIF(JPEG_FOUND)
1414
1515 IF(TIFF_FOUND)
1616 ADD_DEFINITIONS(-DHasTIFF)
17 INCLUDE_DIRECTORIES(${TIFF_INCLUDE_DIR})
17 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${TIFF_INCLUDE_DIR})
1818 ENDIF(TIFF_FOUND)
1919
20 IF(OPENEXR_FOUND)
21 ADD_DEFINITIONS(-DHasEXR)
22 INCLUDE_DIRECTORIES(${OPENEXR_INCLUDE_DIR})
23 ENDIF(OPENEXR_FOUND)
20 IF(OpenEXR_FOUND)
21 ADD_DEFINITIONS(-DHasEXR ${OPENEXR_CPPFLAGS})
22 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${OPENEXR_INCLUDE_DIR})
23 ENDIF(OpenEXR_FOUND)
2424
2525 IF(HDF5_FOUND)
2626 ADD_DEFINITIONS(-DHasHDF5 ${HDF5_CPPFLAGS})
27 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
27 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
2828 ENDIF(HDF5_FOUND)
2929
3030 IF (MSVC OR MINGW)
6363 viff.cxx
6464 void_vector.cxx)
6565
66 set(SOVERSION 6) # increment this after changing the vigraimpex library
66 set(SOVERSION 11) # increment this after changing the vigraimpex library
6767 IF(MACOSX)
6868 SET_TARGET_PROPERTIES(vigraimpex PROPERTIES VERSION ${SOVERSION}.${vigra_version}
6969 SOVERSION ${SOVERSION} INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}")
255255 std::string
256256 CodecManager::getEncoderType( const std::string & filename,
257257 const std::string & fType,
258 const std::string & mode ) const
258 const std::string & ) const
259259 {
260260 std::string fileType = fType;
261261
160160 case LZ4:
161161 {
162162 int sourceLen = ::LZ4_decompress_fast(source, dest, destSize);
163 vigra_postcondition(sourceLen == srcSize, "uncompress(): lz4 decompression failed.");
163 vigra_postcondition(sourceLen >= 0 && static_cast<unsigned>(sourceLen) == srcSize, "uncompress(): lz4 decompression failed.");
164164 break;
165165 }
166166
5959 &H5Sclose, "HDF5ImportInfo(): could not access dataset dataspace.");
6060 m_dimensions = H5Sget_simple_extent_ndims(dataspace_handle);
6161 //m_dimensions = dset.getSpace().getSimpleExtentNdims();
62
62
6363 //why?
6464 //vigra_precondition( m_dimensions>=2, "HDF5ImportInfo(): Number of dimensions is lower than 2. Not an image!" );
6565
7575 else if(datasize == 8)
7676 m_pixeltype = "DOUBLE";
7777 }
78 else if(dataclass == H5T_INTEGER)
78 else if(dataclass == H5T_INTEGER)
7979 {
8080 if(datasign == H5T_SGN_NONE)
8181 {
150150 return m_pixeltype.c_str();
151151 }
152152
153 MultiArrayIndex HDF5ImportInfo::shapeOfDimension(const int dim) const
154 {
155 return MultiArrayIndex(m_dims[dim]);
156 }
157
158 MultiArrayIndex HDF5ImportInfo::numDimensions() const
159 {
160 return MultiArrayIndex(m_dimensions);
161 }
162
163 const std::string & HDF5ImportInfo::getPathInFile() const
164 {
165 return m_path;
166 }
167
168 const std::string & HDF5ImportInfo::getFilePath() const
169 {
170 return m_filename;
171 }
172
173 hid_t HDF5ImportInfo::getH5FileHandle() const
174 {
175 return m_file_handle;
176 }
177
178 hid_t HDF5ImportInfo::getDatasetHandle() const
179 {
180 return m_dataset_handle;
// Return the extents of all dataset dimensions (as stored in m_dims).
ArrayVector<hsize_t> const & HDF5ImportInfo::shape() const
{
    return m_dims;
}
157
// Return the extent of dimension 'dim', converted to MultiArrayIndex.
// Note: 'dim' is not range-checked here; callers must pass a valid index.
MultiArrayIndex HDF5ImportInfo::shapeOfDimension(const int dim) const
{
    return MultiArrayIndex(m_dims[dim]);
}
162
// Return the number of dimensions of the dataset, converted to MultiArrayIndex.
MultiArrayIndex HDF5ImportInfo::numDimensions() const
{
    return MultiArrayIndex(m_dimensions);
}
167
// Return the path of the dataset inside the HDF5 file.
const std::string & HDF5ImportInfo::getPathInFile() const
{
    return m_path;
}
172
// Return the name/path of the HDF5 file this info object refers to.
const std::string & HDF5ImportInfo::getFilePath() const
{
    return m_filename;
}
177
// Return the raw HDF5 file handle (ownership stays with this object).
hid_t HDF5ImportInfo::getH5FileHandle() const
{
    return m_file_handle;
}
182
183 hid_t HDF5ImportInfo::getDatasetHandle() const
184 {
185 return m_dataset_handle;
181186 }
182187
183188 H5O_type_t HDF5_get_type(hid_t loc_id, const char* name)
214219
215220 // callback function for listAttributes(), called via HDF5File::ls_H5Literate()
216221 extern "C"
217 herr_t HDF5_listAttributes_inserter_callback(hid_t loc_id, const char* name,
222 herr_t HDF5_listAttributes_inserter_callback(hid_t, const char* name,
218223 const H5A_info_t*, void* operator_data)
219224 {
220225 HDF5_ls_insert(operator_data, name);
00 /*
11 LZ4 - Fast LZ compression algorithm
2 Copyright (C) 2011-2013, Yann Collet.
2 Copyright (C) 2011-2015, Yann Collet.
3
34 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
45
56 Redistribution and use in source and binary forms, with or without
2627 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2728
2829 You can contact the author at :
29 - LZ4 source repository : http://code.google.com/p/lz4/
30 - LZ4 source repository : https://github.com/Cyan4973/lz4
3031 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
3132 */
3233
33 //**************************************
34 // Tuning parameters
35 //**************************************
36 // MEMORY_USAGE :
37 // Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
38 // Increasing memory usage improves compression ratio
39 // Reduced memory usage can improve speed, due to cache effect
40 // Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
41 #define MEMORY_USAGE 14
42
43 // HEAPMODE :
44 // Select how default compression functions will allocate memory for their hash table,
45 // in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
34
35 /**************************************
36 * Tuning parameters
37 **************************************/
38 /*
39 * HEAPMODE :
40 * Select how default compression functions will allocate memory for their hash table,
41 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
42 */
4643 #define HEAPMODE 0
4744
48
49 //**************************************
50 // CPU Feature Detection
51 //**************************************
52 // 32 or 64 bits ?
53 #if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
54 || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
55 || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
56 || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) // Detects 64 bits mode
57 # define LZ4_ARCH64 1
58 #else
59 # define LZ4_ARCH64 0
60 #endif
61
62 // Little Endian or Big Endian ?
63 // Overwrite the #define below if you know your architecture endianess
64 #if defined (__GLIBC__)
65 # include <endian.h>
66 # if (__BYTE_ORDER == __BIG_ENDIAN)
67 # define LZ4_BIG_ENDIAN 1
68 # endif
69 #elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
70 # define LZ4_BIG_ENDIAN 1
71 #elif defined(__sparc) || defined(__sparc__) \
72 || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
73 || defined(__hpux) || defined(__hppa) \
74 || defined(_MIPSEB) || defined(__s390__)
75 # define LZ4_BIG_ENDIAN 1
76 #else
77 // Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
78 #endif
79
80 // Unaligned memory access is automatically enabled for "common" CPU, such as x86.
81 // For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
82 // If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
83 #if defined(__ARM_FEATURE_UNALIGNED)
84 # define LZ4_FORCE_UNALIGNED_ACCESS 1
85 #endif
86
87 // Define this parameter if your target system or compiler does not support hardware bit count
88 #if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
45 /*
46 * ACCELERATION_DEFAULT :
47 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
48 */
49 #define ACCELERATION_DEFAULT 1
50
51
52 /**************************************
53 * CPU Feature Detection
54 **************************************/
55 /*
56 * LZ4_FORCE_SW_BITCOUNT
57 * Define this parameter if your target system or compiler does not support hardware bit count
58 */
59 #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
8960 # define LZ4_FORCE_SW_BITCOUNT
9061 #endif
9162
92 // BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
93 // This option may provide a small boost to performance for some big endian cpu, although probably modest.
94 // You may set this option to 1 if data will remain within closed environment.
95 // This option is useless on Little_Endian CPU (such as x86)
96 //#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
97
98
99 //**************************************
100 // Compiler Options
101 //**************************************
102 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) // C99
103 /* "restrict" is a known keyword */
63
64 /**************************************
65 * Includes
66 **************************************/
67 #include "lz4.h"
68
69
70 /**************************************
71 * Compiler Options
72 **************************************/
73 #ifdef _MSC_VER /* Visual Studio */
74 # define FORCE_INLINE static __forceinline
75 # include <intrin.h>
76 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
77 # pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
10478 #else
105 # define restrict // Disable restrict
106 #endif
107
108 #ifdef _MSC_VER // Visual Studio
109 # define FORCE_INLINE static __forceinline
110 # include <intrin.h> // For Visual 2005
111 # if LZ4_ARCH64 // 64-bits
112 # pragma intrinsic(_BitScanForward64) // For Visual 2005
113 # pragma intrinsic(_BitScanReverse64) // For Visual 2005
114 # else // 32-bits
115 # pragma intrinsic(_BitScanForward) // For Visual 2005
116 # pragma intrinsic(_BitScanReverse) // For Visual 2005
117 # endif
118 # pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
119 #else
120 # ifdef __GNUC__
121 # define FORCE_INLINE static inline __attribute__((always_inline))
79 # if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
80 # if defined(__GNUC__) || defined(__clang__)
81 # define FORCE_INLINE static inline __attribute__((always_inline))
82 # else
83 # define FORCE_INLINE static inline
84 # endif
12285 # else
123 # define FORCE_INLINE static inline
124 # endif
125 #endif
126
127 #ifdef _MSC_VER
128 # define lz4_bswap16(x) _byteswap_ushort(x)
129 #else
130 # define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
131 #endif
132
133 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
134
135 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
86 # define FORCE_INLINE static
87 # endif /* __STDC_VERSION__ */
88 #endif /* _MSC_VER */
89
90 /* LZ4_GCC_VERSION is defined into lz4.h */
91 #if (LZ4_GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
13692 # define expect(expr,value) (__builtin_expect ((expr),(value)) )
13793 #else
13894 # define expect(expr,value) (expr)
14298 #define unlikely(expr) expect((expr) != 0, 0)
14399
144100
145 //**************************************
146 // Memory routines
147 //**************************************
148 #include <stdlib.h> // malloc, calloc, free
101 /**************************************
102 * Memory routines
103 **************************************/
104 #include <stdlib.h> /* malloc, calloc, free */
149105 #define ALLOCATOR(n,s) calloc(n,s)
150106 #define FREEMEM free
151 #include <string.h> // memset, memcpy
107 #include <string.h> /* memset, memcpy */
152108 #define MEM_INIT memset
153109
154110
155 //**************************************
156 // Includes
157 //**************************************
158 #include "lz4.h"
159
160
161 //**************************************
162 // Basic Types
163 //**************************************
164 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
111 /**************************************
112 * Basic Types
113 **************************************/
114 #if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
165115 # include <stdint.h>
166116 typedef uint8_t BYTE;
167117 typedef uint16_t U16;
176126 typedef unsigned long long U64;
177127 #endif
178128
179 #if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
180 # define _PACKED __attribute__ ((packed))
181 #else
182 # define _PACKED
183 #endif
184
185 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
186 # if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
187 # pragma pack(1)
188 # else
189 # pragma pack(push, 1)
190 # endif
191 #endif
192
193 typedef struct { U16 v; } _PACKED U16_S;
194 typedef struct { U32 v; } _PACKED U32_S;
195 typedef struct { U64 v; } _PACKED U64_S;
196 typedef struct {size_t v;} _PACKED size_t_S;
197
198 #if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
199 # if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
200 # pragma pack(0)
201 # else
202 # pragma pack(pop)
203 # endif
204 #endif
205
206 #define A16(x) (((U16_S *)(x))->v)
207 #define A32(x) (((U32_S *)(x))->v)
208 #define A64(x) (((U64_S *)(x))->v)
209 #define AARCH(x) (((size_t_S *)(x))->v)
210
211
212 //**************************************
213 // Constants
214 //**************************************
215 #define LZ4_HASHLOG (MEMORY_USAGE-2)
216 #define HASHTABLESIZE (1 << MEMORY_USAGE)
217 #define HASHNBCELLS4 (1 << LZ4_HASHLOG)
218
129
130 /**************************************
131 * Reading and writing into memory
132 **************************************/
133 #define STEPSIZE sizeof(size_t)
134
135 static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }
136
/* Runtime endianness probe: the first byte of the 32-bit value 1 is 1 on a
   little-endian CPU and 0 on a big-endian CPU. */
static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    return one.c[0];
}
142
143
144 static U16 LZ4_read16(const void* memPtr)
145 {
146 U16 val16;
147 memcpy(&val16, memPtr, 2);
148 return val16;
149 }
150
/* Read a 16-bit little-endian value from (possibly unaligned) memory,
   independent of the host byte order. */
static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian())
    {
        /* native order already matches the on-disk format */
        return LZ4_read16(memPtr);
    }
    else
    {
        /* big-endian host: assemble the value byte by byte */
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}
163
/* Store a 16-bit value in little-endian order to (possibly unaligned)
   memory, independent of the host byte order. */
static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian())
    {
        /* native order already matches the on-disk format */
        memcpy(memPtr, &value, 2);
    }
    else
    {
        /* big-endian host: emit the bytes explicitly, low byte first */
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}
177
178 static U32 LZ4_read32(const void* memPtr)
179 {
180 U32 val32;
181 memcpy(&val32, memPtr, 4);
182 return val32;
183 }
184
185 static U64 LZ4_read64(const void* memPtr)
186 {
187 U64 val64;
188 memcpy(&val64, memPtr, 8);
189 return val64;
190 }
191
/* Read one native machine word from memory: 8 bytes when LZ4_64bits()
   reports a 64-bit pointer size, 4 bytes otherwise. */
static size_t LZ4_read_ARCH(const void* p)
{
    if (LZ4_64bits())
        return (size_t)LZ4_read64(p);
    else
        return (size_t)LZ4_read32(p);
}
199
200
201 static void LZ4_copy4(void* dstPtr, const void* srcPtr) { memcpy(dstPtr, srcPtr, 4); }
202
203 static void LZ4_copy8(void* dstPtr, const void* srcPtr) { memcpy(dstPtr, srcPtr, 8); }
204
/* Customized version of memcpy: copies in 8-byte chunks until dstEnd is
   reached, and may therefore overwrite up to 7 bytes beyond dstEnd --
   callers must guarantee that this slack space is writable. */
static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* e = (BYTE*)dstEnd;
    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
}
213
214
215 /**************************************
216 * Common Constants
217 **************************************/
219218 #define MINMATCH 4
220219
221220 #define COPYLENGTH 8
222221 #define LASTLITERALS 5
223222 #define MFLIMIT (COPYLENGTH+MINMATCH)
224 const int LZ4_minLength = (MFLIMIT+1);
225
226 #define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
227 #define SKIPSTRENGTH 6 // Increasing this value will make the compression run slower on incompressible data
223 static const int LZ4_minLength = (MFLIMIT+1);
224
225 #define KB *(1 <<10)
226 #define MB *(1 <<20)
227 #define GB *(1U<<30)
228228
229229 #define MAXD_LOG 16
230230 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
234234 #define RUN_BITS (8-ML_BITS)
235235 #define RUN_MASK ((1U<<RUN_BITS)-1)
236236
237 #define KB *(1U<<10)
238 #define MB *(1U<<20)
239 #define GB *(1U<<30)
240
241
242 //**************************************
243 // Structures and local types
244 //**************************************
245
237
238 /**************************************
239 * Common Utils
240 **************************************/
241 #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
242
243
244 /**************************************
245 * Common functions
246 **************************************/
247 static unsigned LZ4_NbCommonBytes (register size_t val)
248 {
249 if (LZ4_isLittleEndian())
250 {
251 if (LZ4_64bits())
252 {
253 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
254 unsigned long r = 0;
255 _BitScanForward64( &r, (U64)val );
256 return (int)(r>>3);
257 # elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
258 return (__builtin_ctzll((U64)val) >> 3);
259 # else
260 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
261 return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
262 # endif
263 }
264 else /* 32 bits */
265 {
266 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
267 unsigned long r;
268 _BitScanForward( &r, (U32)val );
269 return (int)(r>>3);
270 # elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
271 return (__builtin_ctz((U32)val) >> 3);
272 # else
273 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
274 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
275 # endif
276 }
277 }
278 else /* Big Endian CPU */
279 {
280 if (LZ4_64bits())
281 {
282 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
283 unsigned long r = 0;
284 _BitScanReverse64( &r, val );
285 return (unsigned)(r>>3);
286 # elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
287 return (__builtin_clzll((U64)val) >> 3);
288 # else
289 unsigned r;
290 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
291 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
292 r += (!val);
293 return r;
294 # endif
295 }
296 else /* 32 bits */
297 {
298 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
299 unsigned long r = 0;
300 _BitScanReverse( &r, (unsigned long)val );
301 return (unsigned)(r>>3);
302 # elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
303 return (__builtin_clz((U32)val) >> 3);
304 # else
305 unsigned r;
306 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
307 r += (!val);
308 return r;
309 # endif
310 }
311 }
312 }
313
314 static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
315 {
316 const BYTE* const pStart = pIn;
317
318 while (likely(pIn<pInLimit-(STEPSIZE-1)))
319 {
320 size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
321 if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
322 pIn += LZ4_NbCommonBytes(diff);
323 return (unsigned)(pIn - pStart);
324 }
325
326 if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
327 if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
328 if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
329 return (unsigned)(pIn - pStart);
330 }
331
332
333 #ifndef LZ4_COMMONDEFS_ONLY
334 /**************************************
335 * Local Constants
336 **************************************/
337 #define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
338 #define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
339 #define HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
340
341 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
342 static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
343
344
345 /**************************************
346 * Local Structures and types
347 **************************************/
246348 typedef struct {
247 U32 hashTable[HASHNBCELLS4];
248 const BYTE* bufferStart;
249 const BYTE* base;
250 const BYTE* nextBlock;
251 } LZ4_Data_Structure;
252
253 typedef enum { notLimited = 0, limited = 1 } limitedOutput_directive;
349 U32 hashTable[HASH_SIZE_U32];
350 U32 currentOffset;
351 U32 initCheck;
352 const BYTE* dictionary;
353 BYTE* bufferStart; /* obsolete, used for slideInputBuffer */
354 U32 dictSize;
355 } LZ4_stream_t_internal;
356
357 typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
254358 typedef enum { byPtr, byU32, byU16 } tableType_t;
255359
256 typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;
360 typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
361 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
257362
258363 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
259364 typedef enum { full = 0, partial = 1 } earlyEnd_directive;
260365
261366
262 //**************************************
263 // Architecture-specific macros
264 //**************************************
265 #define STEPSIZE sizeof(size_t)
266 #define LZ4_COPYSTEP(d,s) { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
267 #define LZ4_COPY8(d,s) { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
268 #define LZ4_SECURECOPY(d,s,e) { if ((STEPSIZE==4)||(d<e)) LZ4_WILDCOPY(d,s,e); }
269
270 #if LZ4_ARCH64 // 64-bit
271 # define HTYPE U32
272 # define INITBASE(base) const BYTE* const base = ip
273 #else // 32-bit
274 # define HTYPE const BYTE*
275 # define INITBASE(base) const int base = 0
276 #endif
277
278 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
279 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
280 # define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
281 #else // Little Endian
282 # define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
283 # define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
284 #endif
285
286
287 //**************************************
288 // Macros
289 //**************************************
290 #define LZ4_WILDCOPY(d,s,e) { do { LZ4_COPY8(d,s) } while (d<e); } // at the end, d>=e;
291
292
293 //****************************
294 // Private functions
295 //****************************
296 #if LZ4_ARCH64
297
298 FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
299 {
300 # if defined(LZ4_BIG_ENDIAN)
301 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
302 unsigned long r = 0;
303 _BitScanReverse64( &r, val );
304 return (int)(r>>3);
305 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
306 return (__builtin_clzll(val) >> 3);
307 # else
308 int r;
309 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
310 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
311 r += (!val);
312 return r;
313 # endif
314 # else
315 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
316 unsigned long r = 0;
317 _BitScanForward64( &r, val );
318 return (int)(r>>3);
319 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
320 return (__builtin_ctzll(val) >> 3);
321 # else
322 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
323 return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
324 # endif
325 # endif
326 }
327
328 #else
329
330 FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
331 {
332 # if defined(LZ4_BIG_ENDIAN)
333 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
334 unsigned long r = 0;
335 _BitScanReverse( &r, val );
336 return (int)(r>>3);
337 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
338 return (__builtin_clz(val) >> 3);
339 # else
340 int r;
341 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
342 r += (!val);
343 return r;
344 # endif
345 # else
346 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
347 unsigned long r;
348 _BitScanForward( &r, val );
349 return (int)(r>>3);
350 # elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
351 return (__builtin_ctz(val) >> 3);
352 # else
353 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
354 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
355 # endif
356 # endif
357 }
358
359 #endif
360
361
362 //****************************
363 // Compression functions
364 //****************************
365 FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
367 /**************************************
368 * Local Utils
369 **************************************/
370 int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
371 int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
372 int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
373
374
375
376 /********************************
377 * Compression functions
378 ********************************/
379
380 static U32 LZ4_hashSequence(U32 sequence, tableType_t const tableType)
366381 {
367382 if (tableType == byU16)
368383 return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
370385 return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
371386 }
372387
373 FORCE_INLINE int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
374
375 FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
388 static const U64 prime5bytes = 889523592379ULL;
389 static U32 LZ4_hashSequence64(size_t sequence, tableType_t const tableType)
390 {
391 const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
392 const U32 hashMask = (1<<hashLog) - 1;
393 return ((sequence * prime5bytes) >> (40 - hashLog)) & hashMask;
394 }
395
396 static U32 LZ4_hashSequenceT(size_t sequence, tableType_t const tableType)
397 {
398 if (LZ4_64bits())
399 return LZ4_hashSequence64(sequence, tableType);
400 return LZ4_hashSequence((U32)sequence, tableType);
401 }
402
403 static U32 LZ4_hashPosition(const void* p, tableType_t tableType) { return LZ4_hashSequenceT(LZ4_read_ARCH(p), tableType); }
404
405 static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
376406 {
377407 switch (tableType)
378408 {
379 case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
380 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
381 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
382 }
383 }
384
385 FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
409 case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
410 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
411 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
412 }
413 }
414
415 static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
386416 {
387417 U32 h = LZ4_hashPosition(p, tableType);
388418 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
389419 }
390420
391 FORCE_INLINE const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
421 static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
392422 {
393423 if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
394424 if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
395 { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } // default, to ensure a return
396 }
397
398 FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
425 { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
426 }
427
428 static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
399429 {
400430 U32 h = LZ4_hashPosition(p, tableType);
401431 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
402432 }
403433
404
405434 FORCE_INLINE int LZ4_compress_generic(
406 void* ctx,
407 const char* source,
408 char* dest,
409 int inputSize,
410 int maxOutputSize,
411
412 limitedOutput_directive limitedOutput,
413 tableType_t tableType,
414 prefix64k_directive prefix)
415 {
435 void* const ctx,
436 const char* const source,
437 char* const dest,
438 const int inputSize,
439 const int maxOutputSize,
440 const limitedOutput_directive outputLimited,
441 const tableType_t tableType,
442 const dict_directive dict,
443 const dictIssue_directive dictIssue,
444 const U32 acceleration)
445 {
446 LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;
447
416448 const BYTE* ip = (const BYTE*) source;
417 const BYTE* const base = (prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->base : (const BYTE*) source;
418 const BYTE* const lowLimit = ((prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->bufferStart : (const BYTE*)source);
449 const BYTE* base;
450 const BYTE* lowLimit;
451 const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
452 const BYTE* const dictionary = dictPtr->dictionary;
453 const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
454 const size_t dictDelta = dictEnd - (const BYTE*)source;
419455 const BYTE* anchor = (const BYTE*) source;
420456 const BYTE* const iend = ip + inputSize;
421457 const BYTE* const mflimit = iend - MFLIMIT;
422458 const BYTE* const matchlimit = iend - LASTLITERALS;
423459
424460 BYTE* op = (BYTE*) dest;
425 BYTE* const oend = op + maxOutputSize;
426
427 int length;
428 const int skipStrength = SKIPSTRENGTH;
461 BYTE* const olimit = op + maxOutputSize;
462
429463 U32 forwardH;
430
431 // Init conditions
432 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; // Unsupported input size, too large (or negative)
433 if ((prefix==withPrefix) && (ip != ((LZ4_Data_Structure*)ctx)->nextBlock)) return 0; // must continue from end of previous block
434 if (prefix==withPrefix) ((LZ4_Data_Structure*)ctx)->nextBlock=iend; // do it now, due to potential early exit
435 if ((tableType == byU16) && (inputSize>=LZ4_64KLIMIT)) return 0; // Size too large (not within 64K limit)
436 if (inputSize<LZ4_minLength) goto _last_literals; // Input too small, no compression (all literals)
437
438 // First Byte
464 size_t refDelta=0;
465
466 /* Init conditions */
467 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
468 switch(dict)
469 {
470 case noDict:
471 default:
472 base = (const BYTE*)source;
473 lowLimit = (const BYTE*)source;
474 break;
475 case withPrefix64k:
476 base = (const BYTE*)source - dictPtr->currentOffset;
477 lowLimit = (const BYTE*)source - dictPtr->dictSize;
478 break;
479 case usingExtDict:
480 base = (const BYTE*)source - dictPtr->currentOffset;
481 lowLimit = (const BYTE*)source;
482 break;
483 }
484 if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
485 if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
486
487 /* First Byte */
439488 LZ4_putPosition(ip, ctx, tableType, base);
440489 ip++; forwardH = LZ4_hashPosition(ip, tableType);
441490
442 // Main Loop
491 /* Main Loop */
443492 for ( ; ; )
444493 {
445 int findMatchAttempts = (1U << skipStrength) + 3;
446 const BYTE* forwardIp = ip;
447 const BYTE* ref;
494 const BYTE* match;
448495 BYTE* token;
449
450 // Find a match
451 do {
452 U32 h = forwardH;
453 int step = findMatchAttempts++ >> skipStrength;
454 ip = forwardIp;
455 forwardIp = ip + step;
456
457 if unlikely(forwardIp > mflimit) { goto _last_literals; }
458
459 forwardH = LZ4_hashPosition(forwardIp, tableType);
460 ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
461 LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
462
463 } while ((ref + MAX_DISTANCE < ip) || (A32(ref) != A32(ip)));
464
465 // Catch up
466 while ((ip>anchor) && (ref > lowLimit) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
467
468 // Encode Literal length
469 length = (int)(ip - anchor);
470 token = op++;
471 if ((limitedOutput) && unlikely(op + length + (2 + 1 + LASTLITERALS) + (length/255) > oend)) return 0; // Check output limit
472 if (length>=(int)RUN_MASK)
473 {
474 int len = length-RUN_MASK;
475 *token=(RUN_MASK<<ML_BITS);
476 for(; len >= 255 ; len-=255) *op++ = 255;
477 *op++ = (BYTE)len;
478 }
479 else *token = (BYTE)(length<<ML_BITS);
480
481 // Copy Literals
482 { BYTE* end=(op)+(length); LZ4_WILDCOPY(op,anchor,end); op=end; }
496 {
497 const BYTE* forwardIp = ip;
498 unsigned step = 1;
499 unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
500
501 /* Find a match */
502 do {
503 U32 h = forwardH;
504 ip = forwardIp;
505 forwardIp += step;
506 step = (searchMatchNb++ >> LZ4_skipTrigger);
507
508 if (unlikely(forwardIp > mflimit)) goto _last_literals;
509
510 match = LZ4_getPositionOnHash(h, ctx, tableType, base);
511 if (dict==usingExtDict)
512 {
513 if (match<(const BYTE*)source)
514 {
515 refDelta = dictDelta;
516 lowLimit = dictionary;
517 }
518 else
519 {
520 refDelta = 0;
521 lowLimit = (const BYTE*)source;
522 }
523 }
524 forwardH = LZ4_hashPosition(forwardIp, tableType);
525 LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
526
527 } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
528 || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
529 || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
530 }
531
532 /* Catch up */
533 while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
534
535 {
536 /* Encode Literal length */
537 unsigned litLength = (unsigned)(ip - anchor);
538 token = op++;
539 if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
540 return 0; /* Check output limit */
541 if (litLength>=RUN_MASK)
542 {
543 int len = (int)litLength-RUN_MASK;
544 *token=(RUN_MASK<<ML_BITS);
545 for(; len >= 255 ; len-=255) *op++ = 255;
546 *op++ = (BYTE)len;
547 }
548 else *token = (BYTE)(litLength<<ML_BITS);
549
550 /* Copy Literals */
551 LZ4_wildCopy(op, anchor, op+litLength);
552 op+=litLength;
553 }
483554
484555 _next_match:
485 // Encode Offset
486 LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
487
488 // Start Counting
489 ip+=MINMATCH; ref+=MINMATCH; // MinMatch already verified
556 /* Encode Offset */
557 LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
558
559 /* Encode MatchLength */
560 {
561 unsigned matchLength;
562
563 if ((dict==usingExtDict) && (lowLimit==dictionary))
564 {
565 const BYTE* limit;
566 match += refDelta;
567 limit = ip + (dictEnd-match);
568 if (limit > matchlimit) limit = matchlimit;
569 matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
570 ip += MINMATCH + matchLength;
571 if (ip==limit)
572 {
573 unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
574 matchLength += more;
575 ip += more;
576 }
577 }
578 else
579 {
580 matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
581 ip += MINMATCH + matchLength;
582 }
583
584 if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
585 return 0; /* Check output limit */
586 if (matchLength>=ML_MASK)
587 {
588 *token += ML_MASK;
589 matchLength -= ML_MASK;
590 for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
591 if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
592 *op++ = (BYTE)matchLength;
593 }
594 else *token += (BYTE)(matchLength);
595 }
596
490597 anchor = ip;
491 while likely(ip<matchlimit-(STEPSIZE-1))
492 {
493 size_t diff = AARCH(ref) ^ AARCH(ip);
494 if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
495 ip += LZ4_NbCommonBytes(diff);
496 goto _endCount;
497 }
498 if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
499 if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
500 if ((ip<matchlimit) && (*ref == *ip)) ip++;
501 _endCount:
502
503 // Encode MatchLength
504 length = (int)(ip - anchor);
505 if ((limitedOutput) && unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend)) return 0; // Check output limit
506 if (length>=(int)ML_MASK)
507 {
508 *token += ML_MASK;
509 length -= ML_MASK;
510 for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
511 if (length >= 255) { length-=255; *op++ = 255; }
512 *op++ = (BYTE)length;
513 }
514 else *token += (BYTE)(length);
515
516 // Test end of chunk
517 if (ip > mflimit) { anchor = ip; break; }
518
519 // Fill table
598
599 /* Test end of chunk */
600 if (ip > mflimit) break;
601
602 /* Fill table */
520603 LZ4_putPosition(ip-2, ctx, tableType, base);
521604
522 // Test next position
523 ref = LZ4_getPosition(ip, ctx, tableType, base);
605 /* Test next position */
606 match = LZ4_getPosition(ip, ctx, tableType, base);
607 if (dict==usingExtDict)
608 {
609 if (match<(const BYTE*)source)
610 {
611 refDelta = dictDelta;
612 lowLimit = dictionary;
613 }
614 else
615 {
616 refDelta = 0;
617 lowLimit = (const BYTE*)source;
618 }
619 }
524620 LZ4_putPosition(ip, ctx, tableType, base);
525 if ((ref + MAX_DISTANCE >= ip) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
526
527 // Prepare next loop
528 anchor = ip++;
529 forwardH = LZ4_hashPosition(ip, tableType);
621 if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
622 && (match+MAX_DISTANCE>=ip)
623 && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
624 { token=op++; *token=0; goto _next_match; }
625
626 /* Prepare next loop */
627 forwardH = LZ4_hashPosition(++ip, tableType);
530628 }
531629
532630 _last_literals:
533 // Encode Last Literals
534 {
535 int lastRun = (int)(iend - anchor);
536 if ((limitedOutput) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; // Check output limit
537 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
538 else *op++ = (BYTE)(lastRun<<ML_BITS);
539 memcpy(op, anchor, iend - anchor);
540 op += iend-anchor;
541 }
542
543 // End
631 /* Encode Last Literals */
632 {
633 const size_t lastRun = (size_t)(iend - anchor);
634 if ((outputLimited) && ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
635 return 0; /* Check output limit */
636 if (lastRun >= RUN_MASK)
637 {
638 size_t accumulator = lastRun - RUN_MASK;
639 *op++ = RUN_MASK << ML_BITS;
640 for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
641 *op++ = (BYTE) accumulator;
642 }
643 else
644 {
645 *op++ = (BYTE)(lastRun<<ML_BITS);
646 }
647 memcpy(op, anchor, lastRun);
648 op += lastRun;
649 }
650
651 /* End */
544652 return (int) (((char*)op)-dest);
545653 }
546654
547655
548 int LZ4_compress(const char* source, char* dest, int inputSize)
656 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
657 {
658 LZ4_resetStream((LZ4_stream_t*)state);
659 if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
660
661 if (maxOutputSize >= LZ4_compressBound(inputSize))
662 {
663 if (inputSize < LZ4_64Klimit)
664 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
665 else
666 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
667 }
668 else
669 {
670 if (inputSize < LZ4_64Klimit)
671 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
672 else
673 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
674 }
675 }
676
677
678 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
549679 {
550680 #if (HEAPMODE)
551 void* ctx = ALLOCATOR(HASHNBCELLS4, 4); // Aligned on 4-bytes boundaries
681 void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
552682 #else
553 U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; // Ensure data is aligned on 4-bytes boundaries
683 LZ4_stream_t ctx;
684 void* ctxPtr = &ctx;
554685 #endif
555 int result;
556
557 if (inputSize < (int)LZ4_64KLIMIT)
558 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noPrefix);
686
687 int result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
688
689 #if (HEAPMODE)
690 FREEMEM(ctxPtr);
691 #endif
692 return result;
693 }
694
695
696 int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
697 {
698 return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
699 }
700
701
702 /* hidden debug function */
703 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
704 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
705 {
706 LZ4_stream_t ctx;
707
708 LZ4_resetStream(&ctx);
709
710 if (inputSize < LZ4_64Klimit)
711 return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
559712 else
560 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
713 return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
714 }
715
716
717 /********************************
718 * destSize variant
719 ********************************/
720
721 static int LZ4_compress_destSize_generic(
722 void* const ctx,
723 const char* const src,
724 char* const dst,
725 int* const srcSizePtr,
726 const int targetDstSize,
727 const tableType_t tableType)
728 {
729 const BYTE* ip = (const BYTE*) src;
730 const BYTE* base = (const BYTE*) src;
731 const BYTE* lowLimit = (const BYTE*) src;
732 const BYTE* anchor = ip;
733 const BYTE* const iend = ip + *srcSizePtr;
734 const BYTE* const mflimit = iend - MFLIMIT;
735 const BYTE* const matchlimit = iend - LASTLITERALS;
736
737 BYTE* op = (BYTE*) dst;
738 BYTE* const oend = op + targetDstSize;
739 BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
740 BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
741 BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
742
743 U32 forwardH;
744
745
746 /* Init conditions */
747 if (targetDstSize < 1) return 0; /* Impossible to store anything */
748 if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
749 if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
750 if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
751
752 /* First Byte */
753 *srcSizePtr = 0;
754 LZ4_putPosition(ip, ctx, tableType, base);
755 ip++; forwardH = LZ4_hashPosition(ip, tableType);
756
757 /* Main Loop */
758 for ( ; ; )
759 {
760 const BYTE* match;
761 BYTE* token;
762 {
763 const BYTE* forwardIp = ip;
764 unsigned step = 1;
765 unsigned searchMatchNb = 1 << LZ4_skipTrigger;
766
767 /* Find a match */
768 do {
769 U32 h = forwardH;
770 ip = forwardIp;
771 forwardIp += step;
772 step = (searchMatchNb++ >> LZ4_skipTrigger);
773
774 if (unlikely(forwardIp > mflimit))
775 goto _last_literals;
776
777 match = LZ4_getPositionOnHash(h, ctx, tableType, base);
778 forwardH = LZ4_hashPosition(forwardIp, tableType);
779 LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
780
781 } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
782 || (LZ4_read32(match) != LZ4_read32(ip)) );
783 }
784
785 /* Catch up */
786 while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
787
788 {
789 /* Encode Literal length */
790 unsigned litLength = (unsigned)(ip - anchor);
791 token = op++;
792 if (op + ((litLength+240)/255) + litLength > oMaxLit)
793 {
794 /* Not enough space for a last match */
795 op--;
796 goto _last_literals;
797 }
798 if (litLength>=RUN_MASK)
799 {
800 unsigned len = litLength - RUN_MASK;
801 *token=(RUN_MASK<<ML_BITS);
802 for(; len >= 255 ; len-=255) *op++ = 255;
803 *op++ = (BYTE)len;
804 }
805 else *token = (BYTE)(litLength<<ML_BITS);
806
807 /* Copy Literals */
808 LZ4_wildCopy(op, anchor, op+litLength);
809 op += litLength;
810 }
811
812 _next_match:
813 /* Encode Offset */
814 LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
815
816 /* Encode MatchLength */
817 {
818 size_t matchLength;
819
820 matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
821
822 if (op + ((matchLength+240)/255) > oMaxMatch)
823 {
824 /* Match description too long : reduce it */
825 matchLength = (15-1) + (oMaxMatch-op) * 255;
826 }
827 //printf("offset %5i, matchLength%5i \n", (int)(ip-match), matchLength + MINMATCH);
828 ip += MINMATCH + matchLength;
829
830 if (matchLength>=ML_MASK)
831 {
832 *token += ML_MASK;
833 matchLength -= ML_MASK;
834 while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
835 *op++ = (BYTE)matchLength;
836 }
837 else *token += (BYTE)(matchLength);
838 }
839
840 anchor = ip;
841
842 /* Test end of block */
843 if (ip > mflimit) break;
844 if (op > oMaxSeq) break;
845
846 /* Fill table */
847 LZ4_putPosition(ip-2, ctx, tableType, base);
848
849 /* Test next position */
850 match = LZ4_getPosition(ip, ctx, tableType, base);
851 LZ4_putPosition(ip, ctx, tableType, base);
852 if ( (match+MAX_DISTANCE>=ip)
853 && (LZ4_read32(match)==LZ4_read32(ip)) )
854 { token=op++; *token=0; goto _next_match; }
855
856 /* Prepare next loop */
857 forwardH = LZ4_hashPosition(++ip, tableType);
858 }
859
860 _last_literals:
861 /* Encode Last Literals */
862 {
863 size_t lastRunSize = (size_t)(iend - anchor);
864 if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend)
865 {
866 /* adapt lastRunSize to fill 'dst' */
867 lastRunSize = (oend-op) - 1;
868 lastRunSize -= (lastRunSize+240)/255;
869 }
870 ip = anchor + lastRunSize;
871
872 if (lastRunSize >= RUN_MASK)
873 {
874 size_t accumulator = lastRunSize - RUN_MASK;
875 *op++ = RUN_MASK << ML_BITS;
876 for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
877 *op++ = (BYTE) accumulator;
878 }
879 else
880 {
881 *op++ = (BYTE)(lastRunSize<<ML_BITS);
882 }
883 memcpy(op, anchor, lastRunSize);
884 op += lastRunSize;
885 }
886
887 /* End */
888 *srcSizePtr = (int) (((const char*)ip)-src);
889 return (int) (((char*)op)-dst);
890 }
891
892
893 static int LZ4_compress_destSize_extState (void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
894 {
895 LZ4_resetStream((LZ4_stream_t*)state);
896
897 if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) /* compression success is guaranteed */
898 {
899 return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
900 }
901 else
902 {
903 if (*srcSizePtr < LZ4_64Klimit)
904 return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, byU16);
905 else
906 return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, LZ4_64bits() ? byU32 : byPtr);
907 }
908 }
909
910
911 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
912 {
913 #if (HEAPMODE)
914 void* ctx = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
915 #else
916 LZ4_stream_t ctxBody;
917 void* ctx = &ctxBody;
918 #endif
919
920 int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
561921
562922 #if (HEAPMODE)
563923 FREEMEM(ctx);
565925 return result;
566926 }
567927
568 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
569 {
570 #if (HEAPMODE)
571 void* ctx = ALLOCATOR(HASHNBCELLS4, 4); // Aligned on 4-bytes boundaries
572 #else
573 U32 ctx[1U<<(MEMORY_USAGE-2)] = {0}; // Ensure data is aligned on 4-bytes boundaries
574 #endif
928
929
930 /********************************
931 * Streaming functions
932 ********************************/
933
934 LZ4_stream_t* LZ4_createStream(void)
935 {
936 LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
937 LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
938 LZ4_resetStream(lz4s);
939 return lz4s;
940 }
941
942 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
943 {
944 MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
945 }
946
947 int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
948 {
949 FREEMEM(LZ4_stream);
950 return (0);
951 }
952
953
954 #define HASH_UNIT sizeof(size_t)
955 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
956 {
957 LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
958 const BYTE* p = (const BYTE*)dictionary;
959 const BYTE* const dictEnd = p + dictSize;
960 const BYTE* base;
961
962 if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
963 LZ4_resetStream(LZ4_dict);
964
965 if (dictSize < (int)HASH_UNIT)
966 {
967 dict->dictionary = NULL;
968 dict->dictSize = 0;
969 return 0;
970 }
971
972 if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
973 dict->currentOffset += 64 KB;
974 base = p - dict->currentOffset;
975 dict->dictionary = p;
976 dict->dictSize = (U32)(dictEnd - p);
977 dict->currentOffset += dict->dictSize;
978
979 while (p <= dictEnd-HASH_UNIT)
980 {
981 LZ4_putPosition(p, dict->hashTable, byU32, base);
982 p+=3;
983 }
984
985 return dict->dictSize;
986 }
987
988
989 static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
990 {
991 if ((LZ4_dict->currentOffset > 0x80000000) ||
992 ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */
993 {
994 /* rescale hash table */
995 U32 delta = LZ4_dict->currentOffset - 64 KB;
996 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
997 int i;
998 for (i=0; i<HASH_SIZE_U32; i++)
999 {
1000 if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1001 else LZ4_dict->hashTable[i] -= delta;
1002 }
1003 LZ4_dict->currentOffset = 64 KB;
1004 if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1005 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1006 }
1007 }
1008
1009
1010 int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1011 {
1012 LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
1013 const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1014
1015 const BYTE* smallest = (const BYTE*) source;
1016 if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
1017 if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
1018 LZ4_renormDictT(streamPtr, smallest);
1019 if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
1020
1021 /* Check overlapping input/dictionary space */
1022 {
1023 const BYTE* sourceEnd = (const BYTE*) source + inputSize;
1024 if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
1025 {
1026 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1027 if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1028 if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1029 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1030 }
1031 }
1032
1033 /* prefix mode : source data follows dictionary */
1034 if (dictEnd == (const BYTE*)source)
1035 {
1036 int result;
1037 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1038 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
1039 else
1040 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
1041 streamPtr->dictSize += (U32)inputSize;
1042 streamPtr->currentOffset += (U32)inputSize;
1043 return result;
1044 }
1045
1046 /* external dictionary mode */
1047 {
1048 int result;
1049 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1050 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
1051 else
1052 result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
1053 streamPtr->dictionary = (const BYTE*)source;
1054 streamPtr->dictSize = (U32)inputSize;
1055 streamPtr->currentOffset += (U32)inputSize;
1056 return result;
1057 }
1058 }
1059
1060
1061 /* Hidden debug function, to force external dictionary mode */
1062 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
1063 {
1064 LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
5751065 int result;
576
577 if (inputSize < (int)LZ4_64KLIMIT)
578 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, byU16, noPrefix);
579 else
580 result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
581
582 #if (HEAPMODE)
583 FREEMEM(ctx);
584 #endif
1066 const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1067
1068 const BYTE* smallest = dictEnd;
1069 if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
1070 LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);
1071
1072 result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1073
1074 streamPtr->dictionary = (const BYTE*)source;
1075 streamPtr->dictSize = (U32)inputSize;
1076 streamPtr->currentOffset += (U32)inputSize;
1077
5851078 return result;
5861079 }
5871080
5881081
589 //*****************************
590 // Using an external allocation
591 //*****************************
592
593 int LZ4_sizeofState() { return 1 << MEMORY_USAGE; }
594
595
596 int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
597 {
598 if (((size_t)(state)&3) != 0) return 0; // Error : state is not aligned on 4-bytes boundary
599 MEM_INIT(state, 0, LZ4_sizeofState());
600
601 if (inputSize < (int)LZ4_64KLIMIT)
602 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noPrefix);
603 else
604 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
605 }
606
607
608 int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
609 {
610 if (((size_t)(state)&3) != 0) return 0; // Error : state is not aligned on 4-bytes boundary
611 MEM_INIT(state, 0, LZ4_sizeofState());
612
613 if (inputSize < (int)LZ4_64KLIMIT)
614 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limited, byU16, noPrefix);
615 else
616 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
617 }
618
619
620 //****************************
621 // Stream functions
622 //****************************
623
624 int LZ4_sizeofStreamState()
625 {
626 return sizeof(LZ4_Data_Structure);
627 }
628
629 FORCE_INLINE void LZ4_init(LZ4_Data_Structure* lz4ds, const BYTE* base)
630 {
631 MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
632 lz4ds->bufferStart = base;
633 lz4ds->base = base;
634 lz4ds->nextBlock = base;
635 }
636
637 int LZ4_resetStreamState(void* state, const char* inputBuffer)
638 {
639 if ((((size_t)state) & 3) != 0) return 1; // Error : pointer is not aligned on 4-bytes boundary
640 LZ4_init((LZ4_Data_Structure*)state, (const BYTE*)inputBuffer);
641 return 0;
642 }
643
644 void* LZ4_create (const char* inputBuffer)
645 {
646 void* lz4ds = ALLOCATOR(1, sizeof(LZ4_Data_Structure));
647 LZ4_init ((LZ4_Data_Structure*)lz4ds, (const BYTE*)inputBuffer);
648 return lz4ds;
649 }
650
651
652 int LZ4_free (void* LZ4_Data)
653 {
654 FREEMEM(LZ4_Data);
655 return (0);
656 }
657
658
659 char* LZ4_slideInputBuffer (void* LZ4_Data)
660 {
661 LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
662 size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);
663
664 if ( (lz4ds->base - delta > lz4ds->base) // underflow control
665 || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) ) // close to 32-bits limit
666 {
667 size_t deltaLimit = (lz4ds->nextBlock - 64 KB) - lz4ds->base;
668 int nH;
669
670 for (nH=0; nH < HASHNBCELLS4; nH++)
671 {
672 if ((size_t)(lz4ds->hashTable[nH]) < deltaLimit) lz4ds->hashTable[nH] = 0;
673 else lz4ds->hashTable[nH] -= (U32)deltaLimit;
674 }
675 memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
676 lz4ds->base = lz4ds->bufferStart;
677 lz4ds->nextBlock = lz4ds->base + 64 KB;
678 }
679 else
680 {
681 memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
682 lz4ds->nextBlock -= delta;
683 lz4ds->base -= delta;
684 }
685
686 return (char*)(lz4ds->nextBlock);
687 }
688
689
690 int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize)
691 {
692 return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, 0, notLimited, byU32, withPrefix);
693 }
694
695
696 int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
697 {
698 return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, maxOutputSize, limited, byU32, withPrefix);
699 }
700
701
702 //****************************
703 // Decompression functions
704 //****************************
705
706 // This generic decompression function cover all use cases.
707 // It shall be instanciated several times, using different sets of directives
708 // Note that it is essential this generic function is really inlined,
709 // in order to remove useless branches during compilation optimisation.
1082 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1083 {
1084 LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
1085 const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;
1086
1087 if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
1088 if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
1089
1090 memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1091
1092 dict->dictionary = (const BYTE*)safeBuffer;
1093 dict->dictSize = (U32)dictSize;
1094
1095 return dictSize;
1096 }
1097
1098
1099
1100 /*******************************
1101 * Decompression functions
1102 *******************************/
1103 /*
1104 * This generic decompression function cover all use cases.
1105 * It shall be instantiated several times, using different sets of directives
1106 * Note that it is essential this generic function is really inlined,
1107 * in order to remove useless branches during compilation optimization.
1108 */
7101109 FORCE_INLINE int LZ4_decompress_generic(
711 const char* source,
712 char* dest,
713 int inputSize, //
714 int outputSize, // If endOnInput==endOnInputSize, this value is the max size of Output Buffer.
715
716 int endOnInput, // endOnOutputSize, endOnInputSize
717 int prefix64k, // noPrefix, withPrefix
718 int partialDecoding, // full, partial
719 int targetOutputSize // only used if partialDecoding==partial
1110 const char* const source,
1111 char* const dest,
1112 int inputSize,
1113 int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
1114
1115 int endOnInput, /* endOnOutputSize, endOnInputSize */
1116 int partialDecoding, /* full, partial */
1117 int targetOutputSize, /* only used if partialDecoding==partial */
1118 int dict, /* noDict, withPrefix64k, usingExtDict */
1119 const BYTE* const lowPrefix, /* == dest if dict == noDict */
1120 const BYTE* const dictStart, /* only if dict==usingExtDict */
1121 const size_t dictSize /* note : = 0 if noDict */
7201122 )
7211123 {
722 // Local Variables
723 const BYTE* restrict ip = (const BYTE*) source;
724 const BYTE* ref;
1124 /* Local Variables */
1125 const BYTE* ip = (const BYTE*) source;
7251126 const BYTE* const iend = ip + inputSize;
7261127
7271128 BYTE* op = (BYTE*) dest;
7281129 BYTE* const oend = op + outputSize;
7291130 BYTE* cpy;
7301131 BYTE* oexit = op + targetOutputSize;
731
732 const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; // static reduces speed for LZ4_decompress_safe() on GCC64
733 static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
734
735
736 // Special cases
737 if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; // targetOutputSize too high => decode everything
738 if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1; // Empty output buffer
739 if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);
740
741
742 // Main Loop
1132 const BYTE* const lowLimit = lowPrefix - dictSize;
1133
1134 const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
1135 const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4};
1136 const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
1137
1138 const int safeDecode = (endOnInput==endOnInputSize);
1139 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1140
1141
1142 /* Special cases */
1143 if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
1144 if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
1145 if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
1146
1147
1148 /* Main Loop */
7431149 while (1)
7441150 {
7451151 unsigned token;
7461152 size_t length;
747
748 // get runlength
1153 const BYTE* match;
1154
1155 /* get literal length */
7491156 token = *ip++;
7501157 if ((length=(token>>ML_BITS)) == RUN_MASK)
7511158 {
752 unsigned s=255;
753 while (((endOnInput)?ip<iend:1) && (s==255))
1159 unsigned s;
1160 do
7541161 {
7551162 s = *ip++;
7561163 length += s;
7571164 }
758 }
759
760 // copy literals
1165 while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
1166 if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* overflow detection */
1167 if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* overflow detection */
1168 }
1169
1170 /* copy literals */
7611171 cpy = op+length;
7621172 if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
7631173 || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
7641174 {
7651175 if (partialDecoding)
7661176 {
767 if (cpy > oend) goto _output_error; // Error : write attempt beyond end of output buffer
768 if ((endOnInput) && (ip+length > iend)) goto _output_error; // Error : read attempt beyond end of input buffer
1177 if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
1178 if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
7691179 }
7701180 else
7711181 {
772 if ((!endOnInput) && (cpy != oend)) goto _output_error; // Error : block decoding must stop exactly there
773 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; // Error : input must be consumed
1182 if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
1183 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
7741184 }
7751185 memcpy(op, ip, length);
7761186 ip += length;
7771187 op += length;
778 break; // Necessarily EOF, due to parsing restrictions
779 }
780 LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;
781
782 // get offset
783 LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
784 if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error; // Error : offset outside destination buffer
785
786 // get matchlength
787 if ((length=(token&ML_MASK)) == ML_MASK)
788 {
789 while ((!endOnInput) || (ip<iend-(LASTLITERALS+1))) // Ensure enough bytes remain for LASTLITERALS + token
790 {
791 unsigned s = *ip++;
1188 break; /* Necessarily EOF, due to parsing restrictions */
1189 }
1190 LZ4_wildCopy(op, ip, cpy);
1191 ip += length; op = cpy;
1192
1193 /* get offset */
1194 match = cpy - LZ4_readLE16(ip); ip+=2;
1195 if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */
1196
1197 /* get matchlength */
1198 length = token & ML_MASK;
1199 if (length == ML_MASK)
1200 {
1201 unsigned s;
1202 do
1203 {
1204 if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
1205 s = *ip++;
7921206 length += s;
793 if (s==255) continue;
794 break;
1207 } while (s==255);
1208 if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* overflow detection */
1209 }
1210 length += MINMATCH;
1211
1212 /* check external dictionary */
1213 if ((dict==usingExtDict) && (match < lowPrefix))
1214 {
1215 if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
1216
1217 if (length <= (size_t)(lowPrefix-match))
1218 {
1219 /* match can be copied as a single segment from external dictionary */
1220 match = dictEnd - (lowPrefix-match);
1221 memmove(op, match, length); op += length;
7951222 }
796 }
797
798 // copy repeated sequence
799 if unlikely((op-ref)<(int)STEPSIZE)
800 {
801 const size_t dec64 = dec64table[(sizeof(void*)==4) ? 0 : op-ref];
802 op[0] = ref[0];
803 op[1] = ref[1];
804 op[2] = ref[2];
805 op[3] = ref[3];
806 op += 4, ref += 4; ref -= dec32table[op-ref];
807 A32(op) = A32(ref);
808 op += STEPSIZE-4; ref -= dec64;
809 } else { LZ4_COPYSTEP(op,ref); }
810 cpy = op + length - (STEPSIZE-4);
811
812 if unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4))
813 {
814 if (cpy > oend-LASTLITERALS) goto _output_error; // Error : last 5 bytes must be literals
815 LZ4_SECURECOPY(op, ref, (oend-COPYLENGTH));
816 while(op<cpy) *op++=*ref++;
817 op=cpy;
1223 else
1224 {
1225 /* match encompass external dictionary and current segment */
1226 size_t copySize = (size_t)(lowPrefix-match);
1227 memcpy(op, dictEnd - copySize, copySize);
1228 op += copySize;
1229 copySize = length - copySize;
1230 if (copySize > (size_t)(op-lowPrefix)) /* overlap within current segment */
1231 {
1232 BYTE* const endOfMatch = op + copySize;
1233 const BYTE* copyFrom = lowPrefix;
1234 while (op < endOfMatch) *op++ = *copyFrom++;
1235 }
1236 else
1237 {
1238 memcpy(op, lowPrefix, copySize);
1239 op += copySize;
1240 }
1241 }
8181242 continue;
8191243 }
820 LZ4_WILDCOPY(op, ref, cpy);
821 op=cpy; // correction
822 }
823
824 // end of decoding
1244
1245 /* copy repeated sequence */
1246 cpy = op + length;
1247 if (unlikely((op-match)<8))
1248 {
1249 const size_t dec64 = dec64table[op-match];
1250 op[0] = match[0];
1251 op[1] = match[1];
1252 op[2] = match[2];
1253 op[3] = match[3];
1254 match += dec32table[op-match];
1255 LZ4_copy4(op+4, match);
1256 op += 8; match -= dec64;
1257 } else { LZ4_copy8(op, match); op+=8; match+=8; }
1258
1259 if (unlikely(cpy>oend-12))
1260 {
1261 if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals */
1262 if (op < oend-8)
1263 {
1264 LZ4_wildCopy(op, match, oend-8);
1265 match += (oend-8) - op;
1266 op = oend-8;
1267 }
1268 while (op<cpy) *op++ = *match++;
1269 }
1270 else
1271 LZ4_wildCopy(op, match, cpy);
1272 op=cpy; /* correction */
1273 }
1274
1275 /* end of decoding */
8251276 if (endOnInput)
826 return (int) (((char*)op)-dest); // Nb of output bytes decoded
1277 return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
8271278 else
828 return (int) (((char*)ip)-source); // Nb of input bytes read
829
830 // Overflow error detected
1279 return (int) (((const char*)ip)-source); /* Nb of input bytes read */
1280
1281 /* Overflow error detected */
8311282 _output_error:
832 return (int) (-(((char*)ip)-source))-1;
833 }
834
835
836 int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
837 {
838 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
839 }
840
841 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
842 {
843 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
844 }
845
846 int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
847 {
848 return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
849 }
850
851 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
852 {
853 return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
854 }
855
856 int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
857 {
858 #ifdef _MSC_VER // This version is faster with Visual
859 return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
860 #else
861 return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
862 #endif
863 }
864
1283 return (int) (-(((const char*)ip)-source))-1;
1284 }
1285
1286
1287 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
1288 {
1289 return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
1290 }
1291
1292 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
1293 {
1294 return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
1295 }
1296
1297 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1298 {
1299 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
1300 }
1301
1302
1303 /* streaming decompression functions */
1304
1305 typedef struct
1306 {
1307 const BYTE* externalDict;
1308 size_t extDictSize;
1309 const BYTE* prefixEnd;
1310 size_t prefixSize;
1311 } LZ4_streamDecode_t_internal;
1312
1313 /*
1314 * If you prefer dynamic allocation methods,
1315 * LZ4_createStreamDecode()
1316 * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
1317 */
1318 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
1319 {
1320 LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
1321 return lz4s;
1322 }
1323
1324 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
1325 {
1326 FREEMEM(LZ4_stream);
1327 return 0;
1328 }
1329
1330 /*
1331 * LZ4_setStreamDecode
1332 * Use this function to instruct where to find the dictionary
1333 * This function is not necessary if previous data is still available where it was decoded.
1334 * Loading a size of 0 is allowed (same effect as no dictionary).
1335 * Return : 1 if OK, 0 if error
1336 */
1337 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
1338 {
1339 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1340 lz4sd->prefixSize = (size_t) dictSize;
1341 lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
1342 lz4sd->externalDict = NULL;
1343 lz4sd->extDictSize = 0;
1344 return 1;
1345 }
1346
1347 /*
1348 *_continue() :
1349 These decoding functions allow decompression of multiple blocks in "streaming" mode.
1350 Previously decoded blocks must still be available at the memory position where they were decoded.
1351 If it's not possible, save the relevant part of decoded data into a safe buffer,
1352 and indicate where it stands using LZ4_setStreamDecode()
1353 */
1354 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
1355 {
1356 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1357 int result;
1358
1359 if (lz4sd->prefixEnd == (BYTE*)dest)
1360 {
1361 result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
1362 endOnInputSize, full, 0,
1363 usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
1364 if (result <= 0) return result;
1365 lz4sd->prefixSize += result;
1366 lz4sd->prefixEnd += result;
1367 }
1368 else
1369 {
1370 lz4sd->extDictSize = lz4sd->prefixSize;
1371 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
1372 result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
1373 endOnInputSize, full, 0,
1374 usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
1375 if (result <= 0) return result;
1376 lz4sd->prefixSize = result;
1377 lz4sd->prefixEnd = (BYTE*)dest + result;
1378 }
1379
1380 return result;
1381 }
1382
1383 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
1384 {
1385 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1386 int result;
1387
1388 if (lz4sd->prefixEnd == (BYTE*)dest)
1389 {
1390 result = LZ4_decompress_generic(source, dest, 0, originalSize,
1391 endOnOutputSize, full, 0,
1392 usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
1393 if (result <= 0) return result;
1394 lz4sd->prefixSize += originalSize;
1395 lz4sd->prefixEnd += originalSize;
1396 }
1397 else
1398 {
1399 lz4sd->extDictSize = lz4sd->prefixSize;
1400 lz4sd->externalDict = (BYTE*)dest - lz4sd->extDictSize;
1401 result = LZ4_decompress_generic(source, dest, 0, originalSize,
1402 endOnOutputSize, full, 0,
1403 usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
1404 if (result <= 0) return result;
1405 lz4sd->prefixSize = originalSize;
1406 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
1407 }
1408
1409 return result;
1410 }
1411
1412
1413 /*
1414 Advanced decoding functions :
1415 *_usingDict() :
1416 These decoding functions work the same as "_continue" ones,
1417 the dictionary must be explicitly provided within parameters
1418 */
1419
1420 FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
1421 {
1422 if (dictSize==0)
1423 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
1424 if (dictStart+dictSize == dest)
1425 {
1426 if (dictSize >= (int)(64 KB - 1))
1427 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
1428 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
1429 }
1430 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1431 }
1432
1433 int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1434 {
1435 return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
1436 }
1437
1438 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
1439 {
1440 return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
1441 }
1442
1443 /* debug function */
1444 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1445 {
1446 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1447 }
1448
1449
1450 /***************************************************
1451 * Obsolete Functions
1452 ***************************************************/
1453 /* obsolete compression functions */
1454 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
1455 int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
1456 int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
1457 int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
1458 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
1459 int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
1460
1461 /*
1462 These function names are deprecated and should no longer be used.
1463 They are only provided here for compatibility with older user programs.
1464 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1465 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1466 */
1467 int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
1468 int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
1469
1470
1471 /* Obsolete Streaming functions */
1472
1473 int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
1474
1475 static void LZ4_init(LZ4_stream_t_internal* lz4ds, BYTE* base)
1476 {
1477 MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
1478 lz4ds->bufferStart = base;
1479 }
1480
1481 int LZ4_resetStreamState(void* state, char* inputBuffer)
1482 {
1483 if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
1484 LZ4_init((LZ4_stream_t_internal*)state, (BYTE*)inputBuffer);
1485 return 0;
1486 }
1487
1488 void* LZ4_create (char* inputBuffer)
1489 {
1490 void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64);
1491 LZ4_init ((LZ4_stream_t_internal*)lz4ds, (BYTE*)inputBuffer);
1492 return lz4ds;
1493 }
1494
1495 char* LZ4_slideInputBuffer (void* LZ4_Data)
1496 {
1497 LZ4_stream_t_internal* ctx = (LZ4_stream_t_internal*)LZ4_Data;
1498 int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
1499 return (char*)(ctx->bufferStart + dictSize);
1500 }
1501
1502 /* Obsolete streaming decompression functions */
1503
1504 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1505 {
1506 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1507 }
1508
1509 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1510 {
1511 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1512 }
1513
1514 #endif /* LZ4_COMMONDEFS_ONLY */
1515
00 /*
11 LZ4 - Fast LZ compression algorithm
22 Header File
3 Copyright (C) 2011-2013, Yann Collet.
3 Copyright (C) 2011-2015, Yann Collet.
4
45 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
56
67 Redistribution and use in source and binary forms, with or without
2728 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2829
2930 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/
31 - LZ4 source repository : https://github.com/Cyan4973/lz4
32 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
3233 */
3334 #pragma once
3435
3637 extern "C" {
3738 #endif
3839
39
40 //**************************************
41 // Compiler Options
42 //**************************************
43 #if defined(_MSC_VER) && !defined(__cplusplus) // Visual Studio
44 # define inline __inline // Visual C is not C99, but supports some kind of inline
45 #endif
46
47
48 //****************************
49 // Simple Functions
50 //****************************
51
52 int LZ4_compress (const char* source, char* dest, int inputSize);
53 int LZ4_decompress_safe (const char* source, char* dest, int inputSize, int maxOutputSize);
54
55 /*
56 LZ4_compress() :
57 Compresses 'inputSize' bytes from 'source' into 'dest'.
58 Destination buffer must be already allocated,
59 and must be sized to handle worst cases situations (input data not compressible)
60 Worst case size evaluation is provided by function LZ4_compressBound()
61 inputSize : Max supported value is LZ4_MAX_INPUT_VALUE
62 return : the number of bytes written in buffer dest
63 or 0 if the compression fails
40 /*
41 * lz4.h provides block compression functions, and gives full buffer control to programmer.
42 * If you need to generate inter-operable compressed data (respecting LZ4 frame specification),
43 * and can let the library handle its own memory, please use lz4frame.h instead.
44 */
45
46 /**************************************
47 * Version
48 **************************************/
49 #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
50 #define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
51 #define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */
52 #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
53 int LZ4_versionNumber (void);
54
55 /**************************************
56 * Tuning parameter
57 **************************************/
58 /*
59 * LZ4_MEMORY_USAGE :
60 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
61 * Increasing memory usage improves compression ratio
62 * Reduced memory usage can improve speed, due to cache effect
63 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
64 */
65 #define LZ4_MEMORY_USAGE 14
66
67
68 /**************************************
69 * Simple Functions
70 **************************************/
71
72 int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
73 int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
74
75 /*
76 LZ4_compress_default() :
77 Compresses 'sourceSize' bytes from buffer 'source'
78 into already allocated 'dest' buffer of size 'maxDestSize'.
79 Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize).
80 It also runs faster, so it's a recommended setting.
81 If the function cannot compress 'source' into a more limited 'dest' budget,
82 compression stops *immediately*, and the function result is zero.
83 As a consequence, 'dest' content is not valid.
84 This function never writes outside 'dest' buffer, nor read outside 'source' buffer.
85 sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE
86 maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
87 return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize)
88 or 0 if compression fails
6489
6590 LZ4_decompress_safe() :
66 maxOutputSize : is the size of the destination buffer (which must be already allocated)
67 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
91 compressedSize : is the precise full size of the compressed block.
92 maxDecompressedSize : is the size of destination buffer, which must be already allocated.
93 return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
94 If destination buffer is not large enough, decoding will stop and output an error code (<0).
6895 If the source stream is detected malformed, the function will stop decoding and return a negative result.
69 This function is protected against buffer overflow exploits (never writes outside of output buffer, and never reads outside of input buffer). Therefore, it is protected against malicious data packets
70 */
71
72
73 //****************************
74 // Advanced Functions
75 //****************************
76 #define LZ4_MAX_INPUT_SIZE 0x7E000000 // 2 113 929 216 bytes
77 #define LZ4_COMPRESSBOUND(isize) ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
78 static inline int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
96 This function is protected against buffer overflow exploits, including malicious data packets.
97 It never writes outside output buffer, nor reads outside input buffer.
98 */
99
100
101 /**************************************
102 * Advanced Functions
103 **************************************/
104 #define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
105 #define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
79106
80107 /*
81108 LZ4_compressBound() :
82 Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)
83 primarily useful for memory allocation of output buffer.
84 inline function is recommended for the general case,
85 macro is also provided when result needs to be evaluated at compilation (such as stack memory allocation).
86
87 isize : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE
88 return : maximum output size in a "worst case" scenario
89 or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
90 */
91
92
93 int LZ4_compress_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
94
95 /*
96 LZ4_compress_limitedOutput() :
97 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
98 If it cannot achieve it, compression will stop, and result of the function will be zero.
99 This function never writes outside of provided output buffer.
100
101 inputSize : Max supported value is LZ4_MAX_INPUT_VALUE
102 maxOutputSize : is the size of the destination buffer (which must be already allocated)
103 return : the number of bytes written in buffer 'dest'
104 or 0 if the compression fails
105 */
106
107
108 int LZ4_decompress_fast (const char* source, char* dest, int outputSize);
109 Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
110 This function is primarily useful for memory allocation purposes (destination buffer size).
111 Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
112 Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize)
113 inputSize : max supported value is LZ4_MAX_INPUT_SIZE
114 return : maximum output size in a "worst case" scenario
115 or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
116 */
117 int LZ4_compressBound(int inputSize);
118
119 /*
120 LZ4_compress_fast() :
121 Same as LZ4_compress_default(), but allows to select an "acceleration" factor.
122 The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
123 It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
124 An acceleration value of "1" is the same as regular LZ4_compress_default()
125 Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1.
126 */
127 int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration);
128
129
130 /*
131 LZ4_compress_fast_extState() :
132 Same compression function, just using an externally allocated memory space to store compression state.
133 Use LZ4_sizeofState() to know how much memory must be allocated,
134 and allocate it on 8-bytes boundaries (using malloc() typically).
135 Then, provide it as 'void* state' to compression function.
136 */
137 int LZ4_sizeofState(void);
138 int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration);
139
140
141 /*
142 LZ4_compress_destSize() :
143 Reverse the logic, by compressing as much data as possible from 'source' buffer
144 into already allocated buffer 'dest' of size 'targetDestSize'.
145 This function either compresses the entire 'source' content into 'dest' if it's large enough,
146 or fill 'dest' buffer completely with as much data as possible from 'source'.
147 *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'.
148 New value is necessarily <= old value.
149 return : Nb bytes written into 'dest' (necessarily <= targetDestSize)
150 or 0 if compression fails
151 */
152 int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
153
109154
110155 /*
111156 LZ4_decompress_fast() :
112 outputSize : is the original (uncompressed) size
157 originalSize : is the original and therefore uncompressed size
113158 return : the number of bytes read from the source buffer (in other words, the compressed size)
114 If the source stream is malformed, the function will stop decoding and return a negative result.
115 note : This function is a bit faster than LZ4_decompress_safe()
116 This function never writes outside of output buffers, but may read beyond input buffer in case of malicious data packet.
117 Use this function preferably into a trusted environment (data to decode comes from a trusted source).
118 Destination buffer must be already allocated. Its size must be a minimum of 'outputSize' bytes.
119 */
120
121 int LZ4_decompress_safe_partial (const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize);
159 If the source stream is detected malformed, the function will stop decoding and return a negative result.
160 Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
161 note : This function fully respect memory boundaries for properly formed compressed data.
162 It is a bit faster than LZ4_decompress_safe().
163 However, it does not provide any protection against intentionally modified data stream (malicious input).
164 Use this function in trusted environment only (data to decode comes from a trusted source).
165 */
166 int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
122167
123168 /*
124169 LZ4_decompress_safe_partial() :
125 This function decompress a compressed block of size 'inputSize' at position 'source'
126 into output buffer 'dest' of size 'maxOutputSize'.
170 This function decompresses a compressed block of size 'compressedSize' at position 'source'
171 into destination buffer 'dest' of size 'maxDecompressedSize'.
127172 The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
128173 reducing decompression time.
129 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
174 return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
130175 Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
131176 Always control how many bytes were decoded.
132177 If the source stream is detected malformed, the function will stop decoding and return a negative result.
133178 This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
134179 */
135
136
137 //*****************************
138 // Using an external allocation
139 //*****************************
140 int LZ4_sizeofState();
180 int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
181
182
183 /***********************************************
184 * Streaming Compression Functions
185 ***********************************************/
186 #define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
187 #define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long))
188 /*
189 * LZ4_stream_t
190 * information structure to track an LZ4 stream.
191 * important : init this structure content before first use !
192 * note : only allocated directly the structure if you are statically linking LZ4
193 * If you are using liblz4 as a DLL, please use below construction methods instead.
194 */
195 typedef struct { long long table[LZ4_STREAMSIZE_U64]; } LZ4_stream_t;
196
197 /*
198 * LZ4_resetStream
199 * Use this function to init an allocated LZ4_stream_t structure
200 */
201 void LZ4_resetStream (LZ4_stream_t* streamPtr);
202
203 /*
204 * LZ4_createStream will allocate and initialize an LZ4_stream_t structure
205 * LZ4_freeStream releases its memory.
206 * In the context of a DLL (liblz4), please use these methods rather than the static struct.
207 * They are more future proof, in case of a change of LZ4_stream_t size.
208 */
209 LZ4_stream_t* LZ4_createStream(void);
210 int LZ4_freeStream (LZ4_stream_t* streamPtr);
211
212 /*
213 * LZ4_loadDict
214 * Use this function to load a static dictionary into LZ4_stream.
215 * Any previous data will be forgotten, only 'dictionary' will remain in memory.
216 * Loading a size of 0 is allowed.
217 * Return : dictionary size, in bytes (necessarily <= 64 KB)
218 */
219 int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
220
221 /*
222 * LZ4_compress_fast_continue
223 * Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio.
224 * Important : Previous data blocks are assumed to still be present and unmodified !
225 * 'dst' buffer must be already allocated.
226 * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
227 * If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero.
228 */
229 int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
230
231 /*
232 * LZ4_saveDict
233 * If previously compressed data block is not guaranteed to remain available at its memory location
234 * save it into a safer place (char* safeBuffer)
235 * Note : you don't need to call LZ4_loadDict() afterwards,
236 * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue()
237 * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error
238 */
239 int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
240
241
242 /************************************************
243 * Streaming Decompression Functions
244 ************************************************/
245
246 #define LZ4_STREAMDECODESIZE_U64 4
247 #define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
248 typedef struct { unsigned long long table[LZ4_STREAMDECODESIZE_U64]; } LZ4_streamDecode_t;
249 /*
250 * LZ4_streamDecode_t
251 * information structure to track an LZ4 stream.
252 * init this structure content using LZ4_setStreamDecode or memset() before first use !
253 *
254 * In the context of a DLL (liblz4) please prefer usage of construction methods below.
255 * They are more future proof, in case of a change of LZ4_streamDecode_t size in the future.
256 * LZ4_createStreamDecode will allocate and initialize an LZ4_streamDecode_t structure
257 * LZ4_freeStreamDecode releases its memory.
258 */
259 LZ4_streamDecode_t* LZ4_createStreamDecode(void);
260 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
261
262 /*
263 * LZ4_setStreamDecode
264 * Use this function to instruct where to find the dictionary.
265 * Setting a size of 0 is allowed (same effect as reset).
266 * Return : 1 if OK, 0 if error
267 */
268 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
269
270 /*
271 *_continue() :
272 These decoding functions allow decompression of multiple blocks in "streaming" mode.
273 Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
274 In the case of a ring buffers, decoding buffer must be either :
275 - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
276 In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
277 - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
278 maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
279 In which case, encoding and decoding buffers do not need to be synchronized,
280 and encoding ring buffer can have any size, including small ones ( < 64 KB).
281 - _At least_ 64 KB + 8 bytes + maxBlockSize.
282 In which case, encoding and decoding buffers do not need to be synchronized,
283 and encoding ring buffer can have any size, including larger than decoding buffer.
284 Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
285 and indicate where it is saved using LZ4_setStreamDecode()
286 */
287 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize);
288 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize);
289
290
291 /*
292 Advanced decoding functions :
293 *_usingDict() :
294 These decoding functions work the same as
295 a combination of LZ4_setStreamDecode() followed by LZ4_decompress_x_continue()
296 They are stand-alone. They don't need nor update an LZ4_streamDecode_t structure.
297 */
298 int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize);
299 int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
300
301
302
303 /**************************************
304 * Obsolete Functions
305 **************************************/
306 /* Deprecate Warnings */
307 /* Should these warnings messages be a problem,
308 it is generally possible to disable them,
309 with -Wno-deprecated-declarations for gcc
310 or _CRT_SECURE_NO_WARNINGS in Visual for example.
311 You can also define LZ4_DEPRECATE_WARNING_DEFBLOCK. */
312 #ifndef LZ4_DEPRECATE_WARNING_DEFBLOCK
313 # define LZ4_DEPRECATE_WARNING_DEFBLOCK
314 # define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
315 # if (LZ4_GCC_VERSION >= 405) || defined(__clang__)
316 # define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
317 # elif (LZ4_GCC_VERSION >= 301)
318 # define LZ4_DEPRECATED(message) __attribute__((deprecated))
319 # elif defined(_MSC_VER)
320 # define LZ4_DEPRECATED(message) __declspec(deprecated(message))
321 # else
322 # pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
323 # define LZ4_DEPRECATED(message)
324 # endif
325 #endif /* LZ4_DEPRECATE_WARNING_DEFBLOCK */
326
327 /* Obsolete compression functions */
328 /* These functions are planned to start generate warnings by r131 approximately */
329 int LZ4_compress (const char* source, char* dest, int sourceSize);
330 int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
141331 int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
142332 int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
143
144 /*
145 These functions are provided should you prefer to allocate memory for compression tables with your own allocation methods.
146 To know how much memory must be allocated for the compression tables, use :
147 int LZ4_sizeofState();
148
149 Note that tables must be aligned on 4-bytes boundaries, otherwise compression will fail (return code 0).
150
151 The allocated memory can be provided to the compressions functions using 'void* state' parameter.
152 LZ4_compress_withState() and LZ4_compress_limitedOutput_withState() are equivalent to previously described functions.
153 They just use the externally allocated memory area instead of allocating their own (on stack, or on heap).
154 */
155
156
157 //****************************
158 // Streaming Functions
159 //****************************
160
161 void* LZ4_create (const char* inputBuffer);
162 int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize);
163 int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize);
164 char* LZ4_slideInputBuffer (void* LZ4_Data);
165 int LZ4_free (void* LZ4_Data);
166
167 /*
168 These functions allow the compression of dependent blocks, where each block benefits from prior 64 KB within preceding blocks.
169 In order to achieve this, it is necessary to start creating the LZ4 Data Structure, thanks to the function :
170
171 void* LZ4_create (const char* inputBuffer);
172 The result of the function is the (void*) pointer on the LZ4 Data Structure.
173 This pointer will be needed in all other functions.
174 If the pointer returned is NULL, then the allocation has failed, and compression must be aborted.
175 The only parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
176 The input buffer must be already allocated, and size at least 192KB.
177 'inputBuffer' will also be the 'const char* source' of the first block.
178
179 All blocks are expected to lay next to each other within the input buffer, starting from 'inputBuffer'.
180 To compress each block, use either LZ4_compress_continue() or LZ4_compress_limitedOutput_continue().
181 Their behavior are identical to LZ4_compress() or LZ4_compress_limitedOutput(),
182 but require the LZ4 Data Structure as their first argument, and check that each block starts right after the previous one.
183 If next block does not begin immediately after the previous one, the compression will fail (return 0).
184
185 When it's no longer possible to lay the next block after the previous one (not enough space left into input buffer), a call to :
186 char* LZ4_slideInputBuffer(void* LZ4_Data);
187 must be performed. It will typically copy the latest 64KB of input at the beginning of input buffer.
188 Note that, for this function to work properly, minimum size of an input buffer must be 192KB.
189 ==> The memory position where the next input data block must start is provided as the result of the function.
190
191 Compression can then resume, using LZ4_compress_continue() or LZ4_compress_limitedOutput_continue(), as usual.
192
193 When compression is completed, a call to LZ4_free() will release the memory used by the LZ4 Data Structure.
194 */
195
196 int LZ4_sizeofStreamState();
197 int LZ4_resetStreamState(void* state, const char* inputBuffer);
198
199 /*
200 These functions achieve the same result as :
201 void* LZ4_create (const char* inputBuffer);
202
203 They are provided here to allow the user program to allocate memory using its own routines.
204
205 To know how much space must be allocated, use LZ4_sizeofStreamState();
206 Note also that space must be 4-bytes aligned.
207
208 Once space is allocated, you must initialize it using : LZ4_resetStreamState(void* state, const char* inputBuffer);
209 void* state is a pointer to the space allocated.
210 It must be aligned on 4-bytes boundaries, and be large enough.
211 The parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
212 The input buffer must be already allocated, and size at least 192KB.
213 'inputBuffer' will also be the 'const char* source' of the first block.
214
215 The same space can be re-used multiple times, just by initializing it each time with LZ4_resetStreamState().
216 return value of LZ4_resetStreamState() must be 0 is OK.
217 Any other value means there was an error (typically, pointer is not aligned on 4-bytes boundaries).
218 */
219
220
221 int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int inputSize, int maxOutputSize);
222 int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int outputSize);
223
224 /*
225 *_withPrefix64k() :
226 These decoding functions work the same as their "normal name" versions,
227 but can use up to 64KB of data in front of 'char* dest'.
228 These functions are necessary to decode inter-dependant blocks.
229 */
230
231
232 //****************************
233 // Obsolete Functions
234 //****************************
235
236 static inline int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
237 static inline int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
238
239 /*
240 These functions are deprecated and should no longer be used.
241 They are provided here for compatibility with existing user programs.
242 */
243
333 int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
334 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
335
336 /* Obsolete decompression functions */
337 /* These function names are completely deprecated and must no longer be used.
338 They are only provided here for compatibility with older programs.
339 - LZ4_uncompress is the same as LZ4_decompress_fast
340 - LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe
341 These function prototypes are now disabled; uncomment them only if you really need them.
342 It is highly recommended to stop using these prototypes and migrate to maintained ones */
343 /* int LZ4_uncompress (const char* source, char* dest, int outputSize); */
344 /* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */
345
346 /* Obsolete streaming functions; use new streaming interface whenever possible */
347 LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer);
348 LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void);
349 LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer);
350 LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state);
351
352 /* Obsolete streaming decoding functions */
353 LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
354 LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
244355
245356
246357 #if defined (__cplusplus)
641641 {
642642 *bitpointer = ((currentByte & (1 << bit)) ? photometric : 1 - photometric);
643643 ++bitpointer;
644 if (byte * 8 + 7 - bit == width - 1) break;
644 // NOTE: byte * 8 + 7 - bit is promoted to int by C++'s promotion rules.
645 // The cast to unsigned should be safe given the bounds of the looping
646 // variables bit and bytes.
647 if (static_cast<unsigned>(byte * 8 + 7 - bit) == width - 1) break;
645648 }
646649 }
647650 // XXX probably right
22 INCLUDE(VigraAddTest)
33
44 ADD_SUBDIRECTORY(adjacency_list_graph)
5 ADD_SUBDIRECTORY(binary_forest)
56 ADD_SUBDIRECTORY(blockwisealgorithms)
67 ADD_SUBDIRECTORY(classifier)
78 ADD_SUBDIRECTORY(colorspaces)
1213 ADD_SUBDIRECTORY(delegates)
1314 ADD_SUBDIRECTORY(error)
1415 ADD_SUBDIRECTORY(features)
16 ADD_SUBDIRECTORY(filter_iterator)
1517 ADD_SUBDIRECTORY(filters)
1618 ADD_SUBDIRECTORY(fourier)
1719 ADD_SUBDIRECTORY(functorexpression)
3133 ADD_SUBDIRECTORY(multimorphology)
3234 ADD_SUBDIRECTORY(objectfeatures)
3335 ADD_SUBDIRECTORY(optimization)
36 ADD_SUBDIRECTORY(permutation)
3437 ADD_SUBDIRECTORY(pixeltypes)
3538 ADD_SUBDIRECTORY(polygon)
39 ADD_SUBDIRECTORY(polytope)
40 ADD_SUBDIRECTORY(random_forest_3)
3641 ADD_SUBDIRECTORY(registration)
3742 ADD_SUBDIRECTORY(sampler)
3843 ADD_SUBDIRECTORY(seededRegionGrowing3d)
406406
407407
408408 std::vector<Edge> edgeVec(begin,invalid);
409 shouldEqual(4,edgeVec.size());
409 shouldEqual(4u,edgeVec.size());
410410 shouldEqual(0,g.id(edgeVec[0]));
411411 shouldEqual(1,g.id(edgeVec[1]));
412412 shouldEqual(2,g.id(edgeVec[2]));
418418
419419 EdgeIt empty;
420420 std::vector<Edge> edgeVec(begin,empty);
421 shouldEqual(4,edgeVec.size());
421 shouldEqual(4u,edgeVec.size());
422422 shouldEqual(0,g.id(edgeVec[0]));
423423 shouldEqual(1,g.id(edgeVec[1]));
424424 shouldEqual(2,g.id(edgeVec[2]));
430430
431431 EdgeIt empty;
432432 std::vector<Edge> edgeVec(begin,empty);
433 shouldEqual(3,edgeVec.size());
433 shouldEqual(3u,edgeVec.size());
434434 shouldEqual(1,g.id(edgeVec[0]));
435435 shouldEqual(2,g.id(edgeVec[1]));
436436 shouldEqual(3,g.id(edgeVec[2]));
445445
446446 shouldEqual(std::distance(begin,end),1);
447447 std::vector<Edge> edgeVec(begin,end);
448 shouldEqual(1,edgeVec.size());
448 shouldEqual(1u,edgeVec.size());
449449 shouldEqual(1,g.id(edgeVec[0]));
450450 }
451451
457457 should(end!=lemon::INVALID);
458458
459459 std::vector<Edge> edgeVec(begin,end);
460 shouldEqual(2,edgeVec.size());
460 shouldEqual(2u,edgeVec.size());
461461 shouldEqual(1,g.id(edgeVec[0]));
462462 shouldEqual(2,g.id(edgeVec[1]));
463463 }
490490
491491
492492 std::vector<Edge> edgeVec(begin,invalid);
493 shouldEqual(4,edgeVec.size());
493 shouldEqual(4u,edgeVec.size());
494494 shouldEqual(0,g.id(edgeVec[0]));
495495 shouldEqual(1,g.id(edgeVec[1]));
496496 shouldEqual(2,g.id(edgeVec[2]));
502502
503503 EdgeIt empty;
504504 std::vector<Edge> edgeVec(begin,empty);
505 shouldEqual(4,edgeVec.size());
505 shouldEqual(4u,edgeVec.size());
506506 shouldEqual(0,g.id(edgeVec[0]));
507507 shouldEqual(1,g.id(edgeVec[1]));
508508 shouldEqual(2,g.id(edgeVec[2]));
514514
515515 EdgeIt empty;
516516 std::vector<Edge> edgeVec(begin,empty);
517 shouldEqual(3,edgeVec.size());
517 shouldEqual(3u,edgeVec.size());
518518 shouldEqual(1,g.id(edgeVec[0]));
519519 shouldEqual(2,g.id(edgeVec[1]));
520520 shouldEqual(3,g.id(edgeVec[2]));
529529
530530 shouldEqual(std::distance(begin,end),1);
531531 std::vector<Edge> edgeVec(begin,end);
532 shouldEqual(1,edgeVec.size());
532 shouldEqual(1u,edgeVec.size());
533533 shouldEqual(1,g.id(edgeVec[0]));
534534 }
535535
541541 should(end!=lemon::INVALID);
542542
543543 std::vector<Edge> edgeVec(begin,end);
544 shouldEqual(2,edgeVec.size());
544 shouldEqual(2u,edgeVec.size());
545545 shouldEqual(1,g.id(edgeVec[0]));
546546 shouldEqual(2,g.id(edgeVec[1]));
547547 }
572572
573573 should(begin!=lemon::INVALID);
574574 std::vector<Node> nodeVec(begin,invalid);
575 shouldEqual(4,nodeVec.size());
575 shouldEqual(4u,nodeVec.size());
576576 shouldEqual(0,g.id(nodeVec[0]));
577577 shouldEqual(1,g.id(nodeVec[1]));
578578 shouldEqual(2,g.id(nodeVec[2]));
584584
585585 NodeIt empty;
586586 std::vector<Node> nodeVec(begin,empty);
587 shouldEqual(4,nodeVec.size());
587 shouldEqual(4u,nodeVec.size());
588588 shouldEqual(0,g.id(nodeVec[0]));
589589 shouldEqual(1,g.id(nodeVec[1]));
590590 shouldEqual(2,g.id(nodeVec[2]));
596596
597597 NodeIt empty;
598598 std::vector<Node> nodeVec(begin,empty);
599 shouldEqual(3,nodeVec.size());
599 shouldEqual(3u,nodeVec.size());
600600 shouldEqual(1,g.id(nodeVec[0]));
601601 shouldEqual(2,g.id(nodeVec[1]));
602602 shouldEqual(3,g.id(nodeVec[2]));
611611
612612 shouldEqual(std::distance(begin,end),1);
613613 std::vector<Node> nodeVec(begin,end);
614 shouldEqual(1,nodeVec.size());
614 shouldEqual(1u,nodeVec.size());
615615 shouldEqual(1,g.id(nodeVec[0]));
616616 }
617617
623623 should(end!=lemon::INVALID);
624624
625625 std::vector<Node> nodeVec(begin,end);
626 shouldEqual(2,nodeVec.size());
626 shouldEqual(2u,nodeVec.size());
627627 shouldEqual(1,g.id(nodeVec[0]));
628628 shouldEqual(2,g.id(nodeVec[1]));
629629 }
660660
661661 should(begin!=lemon::INVALID);
662662 std::vector<Node> nodeVec(begin,invalid);
663 shouldEqual(4,nodeVec.size());
663 shouldEqual(4u,nodeVec.size());
664664 shouldEqual(1,g.id(nodeVec[0]));
665665 shouldEqual(2,g.id(nodeVec[1]));
666666 shouldEqual(3,g.id(nodeVec[2]));
672672
673673 NodeIt empty;
674674 std::vector<Node> nodeVec(begin,empty);
675 shouldEqual(4,nodeVec.size());
675 shouldEqual(4u,nodeVec.size());
676676 shouldEqual(1,g.id(nodeVec[0]));
677677 shouldEqual(2,g.id(nodeVec[1]));
678678 shouldEqual(3,g.id(nodeVec[2]));
684684
685685 NodeIt empty;
686686 std::vector<Node> nodeVec(begin,empty);
687 shouldEqual(3,nodeVec.size());
687 shouldEqual(3u,nodeVec.size());
688688 shouldEqual(2,g.id(nodeVec[0]));
689689 shouldEqual(3,g.id(nodeVec[1]));
690690 shouldEqual(4,g.id(nodeVec[2]));
699699
700700 shouldEqual(std::distance(begin,end),1);
701701 std::vector<Node> nodeVec(begin,end);
702 shouldEqual(1,nodeVec.size());
702 shouldEqual(1u,nodeVec.size());
703703 shouldEqual(2,g.id(nodeVec[0]));
704704 }
705705
711711 should(end!=lemon::INVALID);
712712
713713 std::vector<Node> nodeVec(begin,end);
714 shouldEqual(2,nodeVec.size());
714 shouldEqual(2u,nodeVec.size());
715715 shouldEqual(2,g.id(nodeVec[0]));
716716 shouldEqual(3,g.id(nodeVec[1]));
717717 }
10301030 should(begin!=lemon::INVALID);
10311031 shouldEqual(std::distance(begin,invalid),8);
10321032 std::vector<Arc> arcVec(begin,invalid);
1033 shouldEqual(8,arcVec.size());
1033 shouldEqual(8u,arcVec.size());
10341034 shouldEqual(0,g.id(arcVec[0]));
10351035 shouldEqual(1,g.id(arcVec[1]));
10361036 shouldEqual(2,g.id(arcVec[2]));
10461046
10471047 ArcIt empty;
10481048 std::vector<Arc> arcVec(begin,empty);
1049 shouldEqual(8,arcVec.size());
1049 shouldEqual(8u,arcVec.size());
10501050 shouldEqual(0,g.id(arcVec[0]));
10511051 shouldEqual(1,g.id(arcVec[1]));
10521052 shouldEqual(2,g.id(arcVec[2]));
10621062
10631063 ArcIt empty;
10641064 std::vector<Arc> arcVec(begin,empty);
1065 shouldEqual(7,arcVec.size());
1065 shouldEqual(7u,arcVec.size());
10661066 shouldEqual(1,g.id(arcVec[0]));
10671067 shouldEqual(2,g.id(arcVec[1]));
10681068 shouldEqual(3,g.id(arcVec[2]));
10811081
10821082 shouldEqual(std::distance(begin,end),1);
10831083 std::vector<Arc> arcVec(begin,end);
1084 shouldEqual(1,arcVec.size());
1084 shouldEqual(1u,arcVec.size());
10851085 shouldEqual(1,g.id(arcVec[0]));
10861086 }
10871087
10931093 should(end!=lemon::INVALID);
10941094
10951095 std::vector<Arc> arcVec(begin,end);
1096 shouldEqual(2,arcVec.size());
1096 shouldEqual(2u,arcVec.size());
10971097 shouldEqual(1,g.id(arcVec[0]));
10981098 shouldEqual(2,g.id(arcVec[1]));
10991099 }
12021202 shouldEqual(std::distance(a,b),2);
12031203
12041204 std::set<Edge> eSet(a,b);
1205 shouldEqual(eSet.size(),2);
1205 shouldEqual(eSet.size(),2u);
12061206 should(eSet.find(e13)!=eSet.end());
12071207 should(eSet.find(e12)!=eSet.end());
12081208 should(eSet.find(e34)==eSet.end());
12171217 shouldEqual(std::distance(a,b),2);
12181218
12191219 std::set<Edge> eSet(a,b);
1220 shouldEqual(eSet.size(),2);
1220 shouldEqual(eSet.size(),2u);
12211221 should(eSet.find(e12)!=eSet.end());
12221222 should(eSet.find(e24)!=eSet.end());
12231223 should(eSet.find(e34)==eSet.end());
12321232 shouldEqual(std::distance(a,b),2);
12331233
12341234 std::set<Edge> eSet(a,b);
1235 shouldEqual(eSet.size(),2);
1235 shouldEqual(eSet.size(),2u);
12361236 should(eSet.find(e13)!=eSet.end());
12371237 should(eSet.find(e34)!=eSet.end());
12381238 should(eSet.find(e12)==eSet.end());
12471247 shouldEqual(std::distance(a,b),2);
12481248
12491249 std::set<Edge> eSet(a,b);
1250 shouldEqual(eSet.size(),2);
1250 shouldEqual(eSet.size(),2u);
12511251 should(eSet.find(e24)!=eSet.end());
12521252 should(eSet.find(e34)!=eSet.end());
12531253 should(eSet.find(e12)==eSet.end());
12811281 should(a!=b);
12821282 should(b==lemon::INVALID);
12831283 should(a!=lemon::INVALID);
1284 shouldEqual(std::distance(a,b),2);
1284 shouldEqual(std::distance(a,b),2u);
12851285
12861286 std::set<Edge> eSet(a,b);
1287 shouldEqual(eSet.size(),2);
1287 shouldEqual(eSet.size(),2u);
12881288 should(eSet.find(e13)!=eSet.end());
12891289 should(eSet.find(e12)!=eSet.end());
12901290 should(eSet.find(e34)==eSet.end());
12991299 shouldEqual(std::distance(a,b),2);
13001300
13011301 std::set<Edge> eSet(a,b);
1302 shouldEqual(eSet.size(),2);
1302 shouldEqual(eSet.size(),2u);
13031303 should(eSet.find(e12)!=eSet.end());
13041304 should(eSet.find(e24)!=eSet.end());
13051305 should(eSet.find(e34)==eSet.end());
13141314 shouldEqual(std::distance(a,b),2);
13151315
13161316 std::set<Edge> eSet(a,b);
1317 shouldEqual(eSet.size(),2);
1317 shouldEqual(eSet.size(),2u);
13181318 should(eSet.find(e13)!=eSet.end());
13191319 should(eSet.find(e34)!=eSet.end());
13201320 should(eSet.find(e12)==eSet.end());
13291329 shouldEqual(std::distance(a,b),2);
13301330
13311331 std::set<Edge> eSet(a,b);
1332 shouldEqual(eSet.size(),2);
1332 shouldEqual(eSet.size(),2u);
13331333 should(eSet.find(e24)!=eSet.end());
13341334 should(eSet.find(e34)!=eSet.end());
13351335 should(eSet.find(e12)==eSet.end());
0 VIGRA_ADD_TEST(test_binary_forest test.cxx)
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #include <vigra/binary_forest.hxx>
35 #include <vigra/unittest.hxx>
36
37 using namespace vigra;
38
39 struct BinaryForestTests
40 {
41 typedef BinaryForest Graph;
42 typedef Graph::Node Node;
43 typedef Graph::Arc Arc;
44
45 void test_basic_attributes()
46 {
47 Graph gr;
48 Node n0 = gr.addNode();
49 Node n1 = gr.addNode();
50 Node n2 = gr.addNode();
51 Node n3 = gr.addNode();
52 Node n4 = gr.addNode();
53 Node n5 = gr.addNode();
54 Arc a01 = gr.addArc(n0, n1);
55 Arc a02 = gr.addArc(n0, n2);
56 Arc a13 = gr.addArc(n1, n3);
57 Arc a14 = gr.addArc(n1, n4);
58 Arc a25 = gr.addArc(n2, n5);
59
60 should(n0 != n1);
61 should(n0 != n2);
62 should(n0 != n3);
63 should(n0 != n4);
64 should(n0 != n5);
65 should(n1 != n2);
66 should(n1 != n3);
67 should(n1 != n4);
68 should(n1 != n5);
69 should(n2 != n3);
70 should(n2 != n4);
71 should(n2 != n5);
72 should(n3 != n4);
73 should(n3 != n5);
74 should(n4 != n5);
75
76 should(a01 != a02);
77 should(a01 != a13);
78 should(a01 != a14);
79 should(a01 != a25);
80 should(a02 != a13);
81 should(a02 != a14);
82 should(a02 != a25);
83 should(a13 != a14);
84 should(a13 != a25);
85 should(a14 != a25);
86
87 should(gr.numNodes() == 6);
88 should(gr.numArcs() == 5);
89 should(gr.maxNodeId() == 5);
90 should(gr.maxArcId() == 11);
91
92 should(gr.source(a01) == n0);
93 should(gr.source(a02) == n0);
94 should(gr.source(a13) == n1);
95 should(gr.source(a14) == n1);
96 should(gr.source(a25) == n2);
97
98 should(gr.target(a01) == n1);
99 should(gr.target(a02) == n2);
100 should(gr.target(a13) == n3);
101 should(gr.target(a14) == n4);
102 should(gr.target(a25) == n5);
103
104 should(gr.valid(n0));
105 should(gr.valid(n1));
106 should(gr.valid(n2));
107 should(gr.valid(n3));
108 should(gr.valid(n4));
109 should(gr.valid(n5));
110 should(!gr.valid(Node(lemon::INVALID)));
111 should(!gr.valid(Node(123)));
112
113 should(gr.valid(a01));
114 should(gr.valid(a02));
115 should(gr.valid(a13));
116 should(gr.valid(a14));
117 should(gr.valid(a25));
118 should(!gr.valid(Arc(lemon::INVALID)));
119 should(!gr.valid(Arc(123)));
120
121 should(gr.inDegree(n0) == 0);
122 should(gr.inDegree(n1) == 1);
123 should(gr.inDegree(n2) == 1);
124 should(gr.inDegree(n3) == 1);
125 should(gr.inDegree(n4) == 1);
126 should(gr.inDegree(n5) == 1);
127
128 should(gr.outDegree(n0) == 2);
129 should(gr.outDegree(n1) == 2);
130 should(gr.outDegree(n2) == 1);
131 should(gr.outDegree(n3) == 0);
132 should(gr.outDegree(n4) == 0);
133 should(gr.outDegree(n5) == 0);
134
135 should(gr.numRoots() == 1);
136 should(gr.getRoot() == n0);
137 should(!gr.valid(gr.getRoot(1)));
138 }
139
140 void test_merge()
141 {
142 Graph gr;
143 Node n0 = gr.addNode();
144 Node n1 = gr.addNode();
145 Node n2 = gr.addNode();
146 Node n3 = gr.addNode();
147 Arc a01 = gr.addArc(n0, n1);
148 Arc a02 = gr.addArc(n0, n2);
149 Arc a13 = gr.addArc(n1, n3);
150
151 Graph gr2;
152 {
153 Node n0 = gr2.addNode();
154 Node n1 = gr2.addNode();
155 Node n2 = gr2.addNode();
156 Node n3 = gr2.addNode();
157 gr2.addArc(n0, n1);
158 gr2.addArc(n1, n2);
159 gr2.addArc(n1, n3);
160 gr.merge(gr2);
161 }
162
163 should(gr.numNodes() == 8);
164 should(gr.numArcs() == 6);
165 should(gr.numRoots() == 2);
166
167 // Get the new nodes.
168 Node n4 = gr.nodeFromId(4);
169 Node n5 = gr.nodeFromId(5);
170 Node n6 = gr.nodeFromId(6);
171 Node n7 = gr.nodeFromId(7);
172
173 // Check the roots.
174 should(gr.getRoot(0) == n0);
175 should(gr.getRoot(1) == n4);
176
177 // Check that the old nodes and arcs are still correct.
178 should(gr.valid(n0));
179 should(gr.valid(n1));
180 should(gr.valid(n2));
181 should(gr.valid(n3));
182 should(gr.valid(a01));
183 should(gr.valid(a02));
184 should(gr.valid(a13));
185 should(gr.getChild(n0, 0) == n1);
186 should(gr.getChild(n0, 1) == n2);
187 should(gr.getChild(n1, 0) == n3);
188 should(gr.numChildren(n1) == 1);
189 should(gr.numChildren(n2) == 0);
190 should(gr.numChildren(n3) == 0);
191 should(gr.numParents(n0) == 0);
192
193 // Check the new nodes and arcs are still correct.
194 should(gr.getChild(n4, 0) == n5);
195 should(gr.getChild(n5, 0) == n6);
196 should(gr.getChild(n5, 1) == n7);
197 should(gr.numChildren(n4) == 1);
198 should(gr.numChildren(n5) == 2);
199 should(gr.numChildren(n6) == 0);
200 should(gr.numChildren(n7) == 0);
201 should(gr.numParents(n4) == 0);
202 }
203
204 template <ContainerTag CTag>
205 void test_property_map()
206 {
207 PropertyMap<Node, int, CTag> m;
208 Node n0(2);
209 Node n1(5);
210 Node n2(10);
211 Node n3(27);
212 m.insert(n0, 27);
213 m.insert(n1, 12);
214 m.insert(n2, 73);
215
216 should(m.size() == 3);
217 should(m.at(n0) == 27);
218 should(m.at(n1) == 12);
219 should(m.at(n2) == 73);
220 should(m[n0] == 27);
221 should(m[n1] == 12);
222 should(m[n2] == 73);
223
224 {
225 auto it = m.find(n0);
226 should(it != m.end());
227 should(it->first == n0);
228 should(it->second == 27);
229 should(m.find(n3) == m.end());
230 }
231
232 {
233 PropertyMap<Node, int, CTag> m2;
234 m2 = m;
235 should(m2.size() == 3);
236 should(m[n0] == 27);
237 should(m[n1] == 12);
238 should(m[n2] == 73);
239 }
240
241 {
242 std::vector<Node> keys, keys_expected;// = {n0, n1, n2};
243 keys_expected.push_back(n0);
244 keys_expected.push_back(n1);
245 keys_expected.push_back(n2);
246 std::vector<int> values, values_expected;// = {27, 12, 73};
247 values_expected.push_back(27);
248 values_expected.push_back(12);
249 values_expected.push_back(73);
250 for (auto const & p : m)
251 {
252 keys.push_back(p.first);
253 values.push_back(p.second);
254 }
255 shouldEqualSequence(keys.begin(), keys.end(), keys_expected.begin());
256 shouldEqualSequence(values.begin(), values.end(), values_expected.begin());
257 }
258
259 m.erase(n1);
260 should(m.size() == 2);
261 should(m.at(n0) == 27);
262 should(m.at(n2) == 73);
263 should(m[n0] == 27);
264 should(m[n2] == 73);
265
266 {
267 std::vector<Node> keys, keys_expected;// = {n0, n2};
268 keys_expected.push_back(n0);
269 keys_expected.push_back(n2);
270 std::vector<int> values, values_expected;// = {27, 73};
271 values_expected.push_back(27);
272 values_expected.push_back(73);
273 for (auto const & p : m)
274 {
275 keys.push_back(p.first);
276 values.push_back(p.second);
277 }
278 shouldEqualSequence(keys.begin(), keys.end(), keys_expected.begin());
279 shouldEqualSequence(values.begin(), values.end(), values_expected.begin());
280 }
281
282 m.clear();
283 should(m.size() == 0);
284 }
285 };
286
287 struct BinaryForestTestSuite : public test_suite
288 {
289 BinaryForestTestSuite()
290 :
291 test_suite("BinaryForest test")
292 {
293 add(testCase(&BinaryForestTests::test_basic_attributes));
294 add(testCase(&BinaryForestTests::test_merge));
295 add(testCase(&BinaryForestTests::test_property_map<MapTag>));
296 add(testCase(&BinaryForestTests::test_property_map<IndexVectorTag>));
297 add(testCase(&BinaryForestTests::test_property_map<VectorTag>));
298 }
299 };
300
301 int main(int argc, char** argv)
302 {
303 BinaryForestTestSuite forest_test;
304 int failed = forest_test.run(testsToBeExecuted(argc, argv));
305 std::cout << forest_test.report() << std::endl;
306 return (failed != 0);
307 }
157157 array_fives.push_back(Array5(Shape5(1)));
158158 array_fives.push_back(Array5(Shape5(2,2,3,4,3)));
159159 array_fives.push_back(Array5(Shape5(5,6,2,2,3)));
160 for(int i = 0; i != array_fives.size(); ++i)
160 for(decltype(array_fives.size()) i = 0; i != array_fives.size(); ++i)
161161 {
162162 fillRandom(array_fives[i].begin(), array_fives[i].end(), 3);
163163 }
168168 array_twos.push_back(Array2(Shape2(4,4)));
169169 array_twos.push_back(Array2(Shape2(6,10)));
170170 array_twos.push_back(Array2(Shape2(19,25)));
171 for(int i = 0; i != array_twos.size(); ++i)
171 for(decltype(array_twos.size()) i = 0; i != array_twos.size(); ++i)
172172 {
173173 fillRandom(array_twos[i].begin(), array_twos[i].end(), 3);
174174 }
178178 array_ones.push_back(Array1(Shape1(47)));
179179 array_ones.push_back(Array1(Shape1(81)));
180180 array_ones.push_back(Array1(Shape1(997)));
181 for(int i = 0; i != array_ones.size(); ++i)
181 for(decltype(array_ones.size()) i = 0; i != array_ones.size(); ++i)
182182 {
183183 fillRandom(array_ones[i].begin(), array_ones[i].end(), 3);
184184 }
6161 data_sets.push_back(Array(Shape(5)));
6262 data_sets.push_back(Array(Shape(997)));
6363 data_sets.push_back(Array(Shape(10000)));
64 for(int i = 0; i != data_sets.size(); ++i)
64 for(decltype(data_sets.size()) i = 0; i != data_sets.size(); ++i)
6565 {
6666 fillRandom(data_sets[i].begin(), data_sets[i].end(), 3);
6767 }
6868
69 for(int i = 0; i != data_sets.size(); ++i)
69 for(decltype(data_sets.size()) i = 0; i != data_sets.size(); ++i)
7070 {
7171 const Array& data = data_sets[i];
7272
109109 data_sets.push_back(Array(Shape(6)));
110110 data_sets.push_back(Array(Shape(1, 10, 100, 1)));
111111
112 for(int i = 0; i != data_sets.size(); ++i)
112 for(decltype(data_sets.size()) i = 0; i != data_sets.size(); ++i)
113113 {
114114 fillRandom(data_sets[i].begin(), data_sets[i].end(), 3);
115115 }
125125 neighborhoods.push_back(DirectNeighborhood);
126126 neighborhoods.push_back(IndirectNeighborhood);
127127
128 for(int i = 0; i != data_sets.size(); ++i)
128 for(decltype(data_sets.size()) i = 0; i != data_sets.size(); ++i)
129129 {
130130 const Array& data = data_sets[i];
131 for(int j = 0; j != block_shapes.size(); ++j)
131 for(decltype(block_shapes.size()) j = 0; j != block_shapes.size(); ++j)
132132 {
133133 const Shape& block_shape = block_shapes[j];
134 for(int k = 0; k != neighborhoods.size(); ++k)
134 for(decltype(neighborhoods.size()) k = 0; k != neighborhoods.size(); ++k)
135135 {
136136 NeighborhoodType neighborhood = neighborhoods[k];
137137
3737
3838 #include <cstdlib>
3939 #include <vector>
40
41 #ifdef __GNUC__
42 #pragma GCC diagnostic push
43 #pragma GCC diagnostic ignored "-Wsign-compare"
44 #endif
4045
4146 template <class Iterator1,class Iterator2>
4247 bool equivalentLabels(Iterator1 begin1, Iterator1 end1,
8085 *begin = rand() % maximum;
8186 }
8287
88 #ifdef __GNUC__
89 #pragma GCC diagnostic pop
90 #endif
91
92
8393 #endif // VIGRA_BLOCKWISE_ALGORITHMS_TEST_UTILS_HXX
00 if(HDF5_FOUND)
1 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
2
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
32 ADD_DEFINITIONS(${HDF5_CPPFLAGS} -DHasHDF5)
3
44 VIGRA_ADD_TEST(test_classifier test.cxx LIBRARIES vigraimpex ${HDF5_LIBRARIES})
5 VIGRA_ADD_TEST(classifier_speed_comparison speed_comparison.cxx LIBRARIES ${HDF5_LIBRARIES})
56 else()
67 MESSAGE(STATUS "** WARNING: test_classifier::RFHDF5Test() will not be executed")
8
79 VIGRA_ADD_TEST(test_classifier test.cxx )
10 VIGRA_ADD_TEST(classifier_speed_comparison speed_comparison.cxx)
811 endif()
9
10 VIGRA_ADD_TEST(classifier_speed_comparison speed_comparison.cxx)
1112
1213 add_subdirectory(data)
1314
99
1010
1111
12 int main(int argc, char ** argv)
12 int main(int /*argc*/, char ** /*argv*/)
1313 {
1414 typedef MultiArrayShape<2>::type Shp;
1515 MultiArray<2, double> features(Shp(1000, 50), 0.0);
5353
5454 //TODO split must be const
5555 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
56 void visit_after_split( Tree & tree,
56 void visit_after_split( Tree & /*tree*/,
5757 Split & split,
5858 Region & parent,
5959 Region & leftChild,
6060 Region & rightChild,
61 Feature_t & features,
62 Label_t & labels)
61 Feature_t & /*features*/,
62 Label_t & /*labels*/)
6363 {
6464 if(split.createNode().typeID() == i_ThresholdNode)
6565 {
8585 }
8686
8787 template<class RF, class PR, class SM, class ST>
88 void visit_after_tree( RF& rf, PR & pr, SM & sm, ST & st, int index)
88 void visit_after_tree( RF& /*rf*/, PR & /*pr*/, SM & /*sm*/, ST & /*st*/, int index)
8989 {
9090 fout << std::endl << std::endl << "Tree Number: " << index << " finished." << std::endl << std::endl;
9191 }
105105
106106
107107 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
108 void visit_after_split( Tree & tree,
108 void visit_after_split( Tree & /*tree*/,
109109 Split & split,
110110 Region & parent,
111111 Region & leftChild,
112112 Region & rightChild,
113 Feature_t & features,
114 Label_t & labels)
113 Feature_t & /*features*/,
114 Label_t & /*labels*/)
115115 {
116116 if(split.createNode().typeID() == i_ThresholdNode)
117117 {
132132 }
133133
134134 template<class RF, class PR, class SM, class ST>
135 void visit_after_tree( RF& rf, PR & pr, SM & sm, ST & st, int index)
135 void visit_after_tree( RF& /*rf*/, PR & /*pr*/, SM & /*sm*/, ST & /*st*/, int /*index*/)
136136 {
137137 treesset.insert(sout.str());
138138 sout.str(std::string());
139139 }
140140
141141 template<class RF, class PR>
142 void visit_at_end(RF & rf, PR & pr)
142 void visit_at_end(RF & /*rf*/, PR & /*pr*/)
143143 {
144144 std::ofstream fout("setTest.log");
145145 std::set<std::string>::iterator iter;
171171
172172 //TODO split must be const
173173 template<class Tree, class Split, class Region, class Feature_t, class Label_t>
174 void visit_after_split( Tree & tree,
174 void visit_after_split( Tree & /*tree*/,
175175 Split & split,
176176 Region & parent,
177177 Region & leftChild,
216216 }
217217
218218 template<class RF, class PR, class SM, class ST>
219 void visit_after_tree( RF& rf, PR & pr, SM & sm, ST & st, int index)
219 void visit_after_tree( RF& /*rf*/, PR & /*pr*/, SM & /*sm*/, ST & /*st*/, int index)
220220 {
221221 fout << std::endl << std::endl << "Tree Number: " << index << " finished." << std::endl << std::endl;
222222 }
2828 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
2929 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
3030 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
3232 /* */
3333 /************************************************************************/
3434
5050
5151 vigra::DImage getSymmetricLine(bool transposed = false){
5252 vigra::DImage src;
53
53
5454 if(transposed)
5555 src.resize(1, 40);
5656 else
113113 typedef vigra::DRGBImage RGBImage;
114114
115115 ConvolutionTest()
116 : constimg(5,5),
117 rampimg(5,1),
118 sym_image(getSymmetricImage()),
116 : constimg(5,5),
117 rampimg(5,1),
118 sym_image(getSymmetricImage()),
119119 unsym_image(getUnsymmetricImage())
120120 {
121121 constimg.init(1.0);
122
122
123123 vigra::Kernel1D<double> binom1;
124124 binom1.initBinomial(1);
125125 sym_kernel.initSeparable(binom1, binom1);
131131
132132 line_kernel.initExplicitly(Diff2D(-2,0), Diff2D(2,0)) = 1, 4, 12, 4, 1 ;
133133 line_kernel.normalize(1);
134
134
135135 ImageImportInfo info("lenna128.xv");
136136
137137 lenna.resize(info.width(), info.height());
140140 Image::ScanOrderIterator i = rampimg.begin();
141141 Image::ScanOrderIterator end = rampimg.end();
142142 Image::Accessor acc = rampimg.accessor();
143
143
144144 for(int k=0; i != end; ++i, ++k)
145145 {
146146 acc.set(k, i);
254254 shouldEqualSequence(out, out+outsize-ksize, r);
255255 }
256256 }
257
257
258258 void initExplicitlyTest()
259259 {
260260 vigra::Kernel1D<double> k;
261261 k.initExplicitly(-1,2) = 1,2,3,4;
262
262
263263 shouldEqual(k.left(), -1);
264264 shouldEqual(k.right(), 2);
265265 shouldEqual(k[-1], 1);
266266 shouldEqual(k[0], 2);
267267 shouldEqual(k[1], 3);
268268 shouldEqual(k[2], 4);
269
269
270270 k.initExplicitly(-2,1) = -2;
271271 shouldEqual(k.left(), -2);
272272 shouldEqual(k.right(), 1);
320320 std::string message(c.what());
321321 should(0 == expected.compare(message.substr(0,expected.size())));
322322 }
323
324 double data[] = { 1, 2, 4,
325 5, 11, 3,
326 6, 8, 7 };
327 BasicImage<double> kernel_image(3,3, data);
328 Kernel2D<double> kernel_from_image;
329 kernel_from_image.initExplicitly(kernel_image);
330 unsym_kernel.normalize(kernel_from_image.norm());
331
332 for(int y=-1; y<=1; ++y)
333 for(int x=-1; x<=1; ++x)
334 shouldEqualTolerance(unsym_kernel(x,y), kernel_from_image(x,y), 1e-14);
335
323336 }
324337
325338 void simpleSharpeningTest()
378391 }
379392
380393 void stdConvolutionTestOnConstImage()
381 {
394 {
382395 Image tmp_clip(constimg);
383396 tmp_clip = 0.0;
384397 Image tmp_wrap(constimg);
395408
396409 sym_kernel.setBorderTreatment(BORDER_TREATMENT_AVOID);
397410 convolveImage(View(constimg), View(tmp_avoid), sym_kernel);
398
411
399412 sym_kernel.setBorderTreatment(BORDER_TREATMENT_WRAP);
400413 convolveImage(View(constimg), View(tmp_wrap), sym_kernel);
401414
404417
405418 sym_kernel.setBorderTreatment(BORDER_TREATMENT_REFLECT);
406419 convolveImage(View(constimg), View(tmp_reflect), sym_kernel);
407
420
408421 Image::ScanOrderIterator i_src = constimg.begin();
409422 Image::ScanOrderIterator i_src_end = constimg.end();
410423 Image::ScanOrderIterator i_clip = tmp_clip.begin();
413426 Image::ScanOrderIterator i_repeat = tmp_repeat.begin();
414427 Image::ScanOrderIterator i_reflect = tmp_reflect.begin();
415428 Image::Accessor acc = constimg.accessor();
416
429
417430 for(int y = 0; i_src != i_src_end; y++){
418431 for(int x = 0; x < constimg.size().x; x++, ++i_src, ++i_clip, ++i_wrap, ++i_repeat, ++i_reflect, ++i_avoid){
419432 should(acc(i_src) == acc(i_clip));
421434 should(acc(i_src) == acc(i_repeat));
422435 should(acc(i_src) == acc(i_reflect));
423436 if(x != 0 && y != 0 && x != 4 && y != 4){
424 should(acc(i_src) == acc(i_avoid));
437 should(acc(i_src) == acc(i_avoid));
425438 }else{
426439 should(acc(i_avoid) == 0);
427440 }
435448 // exportImage(srcImageRange(dest_lenna), ImageExportInfo("lenna_convolve_128x120.xv"));
436449
437450 }
438
451
439452
440453 void stdConvolutionTestWithAvoid()
441454 {
462475 }
463476
464477 }
465
478
466479
467480 void stdConvolutionTestWithZeropad()
468481 {
470483 dest.init(42.1);
471484 ref.init(33.2);
472485
473 convolveImage(srcImageRange(sym_image, Rect2D(Point2D(1,1), sym_image.size()-Size2D(2,2))),
486 convolveImage(srcImageRange(sym_image, Rect2D(Point2D(1,1), sym_image.size()-Size2D(2,2))),
474487 destImage(dest), kernel2d(sym_kernel, BORDER_TREATMENT_ZEROPAD));
475488
476489 initImageBorder(destImageRange(sym_image), 1, 0);
491504 }
492505 }
493506 }
494
507
495508 void stdConvolutionTestWithClip()
496509 {
497510 Image dest(sym_image);
516529 shouldEqualTolerance (acc(i_dest_2D + Diff2D(3,2)), 6.05852, 1E-4);
517530 shouldEqualTolerance (acc(i_dest_2D + Diff2D(6,4)), 9.14363, 1E-4);
518531 }
519
532
520533 void stdConvolutionTestWithWrap()
521534 {
522535 Image dest(unsym_image);
541554 shouldEqualTolerance (acc(i_dest_2D + Diff2D(2,3)), 33.0798, 1E-5);
542555 shouldEqualTolerance (acc(i_dest_2D + Diff2D(6,4)), 48.4841, 1E-5);
543556 }
544
557
545558 void stdConvolutionTestWithReflect()
546559 {
547560 Image dest(unsym_image);
566579 shouldEqualTolerance (acc(i_dest_2D + Diff2D(2,3)), 33.0798, 1E-5);
567580 shouldEqualTolerance (acc(i_dest_2D + Diff2D(6,4)), 48.4841, 1E-5);
568581 }
569
582
570583 void stdConvolutionTestWithRepeat()
571584 {
572585 Image dest(unsym_image);
594607
595608 void stdConvolutionTestFromWrapWithReflect()
596609 {
597
610
598611 Image src_wrap(78, 1);
599612 Image src_reflect(40, 1);
600613 Image dest_wrap(src_wrap);
612625 for (int j = 38 ; j >= 1 ; j--, iter_src_wrap++){
613626 acc_src_wrap.set( j + 0.25, iter_src_wrap);
614627 }
615
628
616629 convolveImage(srcImageRange(src_wrap), destImage(dest_wrap), kernel2d(line_kernel, BORDER_TREATMENT_WRAP));
617630 convolveImage(srcImageRange(src_reflect), destImage(dest_reflect), kernel2d(line_kernel, BORDER_TREATMENT_REFLECT));
618631
628641 iter_dest_reflect++;
629642 }
630643 }
631
644
632645 void stdConvolutionTestFromRepeatWithAvoid()
633646 {
634647 Image src_avoid(40, 1);
635648 src_avoid.init(2.47);
636649 Image src_repeat(36, 1);
637
650
638651 Image dest_repeat(src_repeat);
639652 Image dest_avoid(src_avoid);
640653
679692 }
680693 }
681694 }
682
695
683696 /**
684 * Es wird die Positionierung der einzelnen
697 * Es wird die Positionierung der einzelnen
685698 * Punkte relativ zueinander getestet.
686699 */
687700 void stdConvolutionTestOfAllTreatmentsRelatively(){
743756 {
744757 vigra::Kernel1D<double> binom;
745758 binom.initBinomial(2);
746
759
747760 vigra::Kernel1D<double>::Iterator center = binom.center();
748
761
749762 should(center[0] == 0.375);
750763 should(center[-1] == 0.25);
751764 should(center[1] == 0.25);
752765 should(center[-2] == 0.0625);
753766 should(center[2] == 0.0625);
754
767
755768 binom.initBinomial(1);
756
769
757770 center = binom.center();
758
771
759772 should(center[0] == 0.5);
760773 should(center[-1] == 0.25);
761774 should(center[1] == 0.25);
762
775
763776 Image tmp1(constimg);
764777 Image tmp2(constimg);
765778 tmp2 = 0.0;
766
779
767780 separableConvolveX(srcImageRange(constimg), destImage(tmp1), kernel1d(binom));
768781 separableConvolveY(srcImageRange(tmp1), destImage(tmp2), kernel1d(binom));
769
782
770783 Image::ScanOrderIterator i1 = constimg.begin();
771784 Image::ScanOrderIterator i1end = constimg.end();
772785 Image::ScanOrderIterator i2 = tmp2.begin();
773786 Image::Accessor acc = constimg.accessor();
774
787
775788 for(; i1 != i1end; ++i1, ++i2)
776789 {
777790 should(acc(i1) == acc(i2));
778791 }
779792 }
780
793
781794 void separableDerivativeRepeatTest()
782795 {
783796 vigra::Kernel1D<double> grad;
784797 grad.initSymmetricGradient();
785
798
786799 Image tmp1(rampimg);
787800 tmp1 = 0.0;
788801 Image tmp2(constimg);
789
802
790803 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(grad));
791804 separableConvolveX(srcImageRange(constimg), destImage(tmp2), kernel1d(grad));
792
805
793806 Image::ScanOrderIterator i1 = tmp1.begin();
794807 Image::ScanOrderIterator i2 = tmp2.begin();
795808 Image::Accessor acc = tmp1.accessor();
796
809
797810 should(acc(i1) == 0.5);
798811 should(acc(i2) == 0.0);
799812 ++i1;
813826 should(acc(i1) == 0.5);
814827 should(acc(i2) == 0.0);
815828 }
816
829
817830 void separableDerivativeReflectTest()
818831 {
819832 vigra::Kernel1D<double> grad;
820833 grad.initSymmetricGradient();
821834 grad.setBorderTreatment(vigra::BORDER_TREATMENT_REFLECT);
822
835
823836 Image tmp1(rampimg);
824837 tmp1 = 1000.0;
825
838
826839 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(grad));
827
840
828841 Image::ScanOrderIterator i1 = tmp1.begin();
829842 Image::Accessor acc = tmp1.accessor();
830
843
831844 should(acc(i1) == 0.0);
832845 ++i1;
833846 should(acc(i1) == 1.0);
838851 ++i1;
839852 should(acc(i1) == 0.0);
840853 }
841
854
842855 void separableDerivativeAvoidTest()
843856 {
844857 vigra::Kernel1D<double> grad;
845858 grad.initSymmetricGradient();
846859 grad.setBorderTreatment(vigra::BORDER_TREATMENT_AVOID);
847
860
848861 Image tmp1(rampimg);
849862 tmp1 = 1000.0;
850
863
851864 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(grad));
852
865
853866 Image::ScanOrderIterator i1 = tmp1.begin();
854867 Image::Accessor acc = tmp1.accessor();
855
868
856869 should(acc(i1) == 1000.0);
857870 ++i1;
858871 should(acc(i1) == 1.0);
863876 ++i1;
864877 should(acc(i1) == 1000.0);
865878 }
866
879
867880 void separableSmoothClipTest()
868881 {
869882 vigra::Kernel1D<double> binom;
870883 binom.initBinomial(1);
871884 binom.setBorderTreatment(vigra::BORDER_TREATMENT_CLIP);
872
885
873886 Image tmp1(rampimg);
874887 tmp1 = 1000.0;
875
888
876889 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(binom));
877
890
878891 Image::ScanOrderIterator i1 = tmp1.begin();
879892 Image::Accessor acc = tmp1.accessor();
880
893
881894 should(acc(i1) == 1.0/3.0);
882895 ++i1;
883896 should(acc(i1) == 1.0);
888901 ++i1;
889902 should(acc(i1) == 11.0/3.0);
890903 }
891
904
892905 void separableSmoothZeropadTest()
893906 {
894907 vigra::Kernel1D<double> binom;
897910
898911 Image tmp1(rampimg);
899912 tmp1 = 1000.0;
900
913
901914 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(binom));
902
915
903916 Image::ScanOrderIterator i1 = tmp1.begin();
904917 Image::Accessor acc = tmp1.accessor();
905918
913926 ++i1;
914927 shouldEqual(acc(i1), 2.75);
915928 }
916
929
917930 void separableSmoothWrapTest()
918931 {
919932 vigra::Kernel1D<double> binom;
920933 binom.initBinomial(1);
921934 binom.setBorderTreatment(vigra::BORDER_TREATMENT_WRAP);
922
935
923936 Image tmp1(rampimg);
924937 tmp1 = 1000.0;
925
938
926939 separableConvolveX(srcImageRange(rampimg), destImage(tmp1), kernel1d(binom));
927
940
928941 Image::ScanOrderIterator i1 = tmp1.begin();
929942 Image::Accessor acc = tmp1.accessor();
930
943
931944 should(acc(i1) == 1.25);
932945 ++i1;
933946 should(acc(i1) == 1.0);
938951 ++i1;
939952 should(acc(i1) == 2.75);
940953 }
941
954
942955 void gaussianSmoothingTest()
943956 {
944957 double scale = 1.0;
950963
951964 separableConvolveX(srcImageRange(lenna), destImage(tmp1), kernel1d(gauss));
952965 separableConvolveY(srcImageRange(tmp1), destImage(tmp2), kernel1d(gauss));
953
966
954967 gaussianSmoothing(srcImageRange(lenna), destImage(tmp1), scale);
955
968
956969 Image::ScanOrderIterator i1 = tmp1.begin();
957970 Image::ScanOrderIterator i1end = tmp1.end();
958971 Image::ScanOrderIterator i2 = tmp2.begin();
959972 Image::Accessor acc = constimg.accessor();
960
973
961974 for(; i1 != i1end; ++i1, ++i2)
962975 {
963976 should(acc(i1) == acc(i2));
978991 i1 = tmp1.begin();
979992 i1end = tmp1.end();
980993 i2 = recursive.begin();
981
994
982995 double sum = 0.0;
983996 for(; i1 != i1end; ++i1, ++i2)
984997 {
9921005 recursiveGaussianFilterY(View(tmp1), View(tmp2), scale);
9931006 should(View(recursive) == View(tmp2));
9941007 }
995
1008
9961009 void optimalSmoothing3Test()
9971010 {
9981011 vigra::Kernel1D<double> smooth3;
10031016
10041017 separableConvolveX(srcImageRange(lenna), destImage(tmp1), kernel1d(smooth3));
10051018 separableConvolveY(srcImageRange(tmp1), destImage(tmp2), kernel1d(smooth3));
1006
1019
10071020 gaussianSmoothing(srcImageRange(lenna), destImage(tmp1), 0.68);
1008
1021
10091022 Image::ScanOrderIterator i1 = tmp1.begin();
10101023 Image::ScanOrderIterator i1end = tmp1.end();
10111024 Image::ScanOrderIterator i2 = tmp2.begin();
10121025 Image::Accessor acc = constimg.accessor();
1013
1026
10141027 for(; i1 != i1end; ++i1, ++i2)
10151028 {
10161029 shouldEqualTolerance(acc(i1), acc(i2), 1e-2);
10171030 }
10181031 }
1019
1032
10201033 void optimalSmoothing5Test()
10211034 {
10221035 vigra::Kernel1D<double> smooth5;
10271040
10281041 separableConvolveX(srcImageRange(lenna), destImage(tmp1), kernel1d(smooth5));
10291042 separableConvolveY(srcImageRange(tmp1), destImage(tmp2), kernel1d(smooth5));
1030
1043
10311044 gaussianSmoothing(srcImageRange(lenna), destImage(tmp1), 0.867);
1032
1045
10331046 Image::ScanOrderIterator i1 = tmp1.begin();
10341047 Image::ScanOrderIterator i1end = tmp1.end();
10351048 Image::ScanOrderIterator i2 = tmp2.begin();
10361049 Image::Accessor acc = constimg.accessor();
1037
1050
10381051 for(; i1 != i1end; ++i1, ++i2)
10391052 {
10401053 shouldEqualTolerance(acc(i1), acc(i2), 1e-2);
10411054 }
10421055 }
1043
1056
10441057 void separableGradientTest()
10451058 {
10461059 Image sepgrad(lenna.size());
10471060 importImage(vigra::ImageImportInfo("lenna128sepgrad.xv"), destImage(sepgrad));
1048
1061
10491062 vigra::Kernel1D<double> gauss;
10501063 gauss.initGaussian(1.0);
10511064 vigra::Kernel1D<double> grad;
10591072
10601073 separableConvolveX(srcImageRange(lenna), destImage(tmp3), kernel1d(grad));
10611074 separableConvolveY(srcImageRange(tmp3), destImage(tmp1), kernel1d(gauss));
1062
1075
10631076 separableConvolveX(srcImageRange(lenna), destImage(tmp3), kernel1d(gauss));
10641077 separableConvolveY(srcImageRange(tmp3), destImage(tmp2), kernel1d(grad));
1065
1078
10661079 Image::ScanOrderIterator i1 = tmp1.begin();
10671080 Image::ScanOrderIterator i1end = tmp1.end();
10681081 Image::ScanOrderIterator i2 = tmp2.begin();
10691082 Image::ScanOrderIterator i = sepgrad.begin();
10701083 Image::Accessor acc = constimg.accessor();
1071
1084
10721085 for(; i1 != i1end; ++i1, ++i2, ++i)
10731086 {
10741087 double grad = VIGRA_CSTD::sqrt(acc(i1)*acc(i1)+acc(i2)*acc(i2));
10861099
10871100 combineTwoImages(srcImageRange(nsgrad), srcImage(tmp1), destImage(nsgrad), Arg1() - Arg2());
10881101 Image zero(lenna.size(), 0.0);
1089 shouldEqualSequenceTolerance(nsgrad.data(), nsgrad.data()+nsgrad.width()*nsgrad.height(),
1102 shouldEqualSequenceTolerance(nsgrad.data(), nsgrad.data()+nsgrad.width()*nsgrad.height(),
10901103 zero.data(), 1e-12);
10911104 }
1092
1105
10931106 void gradientTest()
10941107 {
10951108 Image sepgrad(lenna.size());
10961109 importImage(vigra::ImageImportInfo("lenna128sepgrad.xv"), destImage(sepgrad));
1097
1098
1110
1111
10991112 Image tmpx(lenna.size());
11001113 Image tmpy(lenna.size());
11011114 Image mag(lenna.size());
11091122 Image::ScanOrderIterator ig = mag.begin();
11101123 Image::ScanOrderIterator i = sepgrad.begin();
11111124 Image::Accessor acc = constimg.accessor();
1112
1125
11131126 for(; i1 != i1end; ++i1, ++i2, ++ig, ++i)
11141127 {
11151128 double grad = VIGRA_CSTD::sqrt(acc(i1)*acc(i1)+acc(i2)*acc(i2));
11171130 shouldEqualTolerance(acc(ig)-acc(i), 0.0, 1e-12);
11181131 }
11191132 }
1120
1133
11211134 void optimalGradient3Test()
11221135 {
11231136 Image tmp(lenna.size());
11311144
11321145 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(diff));
11331146 separableConvolveY(srcImageRange(tmp), destImage(tmpx), kernel1d(smooth3));
1134
1147
11351148 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(smooth3));
11361149 separableConvolveY(srcImageRange(tmp), destImage(tmpy), kernel1d(diff));
11371150
11531166 shouldEqual(mi, 0.0);
11541167 should(std::fabs(ma - 68.0) < 1.0);
11551168 }
1156
1169
11571170 void optimalLaplacian3Test()
11581171 {
11591172 Image tmp(lenna.size());
11641177 diff.initSecondDifference3();
11651178 vigra::Kernel1D<double> smooth3;
11661179 smooth3.initOptimalSecondDerivativeSmoothing3();
1167
1180
11681181 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(diff));
11691182 separableConvolveY(srcImageRange(tmp), destImage(tmpx), kernel1d(smooth3));
1170
1183
11711184 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(smooth3));
11721185 separableConvolveY(srcImageRange(tmp), destImage(tmpy), kernel1d(diff));
11731186
11891202 should(std::fabs(mi + 120.0) < 1.0);
11901203 should(std::fabs(ma - 117.0) < 1.0);
11911204 }
1192
1205
11931206 void optimalGradient5Test()
11941207 {
11951208 Image tmp(lenna.size());
12011214 diff.initOptimalFirstDerivative5();
12021215 vigra::Kernel1D<double> smooth5;
12031216 smooth5.initOptimalFirstDerivativeSmoothing5();
1204
1217
12051218 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(diff));
12061219 separableConvolveY(srcImageRange(tmp), destImage(tmpx), kernel1d(smooth5));
1207
1220
12081221 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(smooth5));
12091222 separableConvolveY(srcImageRange(tmp), destImage(tmpy), kernel1d(diff));
12101223
12191232 }
12201233 }
12211234 }
1222
1235
12231236 void optimalLaplacian5Test()
12241237 {
12251238 Image tmp(lenna.size());
12321245 diff.initOptimalSecondDerivative5();
12331246 vigra::Kernel1D<double> smooth5;
12341247 smooth5.initOptimalSecondDerivativeSmoothing5();
1235
1248
12361249 separableConvolveX(srcImageRange(lenna), destImage(tmp), kernel1d(diff));
12371250 separableConvolveY(srcImageRange(tmp), destImage(tmpx), kernel1d(smooth5));
1238
1251
12391252 separableConvolveX(View(lenna), View(tmp), smooth5);
12401253 separableConvolveY(View(tmp), View(tmpy), diff);
12411254
12521265 }
12531266 }
12541267 }
1255
1268
12561269 void gradientRGBTest()
12571270 {
12581271 RGBImage input(lenna.size());
12591272 importImage(vigra::ImageImportInfo("lenna128rgb.xv"), destImage(input));
1260
1273
12611274 Image sepgrad(lenna.size());
12621275 importImage(vigra::ImageImportInfo("lenna128rgbsepgrad.xv"), destImage(sepgrad));
1263
1264
1276
1277
12651278 RGBImage tmpx(lenna.size());
12661279 RGBImage tmpy(lenna.size());
12671280 Image mag(lenna.size());
12691282
12701283 gaussianGradient(srcImageRange(input), destImage(tmpx), destImage(tmpy), 1.0);
12711284 gaussianGradientMagnitude(srcImageRange(input), destImage(mag), 1.0);
1272
1285
12731286 RGBImage::ScanOrderIterator i1 = tmpx.begin();
12741287 RGBImage::ScanOrderIterator i1end = tmpx.end();
12751288 RGBImage::ScanOrderIterator i2 = tmpy.begin();
12771290 Image::ScanOrderIterator i = sepgrad.begin();
12781291 RGBImage::Accessor rgb = tmpx.accessor();
12791292 Image::Accessor acc = constimg.accessor();
1280
1293
12811294 for(; i1 != i1end; ++i1, ++i2, ++ig, ++i)
12821295 {
12831296 double grad = VIGRA_CSTD::sqrt(squaredNorm(rgb(i1))+squaredNorm(rgb(i2)));
12861299 }
12871300
12881301 }
1289
1302
12901303 void hessianTest()
12911304 {
12921305 Image refxx(lenna.size());
13011314 Image resxy(lenna.size());
13021315 Image resyy(lenna.size());
13031316
1304 hessianMatrixOfGaussian(srcImageRange(lenna),
1317 hessianMatrixOfGaussian(srcImageRange(lenna),
13051318 destImage(resxx), destImage(resxy), destImage(resyy), 1.0);
1306
1319
13071320 Image::ScanOrderIterator i1 = resxx.begin();
13081321 Image::ScanOrderIterator i1end = resxx.end();
13091322 Image::ScanOrderIterator i2 = resyy.begin();
13121325 Image::ScanOrderIterator r2 = refyy.begin();
13131326 Image::ScanOrderIterator r3 = refxy.begin();
13141327 Image::Accessor acc = constimg.accessor();
1315
1316
1328
1329
13171330 for(; i1 != i1end; ++i1, ++i2, ++i3, ++r1, ++r2, ++r3)
13181331 {
13191332 shouldEqualTolerance(acc(i1)-acc(r1), 0.0, 1e-12);
13261339 Image resxy(lenna.size());
13271340 Image resyy(lenna.size());
13281341
1329 hessianMatrixOfGaussian(View(lenna),
1342 hessianMatrixOfGaussian(View(lenna),
13301343 View(resxx), View(resxy), View(resyy), 1.0);
1331
1344
13321345 Image::ScanOrderIterator i1 = resxx.begin();
13331346 Image::ScanOrderIterator i1end = resxx.end();
13341347 Image::ScanOrderIterator i2 = resyy.begin();
13371350 Image::ScanOrderIterator r2 = refyy.begin();
13381351 Image::ScanOrderIterator r3 = refxy.begin();
13391352 Image::Accessor acc = constimg.accessor();
1340
1341
1353
1354
13421355 for(; i1 != i1end; ++i1, ++i2, ++i3, ++r1, ++r2, ++r3)
13431356 {
13441357 shouldEqualTolerance(acc(i1)-acc(r1), 0.0, 1e-12);
13471360 }
13481361 }
13491362 }
1350
1363
13511364 void structureTensorTest()
13521365 {
13531366 Image resxx(lenna.size());
13561369 Image refxx(lenna.size());
13571370 Image refxy(lenna.size());
13581371 Image refyy(lenna.size());
1359
1372
13601373 typedef BasicImage<TinyVector<double, 3> > VectorImage;
13611374 VectorImage resst(lenna.size());
13621375
1363 structureTensor(View(lenna),
1376 structureTensor(View(lenna),
13641377 View(resxx), View(resxy), View(resyy), 1.0, 2.0);
13651378
13661379 structureTensor(View(lenna), MultiArrayView<2, TinyVector<double, 3> >(resst), 1.0, 2.0);
1367
1380
13681381 importImage(vigra::ImageImportInfo("lennastxx.xv"), destImage(refxx));
13691382 importImage(vigra::ImageImportInfo("lennastyy.xv"), destImage(refyy));
13701383 importImage(vigra::ImageImportInfo("lennastxy.xv"), destImage(refxy));
1371
1384
13721385 Image::ScanOrderIterator i1 = resxx.begin();
13731386 Image::ScanOrderIterator i1end = resxx.end();
13741387 Image::ScanOrderIterator i2 = resxy.begin();
13901403 shouldEqualTolerance(vacc(i4)[2], acc(r3), 1e-7);
13911404 }
13921405 }
1393
1406
13941407 void structureTensorRGBTest()
13951408 {
13961409 RGBImage input(lenna.size());
14001413 VectorImage resst(lenna.size()), refst(lenna.size());
14011414
14021415 structureTensor(srcImageRange(lenna), destImage(resst), 1.0, 2.0);
1403
1416
14041417 importImage(vigra::ImageImportInfo("lennargbst.xv"), destImage(refst));
1405
1418
14061419 VectorImage::ScanOrderIterator i1 = resst.begin();
14071420 VectorImage::ScanOrderIterator i1end = resst.end();
14081421 VectorImage::ScanOrderIterator r1 = refst.begin();
14151428 shouldEqualTolerance(vacc(i1)[2], vacc(r1)[2], 1e-7);
14161429 }
14171430 }
1418
1431
14191432 void stdConvolutionTest()
14201433 {
14211434 vigra::Kernel1D<double> binom1;
14221435 binom1.initBinomial(1);
1423
1436
14241437 vigra::Kernel2D<double> binom2;
14251438 binom2.initSeparable(binom1, binom1);
1426
1439
14271440 Image tmp1(constimg);
14281441 tmp1 = 0.0;
14291442
14301443 convolveImage(srcImageRange(constimg), destImage(tmp1), kernel2d(binom2));
1431
1444
14321445 Image::ScanOrderIterator i1 = constimg.begin();
14331446 Image::ScanOrderIterator i1end = constimg.end();
14341447 Image::ScanOrderIterator i2 = tmp1.begin();
14351448 Image::Accessor acc = constimg.accessor();
1436
1449
14371450 for(; i1 != i1end; ++i1, ++i2)
14381451 {
14391452 should(acc(i1) == acc(i2));
14401453 }
14411454 }
1442
1455
14431456 void stdVersusSeparableConvolutionTest()
14441457 {
1445
1458
14461459 vigra::Kernel1D<double> gauss1;
14471460 gauss1.initGaussian(2.0);
1448
1461
14491462 vigra::Kernel2D<double> gauss2;
14501463 gauss2.initSeparable(gauss1, gauss1);
1451
1464
14521465 Image tmp1(lenna);
14531466 tmp1 = 0.0;
14541467
14551468 convolveImage(srcImageRange(lenna), destImage(tmp1), kernel2d(gauss2));
1456
1469
14571470 Image tmp2(lenna);
14581471 Image tmp3(lenna);
14591472 tmp3 = 0.0;
1460
1473
14611474 separableConvolveX(srcImageRange(lenna), destImage(tmp2), kernel1d(gauss1));
14621475 separableConvolveY(srcImageRange(tmp2), destImage(tmp3), kernel1d(gauss1));
1463
1476
14641477 Image::Iterator y1 = tmp1.upperLeft() - gauss2.upperLeft();
14651478 Image::Iterator end = tmp1.lowerRight() - gauss2.lowerRight();
14661479 Image::Iterator y2 = tmp3.upperLeft() - gauss2.upperLeft();
14671480 Image::Accessor acc = tmp1.accessor();
1468
1481
14691482 for(; y1.y != end.y; ++y1.y, ++y2.y)
14701483 {
14711484 Image::Iterator x1 = y1;
14761489 }
14771490 }
14781491 }
1479
1492
14801493 void recursiveFilterTestWithAvoid()
14811494 {
14821495 Image src_const(25, 25);
15101523 id.x = tmp_dest.x;
15111524 }
15121525
1513 recursiveFilterY(srcImageRange(src_const), destImage(dest),
1526 recursiveFilterY(srcImageRange(src_const), destImage(dest),
15141527 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_AVOID);
15151528
15161529 is = src_const.upperLeft();
15371550 id.x = tmp_dest.x;
15381551 }
15391552
1540 // Hier wird an einem symmetrischen Bild /\ getestet
1553 // Hier wird an einem symmetrischen Bild /\ getestet
15411554 // ob die korrekten Daten eingehalten wurden.
15421555
15431556 Image src(getSymmetricLine()), srct(getSymmetricLine(true));
15441557 dest = src;
15451558 Image destt(srct);
15461559
1547 Image::value_type correct_data[40] =
1548 {0.25, 1.25, 2.25, 3.25, 4.25, 5.25, 6.25, 7.25, 8.25, 9.25,
1549 10.25, 11.249812, 12.249472, 13.248558, 14.246079, 15.239341,
1550 16.221025, 17.171238, 18.035903, 18.668023,
1551 18.668023, 18.035903, 17.171238, 16.221025, 15.239341,
1552 14.246079, 13.248558, 12.249472, 11.249812, 10.25, 9.25,
1560 Image::value_type correct_data[40] =
1561 {0.25, 1.25, 2.25, 3.25, 4.25, 5.25, 6.25, 7.25, 8.25, 9.25,
1562 10.25, 11.249812, 12.249472, 13.248558, 14.246079, 15.239341,
1563 16.221025, 17.171238, 18.035903, 18.668023,
1564 18.668023, 18.035903, 17.171238, 16.221025, 15.239341,
1565 14.246079, 13.248558, 12.249472, 11.249812, 10.25, 9.25,
15531566 8.25, 7.25, 6.25, 5.25, 4.25, 3.25, 2.25, 1.25, 0.25};
15541567
1555 recursiveFilterX(View(src), View(dest),
1568 recursiveFilterX(View(src), View(dest),
15561569 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_AVOID);
1557 recursiveFilterY(View(srct), View(destt),
1570 recursiveFilterY(View(srct), View(destt),
15581571 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_AVOID);
15591572 Image::iterator dest_iter = dest.begin();
15601573 Image::iterator destt_iter = destt.begin();
15741587 src_const.init(42.1);
15751588 dest.init(1.12);
15761589
1577 recursiveFilterX(srcImageRange(src_const), destImage(dest),
1590 recursiveFilterX(srcImageRange(src_const), destImage(dest),
15781591 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REFLECT);
15791592
15801593 Image::Iterator is = src_const.upperLeft();
15941607 id.x = tmp_dest.x;
15951608 }
15961609
1597 recursiveFilterY(srcImageRange(dest), destImage(dest),
1610 recursiveFilterY(srcImageRange(dest), destImage(dest),
15981611 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REFLECT);
15991612
16001613 is = src_const.upperLeft();
16141627 id.x = tmp_dest.x;
16151628 }
16161629
1617 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
1630 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
16181631 // ob die korrekten Daten eingehalten wurden.
16191632
16201633 Image src(getSymmetricLine());
16211634 dest = src;
1622 recursiveFilterX(srcImageRange(src), destImage(dest),
1635 recursiveFilterX(srcImageRange(src), destImage(dest),
16231636 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REFLECT);
1624 Image::value_type correct_data[40] =
1625 {1.10091101909, 1.56303266253, 2.36515826013, 3.29236429975, 4.26558480098,
1626 5.25573290944, 6.25210788209, 7.25077235463, 8.25027572885, 9.25007858906,
1627 10.2499668097, 11.2498189802, 12.2494745341, 13.2485593473, 14.2460793794,
1628 15.2393409851, 16.2210251818, 17.171238053, 18.0359027479, 18.6680232997,
1629 18.6680232997, 18.0359027479, 17.171238053, 16.2210251818, 15.2393409851,
1630 14.2460793794, 13.2485593473, 12.2494745342, 11.2498189803, 10.24996681,
1631 9.25007858994, 8.25027573123, 7.2507723611, 6.2521078997, 5.2557329573,
1637 Image::value_type correct_data[40] =
1638 {1.10091101909, 1.56303266253, 2.36515826013, 3.29236429975, 4.26558480098,
1639 5.25573290944, 6.25210788209, 7.25077235463, 8.25027572885, 9.25007858906,
1640 10.2499668097, 11.2498189802, 12.2494745341, 13.2485593473, 14.2460793794,
1641 15.2393409851, 16.2210251818, 17.171238053, 18.0359027479, 18.6680232997,
1642 18.6680232997, 18.0359027479, 17.171238053, 16.2210251818, 15.2393409851,
1643 14.2460793794, 13.2485593473, 12.2494745342, 11.2498189803, 10.24996681,
1644 9.25007858994, 8.25027573123, 7.2507723611, 6.2521078997, 5.2557329573,
16321645 4.26558493107, 3.29236465337, 2.36515922136, 1.56303527544, 1.10091812172};
16331646
16341647 Image::iterator dest_iter = dest.begin();
16451658 Image dest(src_const);
16461659 src_const.init(42.1);
16471660 dest.init(1.12);
1648 recursiveFilterX(srcImageRange(src_const), destImage(dest),
1661 recursiveFilterX(srcImageRange(src_const), destImage(dest),
16491662 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_CLIP);
16501663
16511664 Image::Iterator is = src_const.upperLeft();
16651678 id.x = tmp_dest.x;
16661679 }
16671680
1668 recursiveFilterY(srcImageRange(src_const), destImage(dest),
1681 recursiveFilterY(srcImageRange(src_const), destImage(dest),
16691682 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_CLIP);
16701683
16711684 is = src_const.upperLeft();
16891702 void recursiveFilterTestWithClipOnNonConstImage(){
16901703 Image src(40, 1);
16911704 Image dest(src);
1692 Image::value_type correct_data[40] =
1693 {0.831977, 1.53351, 2.3853, 3.31218, 4.27763, 5.26195,
1694 6.25506, 7.25211, 8.25086, 9.25035, 10.2501, 11.2501,
1695 12.25, 13.25, 14.25, 15.25, 16.25, 17.25, 18.25, 19.25,
1696 20.25, 21.25, 22.25, 23.25, 24.25, 25.25, 26.25, 27.25,
1697 28.2499, 29.2499, 30.2496, 31.2491, 32.2479, 33.2449,
1705 Image::value_type correct_data[40] =
1706 {0.831977, 1.53351, 2.3853, 3.31218, 4.27763, 5.26195,
1707 6.25506, 7.25211, 8.25086, 9.25035, 10.2501, 11.2501,
1708 12.25, 13.25, 14.25, 15.25, 16.25, 17.25, 18.25, 19.25,
1709 20.25, 21.25, 22.25, 23.25, 24.25, 25.25, 26.25, 27.25,
1710 28.2499, 29.2499, 30.2496, 31.2491, 32.2479, 33.2449,
16981711 34.2381, 35.2224, 36.1878, 37.1147, 37.9665, 38.668};
16991712
17001713 Image::Accessor acc_src = src.accessor();
17051718 acc_src.set(i + 0.25, iter_src);
17061719 }
17071720
1708 recursiveFilterX(srcImageRange(src), destImage(dest),
1721 recursiveFilterX(srcImageRange(src), destImage(dest),
17091722 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_CLIP);
17101723
17111724 Image::iterator idest = dest.begin();
17261739 src_const.init(42.1);
17271740 dest.init(1.12);
17281741
1729 recursiveFilterX(srcImageRange(src_const), destImage(dest),
1742 recursiveFilterX(srcImageRange(src_const), destImage(dest),
17301743 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_WRAP);
17311744
17321745 Image::Iterator is = src_const.upperLeft();
17461759 id.x = tmp_dest.x;
17471760 }
17481761
1749 recursiveFilterY(srcImageRange(src_const), destImage(dest),
1762 recursiveFilterY(srcImageRange(src_const), destImage(dest),
17501763 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_WRAP);
17511764
17521765 is = src_const.upperLeft();
17671780 }
17681781
17691782
1770 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
1783 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
17711784 // ob die korrekten Daten eingehalten wurden.
17721785
17731786 Image src(getSymmetricLine());
17741787 dest = src;
1775 recursiveFilterX(srcImageRange(src), destImage(dest),
1788 recursiveFilterX(srcImageRange(src), destImage(dest),
17761789 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_WRAP);
1777 Image::value_type correct_data[40] =
1790 Image::value_type correct_data[40] =
17781791 {0.8319696, 1.4640946, 2.328761, 3.2789745, 4.260659,
17791792 5.2539208, 6.2514412, 7.2505271, 8.2501855, 9.2500454,
17801793 10.249955, 11.249814, 12.249473, 13.248559, 14.246079,
18001813 src_const.init(42.1);
18011814 dest.init(1.12);
18021815
1803 recursiveFilterX(srcImageRange(src_const), destImage(dest),
1816 recursiveFilterX(srcImageRange(src_const), destImage(dest),
18041817 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REPEAT);
18051818
18061819 Image::Iterator is = src_const.upperLeft();
18191832 is.x = tmp_src.x;
18201833 id.x = tmp_dest.x;
18211834 }
1822 recursiveFilterY(srcImageRange(src_const), destImage(dest),
1835 recursiveFilterY(srcImageRange(src_const), destImage(dest),
18231836 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REPEAT);
18241837
18251838 is = src_const.upperLeft();
18391852 id.x = tmp_dest.x;
18401853 }
18411854
1842 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
1855 // Hier wird an einem symmetrischen Bild /\ (Groesse 40x1) getestet
18431856 // ob die korrekten Daten eingehalten wurden.
18441857
18451858 Image src(getSymmetricLine());
18461859 dest = src;
1847 recursiveFilterX(srcImageRange(src), destImage(dest),
1860 recursiveFilterX(srcImageRange(src), destImage(dest),
18481861 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REPEAT);
1849 Image::value_type correct_data[40] =
1850 {0.67545906, 1.4065176, 2.3075796, 3.2711823, 4.2577924,
1851 5.2528662, 6.2510533, 7.2503844, 8.250133, 9.2500261,
1852 10.249947, 11.249812, 12.249472, 13.248558, 14.246079,
1853 15.239341, 16.221025, 17.171238, 18.035903, 18.668023,
1854 18.668023, 18.035903, 17.171238, 16.221025, 15.239341,
1855 14.246079, 13.248558, 12.249472, 11.249812, 10.249947,
1856 9.2500261, 8.250133, 7.2503844, 6.2510533, 5.2528662,
1862 Image::value_type correct_data[40] =
1863 {0.67545906, 1.4065176, 2.3075796, 3.2711823, 4.2577924,
1864 5.2528662, 6.2510533, 7.2503844, 8.250133, 9.2500261,
1865 10.249947, 11.249812, 12.249472, 13.248558, 14.246079,
1866 15.239341, 16.221025, 17.171238, 18.035903, 18.668023,
1867 18.668023, 18.035903, 17.171238, 16.221025, 15.239341,
1868 14.246079, 13.248558, 12.249472, 11.249812, 10.249947,
1869 9.2500261, 8.250133, 7.2503844, 6.2510533, 5.2528662,
18571870 4.2577924, 3.2711823, 2.3075796, 1.4065176, 0.67545906};
18581871
18591872 Image::iterator dest_iter = dest.begin();
18881901 acc_src_wrap.set( j + 0.25, iter_src_wrap);
18891902 }
18901903
1891 recursiveFilterX(srcImageRange(src_wrap), destImage(dest_wrap),
1904 recursiveFilterX(srcImageRange(src_wrap), destImage(dest_wrap),
18921905 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_WRAP);
18931906
1894 recursiveFilterX(srcImageRange(src_reflect), destImage(dest_reflect),
1907 recursiveFilterX(srcImageRange(src_reflect), destImage(dest_reflect),
18951908 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REFLECT);
18961909
18971910 Image::iterator iter_dest_wrap = dest_wrap.begin();
19021915
19031916 while(iter_dest_reflect != end_dest_reflect)
19041917 {
1905 shouldEqualTolerance(acc_dest_wrap(iter_dest_wrap),
1918 shouldEqualTolerance(acc_dest_wrap(iter_dest_wrap),
19061919 acc_dest_reflect(iter_dest_reflect), 1e-6);
19071920 iter_dest_wrap++;
19081921 iter_dest_reflect++;
19691982 }
19701983
19711984 /**
1972 * Es wird die Positionierung der einzelnen
1985 * Es wird die Positionierung der einzelnen
19731986 * Punkte relativ zueinander getestet.
19741987 */
19751988 void recursiveFilterTestOfAllTreatmentsRelatively()
19962009 Image dest_wrap(src);
19972010 Image dest_clip(src);
19982011
1999 recursiveFilterX(srcImageRange(src), destImage(dest_avoid),
2012 recursiveFilterX(srcImageRange(src), destImage(dest_avoid),
20002013 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_AVOID);
2001 recursiveFilterX(srcImageRange(src), destImage(dest_repeat),
2014 recursiveFilterX(srcImageRange(src), destImage(dest_repeat),
20022015 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REPEAT);
2003 recursiveFilterX(srcImageRange(src), destImage(dest_reflect),
2016 recursiveFilterX(srcImageRange(src), destImage(dest_reflect),
20042017 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_REFLECT);
2005 recursiveFilterX(srcImageRange(src), destImage(dest_wrap),
2018 recursiveFilterX(srcImageRange(src), destImage(dest_wrap),
20062019 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_WRAP);
2007 recursiveFilterX(srcImageRange(src), destImage(dest_clip),
2020 recursiveFilterX(srcImageRange(src), destImage(dest_clip),
20082021 VIGRA_CSTD::exp(-1.0), BORDER_TREATMENT_CLIP);
2009
2022
20102023 iter_src = src.begin();
20112024 Image::iterator iter_dest_avoid = dest_avoid.begin();
20122025 Image::iterator iter_dest_repeat = dest_repeat.begin();
20392052 }
20402053 }
20412054 }
2042
2055
20432056 void recursiveSmoothTest()
20442057 {
20452058 Image tmp1(constimg);
20472060
20482061 recursiveSmoothX(View(constimg), View(tmp1), 1.0);
20492062 recursiveSmoothY(View(tmp1), View(tmp1), 1.0);
2050
2063
20512064 Image::ScanOrderIterator i1 = constimg.begin();
20522065 Image::ScanOrderIterator i1end = constimg.end();
20532066 Image::ScanOrderIterator i2 = tmp1.begin();
20542067 Image::Accessor acc = constimg.accessor();
2055
2068
20562069 for(; i1 != i1end; ++i1, ++i2)
20572070 {
20582071 should(acc(i1) == acc(i2));
20592072 }
20602073 }
2061
2074
20622075 void recursiveGradientTest()
20632076 {
20642077 ImageImportInfo info("lenna128recgrad.xv");
20652078
20662079 Image recgrad(info.width(), info.height());
20672080 importImage(info, destImage(recgrad));
2068
2081
20692082 Image tmp1(lenna);
20702083 tmp1 = 0.0;
20712084 Image tmp2(lenna);
20732086
20742087 recursiveFirstDerivativeX(View(lenna), View(tmp1), 1.0);
20752088 recursiveSmoothY(View(tmp1), View(tmp1), 1.0);
2076
2089
20772090 recursiveSmoothX(View(lenna), View(tmp2), 1.0);
20782091 recursiveFirstDerivativeY(View(tmp2), View(tmp2), 1.0);
2079
2092
20802093 Image::ScanOrderIterator i1 = tmp1.begin();
20812094 Image::ScanOrderIterator i1end = tmp1.end();
20822095 Image::ScanOrderIterator i2 = tmp2.begin();
20832096 Image::ScanOrderIterator i = recgrad.begin();
20842097 Image::Accessor acc = constimg.accessor();
2085
2098
20862099 for(; i1 != i1end; ++i1, ++i2, ++i)
20872100 {
20882101 double grad = sqrt(acc(i1)*acc(i1)+acc(i2)*acc(i2));
2089
2102
20902103 shouldEqualTolerance(grad, acc(i), 1e-7);
20912104 }
20922105 }
2093
2106
20942107 void recursiveSecondDerivativeTest()
20952108 {
20962109 double b = VIGRA_CSTD::exp(-1.0);
20972110 double factor = (1.0 - b) * (1.0 - b) / b;
2098
2111
20992112 Image tmp1(rampimg);
21002113 tmp1 = 0.0;
21012114 Image tmp2(rampimg);
21032116
21042117 recursiveSmoothX(View(rampimg), View(tmp1), 1.0);
21052118 recursiveSecondDerivativeX(View(rampimg), View(tmp2), 1.0);
2106
2119
21072120 Image::ScanOrderIterator i1 = rampimg.begin();
21082121 Image::ScanOrderIterator i1end = i1 + rampimg.width();
21092122 Image::ScanOrderIterator i2 = tmp1.begin();
21102123 Image::ScanOrderIterator i3 = tmp2.begin();
21112124 Image::Accessor acc = constimg.accessor();
2112
2125
21132126 for(; i1 != i1end; ++i1, ++i2, ++i3)
21142127 {
21152128 double diff = factor * (acc(i2) - acc(i1));
21162129 shouldEqualTolerance(diff, acc(i3), 1e-7);
21172130 }
21182131 }
2119
2132
21202133 void nonlinearDiffusionTest()
21212134 {
2122
2135
21232136 Image res(lenna.size());
2124
2137
21252138 Image comp(lenna.size());
21262139 importImage(vigra::ImageImportInfo("lenna128nonlinear.xv"), destImage(comp));
21272140
21342147 vigra::DiffusivityFunctor<double>(4.0), 4.0);
21352148 shouldEqualSequenceTolerance(res.begin(), res.end(), comp.begin(), 1e-7);
21362149 }
2137
2150
21382151 Image constimg, lenna, rampimg, sym_image, unsym_image;
21392152 vigra::Kernel2D<double> sym_kernel, unsym_kernel, line_kernel;
2140
2153
21412154 };
21422155
21432156 struct ResamplingConvolutionTest
21492162 BSpline<3, double> spline, dspline(1);
21502163 ArrayVector<Kernel1D<double> > kernels(4);
21512164 Rational<int> samplingRatio(4), offset(1,8);
2152 resampling_detail::MapTargetToSourceCoordinate
2165 resampling_detail::MapTargetToSourceCoordinate
21532166 mapCoordinate(samplingRatio, offset);
21542167 createResamplingKernels(spline, mapCoordinate, kernels);
2155
2168
21562169 for(unsigned int i = 0; i<kernels.size(); ++i)
21572170 {
21582171 double sum = 0.0;
21662179 }
21672180
21682181 createResamplingKernels(dspline, mapCoordinate, kernels);
2169
2182
21702183 for(unsigned int i = 0; i<kernels.size(); ++i)
21712184 {
21722185 double sum = 0.0;
21792192 shouldEqualTolerance(sum, 1.0, 1e-14);
21802193 }
21812194 }
2182
2195
21832196 void testKernelsGauss()
21842197 {
21852198 Gaussian<double> gauss(0.7), dgauss(0.7, 1);
21862199 ArrayVector<Kernel1D<double> > kernels(4);
21872200 Rational<int> samplingRatio(4), offset(1,8);
2188 resampling_detail::MapTargetToSourceCoordinate
2201 resampling_detail::MapTargetToSourceCoordinate
21892202 mapCoordinate(samplingRatio, offset);
21902203 createResamplingKernels(gauss, mapCoordinate, kernels);
2191
2204
21922205 for(unsigned int i = 0; i<kernels.size(); ++i)
21932206 {
21942207 double sum = 0.0;
22022215 }
22032216
22042217 createResamplingKernels(dgauss, mapCoordinate, kernels);
2205
2218
22062219 for(unsigned int i = 0; i<kernels.size(); ++i)
22072220 {
22082221 double sum = 0.0;
22162229 shouldEqualTolerance(sum, 1.0, 1e-14);
22172230 }
22182231 }
2219
2232
22202233 void testOversamplingConstant()
22212234 {
22222235 BSpline<3, double> spline, dspline(1);
22232236 Rational<int> samplingRatio(4,1), offset(1,8);
2224
2237
22252238 FImage img(100, 100);
22262239 img.init(1.0);
2227
2240
22282241 int wnew = rational_cast<int>((img.width() - 1 - offset) * samplingRatio + 1);
22292242 int hnew = rational_cast<int>((img.height() - 1 - offset) * samplingRatio + 1);
2230
2243
22312244 FImage res(wnew, hnew);
2232
2245
22332246 resamplingConvolveImage(srcImageRange(img), destImageRange(res),
22342247 spline, samplingRatio, offset, spline, samplingRatio, offset);
22352248 for(FImage::iterator i = res.begin(); i < res.end(); ++i)
22362249 shouldEqual(*i, 1.0);
2237
2250
22382251 resamplingConvolveImage(View(img), View(res),
22392252 dspline, samplingRatio, offset, spline, samplingRatio, offset);
22402253 for(FImage::iterator i = res.begin(); i < res.end(); ++i)
22452258 {
22462259 Gaussian<double> gauss(0.7);
22472260 Rational<int> samplingRatio(2,1), offset(1,4);
2248
2261
22492262 ImageImportInfo info("lenna128.xv");
22502263 FImage img(info.size());
22512264 importImage(info, destImage(img));
2252
2265
22532266 int wnew = rational_cast<int>((info.width() - 1 - offset) * samplingRatio + 1);
22542267 int hnew = rational_cast<int>((info.height() - 1 - offset) * samplingRatio + 1);
2255
2256 FImage res(wnew, hnew);
2268
2269 FImage res(wnew, hnew);
22572270 resamplingConvolveImage(View(img), View(res),
22582271 gauss, samplingRatio, offset, gauss, samplingRatio, offset);
2259
2272
22602273 ImageImportInfo rinfo("resampling.xv");
22612274 shouldEqual(rinfo.width(), wnew);
22622275 shouldEqual(rinfo.height(), hnew);
2263 FImage ref(wnew, hnew);
2276 FImage ref(wnew, hnew);
22642277 importImage(rinfo, destImage(ref));
22652278
22662279 for(FImage::iterator i = res.begin(), j = ref.begin(); i < res.end(); ++i, ++j)
22862299 void testPyramidConstruction()
22872300 {
22882301 vigra::ImagePyramid<Image> pyramid(-2, 2, img);
2289
2302
22902303 shouldEqual(pyramid[-2].size(), Size2D(509, 477));
22912304 shouldEqual(pyramid[-1].size(), Size2D(255, 239));
22922305 shouldEqual(pyramid[0].size(), Size2D(128, 120));
22972310 void testBurtReduceExpand()
22982311 {
22992312 vigra::ImagePyramid<Image> pyramid(-2, 3, img), laplacian(-2,3, img);
2300
2313
23012314 pyramidExpandBurtFilter(pyramid, 0, -2);
23022315 pyramidReduceBurtFilter(pyramid, 0, 3);
2303
2316
23042317 pyramidReduceBurtLaplacian(laplacian, 0, 3);
23052318
23062319 char buf[100];
23132326 std::sprintf(buf, "lenna_level%d.xv", i);
23142327 ImageImportInfo info(buf);
23152328 shouldEqual(info.size(), pyramid[i].size());
2316
2329
23172330 Image ref(info.size());
23182331 importImage(info, destImage(ref));
23192332 shouldEqualSequenceTolerance(ref.begin(), ref.end(), pyramid[i].begin(), 1e-12);
23202333 }
2321
2334
23222335 for(int i=0; i<=2; ++i)
23232336 {
23242337 std::sprintf(buf, "lenna_levellap%d.xv", i);
23252338 ImageImportInfo info(buf);
23262339 shouldEqual(info.size(), laplacian[i].size());
2327
2340
23282341 Image ref(info.size());
23292342 importImage(info, destImage(ref));
23302343 for(int k=0; k<info.width()*info.height(); ++k)
23312344 shouldEqualTolerance(ref.data()[k]-laplacian[i].data()[k], 0.0, 1e-12);
23322345 }
2333
2346
23342347 shouldEqualSequenceTolerance(pyramid[3].begin(), pyramid[3].end(), laplacian[3].begin(), 1e-14);
2335
2348
23362349 pyramidExpandBurtLaplacian(laplacian, 3, -2);
23372350
23382351 for(int i=3; i>=-2; --i)
23402353 shouldEqualSequenceTolerance(pyramid[i].begin(), pyramid[i].end(), laplacian[i].begin(), 1e-14);
23412354 }
23422355 }
2343
2356
23442357 };
23452358
23462359 struct TotalVariationTest{
2347
2360
23482361 const int width,height;
23492362 MultiArray<2,double> data;
23502363 MultiArray<2,double> out;
23572370 }
23582371 }
23592372 }
2360
2373
23612374 void testTotalVariation(){
2362
2375
23632376 totalVariationFilter(data,out,0.5,1000,0.01);
23642377 //exportImage(srcImageRange(out), vigra::ImageExportInfo("test_tv.pgm"));
2365 shouldEqualSequenceTolerance(out.begin(), out.end(), result_std_tv, 1e-12);
2378 shouldEqualSequenceTolerance(out.begin(), out.end(), result_std_tv, 1e-12);
23662379 }
23672380 void testWeightedTotalVariation(){
2368
2381
23692382 totalVariationFilter(data,weight,out,.5,1000,0.01);
23702383 //exportImage(srcImageRange(out), vigra::ImageExportInfo("test_tvw.pgm"));
2371 shouldEqualSequenceTolerance(out.begin(), out.end(), result_std_tv_weight, 1e-12);
2384 shouldEqualSequenceTolerance(out.begin(), out.end(), result_std_tv_weight, 1e-12);
23722385 }
2373
2386
23742387 void testAnisotropicTotalVariation(){
2375
2388
23762389 double alpha0=0.05,beta0=0.5,sigma=0.1,rho=.5,K=10;
23772390 int inner_steps=200,outer_steps=5;
2378
2391
23792392 MultiArray<2,double> phi(Shape2(width,height));
23802393 MultiArray<2,double> alpha(Shape2(width,height));
2381 MultiArray<2,double> beta(Shape2(width,height));
2382 MultiArray<2,double> xedges(Shape2(width,height));
2383 MultiArray<2,double> yedges(Shape2(width,height));
2384
2394 MultiArray<2,double> beta(Shape2(width,height));
2395 MultiArray<2,double> xedges(Shape2(width,height));
2396 MultiArray<2,double> yedges(Shape2(width,height));
2397
23852398 for (int y=0;y<height;y++){
23862399 for (int x=0;x<width;x++){
23872400 alpha(x,y)=alpha0;
23902403 yedges(x,y)=1;
23912404 }
23922405 }
2393 out=data;
2394 for (int i=0;i<outer_steps;i++){
2406 out=data;
2407 for (int i=0;i<outer_steps;i++){
23952408 getAnisotropy(out,phi,alpha,beta,alpha0,beta0,sigma,rho,K);
2396 anisotropicTotalVariationFilter(data,weight,phi,alpha,beta,out,inner_steps);
2409 anisotropicTotalVariationFilter(data,weight,phi,alpha,beta,out,inner_steps);
23972410 }
23982411 //exportImage(srcImageRange(out), vigra::ImageExportInfo("test_aniso.pgm"));
2399 shouldEqualSequenceTolerance(out.begin(), out.end(), result_aniso_tv, 1e-8);
2412 shouldEqualSequenceTolerance(out.begin(), out.end(), result_aniso_tv, 1e-8);
24002413 }
2401
2414
24022415 void testSecondOrderTotalVariation(){
2403
2416
24042417 double alpha0=0.05,beta0=0.5,sigma=0.1,rho=.5,K=10,gamma0=0.1;
24052418 int inner_steps=200,outer_steps=5;
2406
2419
24072420 MultiArray<2,double> phi(Shape2(width,height));
24082421 MultiArray<2,double> alpha(Shape2(width,height));
2409 MultiArray<2,double> beta(Shape2(width,height));
2410 MultiArray<2,double> gamma(Shape2(width,height));
2411 MultiArray<2,double> xedges(Shape2(width,height));
2412 MultiArray<2,double> yedges(Shape2(width,height));
2413
2422 MultiArray<2,double> beta(Shape2(width,height));
2423 MultiArray<2,double> gamma(Shape2(width,height));
2424 MultiArray<2,double> xedges(Shape2(width,height));
2425 MultiArray<2,double> yedges(Shape2(width,height));
2426
24142427 for (int y=0;y<height;y++){
24152428 for (int x=0;x<width;x++){
24162429 alpha(x,y)=alpha0;
24202433 yedges(x,y)=1;
24212434 }
24222435 }
2423 out=data;
2436 out=data;
24242437 for (int i=0;i<outer_steps;i++){
24252438 getAnisotropy(out,phi,alpha,beta,alpha0,beta0,sigma,rho,K);
24262439 secondOrderTotalVariationFilter(data,weight,phi,alpha,beta,gamma,xedges,yedges,out,inner_steps);
24272440 }
24282441 //exportImage(srcImageRange(out), vigra::ImageExportInfo("test_htv.pgm"));
2429 shouldEqualSequenceTolerance(out.begin(), out.end(), result_higher_order_tv,1e-12);
2442 shouldEqualSequenceTolerance(out.begin(), out.end(), result_higher_order_tv,1e-12);
24302443 }
24312444 };
24322445
24422455 #if 1
24432456 add( testCase( &ConvolutionTest::initExplicitlyTest));
24442457
2445 add( testCase( &ConvolutionTest::simpleSharpeningTest));
2446 add( testCase( &ConvolutionTest::gaussianSharpeningTest));
2458 add( testCase( &ConvolutionTest::simpleSharpeningTest));
2459 add( testCase( &ConvolutionTest::gaussianSharpeningTest));
24472460 add( testCase( &ConvolutionTest::stdConvolutionTestOnConstImage));
24482461 add( testCase( &ConvolutionTest::stdConvolutionTestWithAvoid));
24492462 add( testCase( &ConvolutionTest::stdConvolutionTestWithZeropad));
24982511
24992512 add( testCase( &ImagePyramidTest::testPyramidConstruction));
25002513 add( testCase( &ImagePyramidTest::testBurtReduceExpand));
2501
2514
25022515 add( testCase( &TotalVariationTest::testTotalVariation));
25032516 add( testCase( &TotalVariationTest::testWeightedTotalVariation));
25042517 add( testCase( &TotalVariationTest::testAnisotropicTotalVariation));
00 if(FFTW3_FOUND)
1 INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${FFTW3_INCLUDE_DIR})
22
33 VIGRA_CONFIGURE_THREADING()
44
00 if(FFTW3_FOUND)
1 INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${FFTW3_INCLUDE_DIR})
22
33 VIGRA_ADD_TEST(test_features test.cxx LIBRARIES vigraimpex ${FFTW3_LIBRARIES})
44
0 VIGRA_ADD_TEST(test_filter_iterator test.cxx)
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #include <vigra/filter_iterator.hxx>
35 #include <vigra/unittest.hxx>
36 #include <vector>
37 #include <numeric>
38 #include <iterator>
39
40 using namespace vigra;
41
42 struct FilterIteratorTests
43 {
44 template <typename ITER0, typename ITER1>
45 void test_filter_read_mod2(ITER0 in_begin, ITER0 in_end, ITER1 expected_begin)
46 {
47 typedef typename std::iterator_traits<ITER0>::value_type value_type;
48 std::vector<value_type> out;
49 auto filter = [](int x) { return x % 2 == 0; };
50 auto begin = make_filter_iterator(filter, in_begin, in_end);
51 auto end = make_filter_iterator(filter, in_end, in_end);
52 for (auto it = begin; it != end; ++it)
53 {
54 out.push_back(*it);
55 }
56 shouldEqualSequence(out.begin(), out.end(), expected_begin);
57 }
58
59 template <typename ITER0, typename ITER1>
60 void test_filter_write_mod2(ITER0 in_begin, ITER0 in_end, ITER1 expected_begin)
61 {
62 typedef typename std::iterator_traits<ITER0>::value_type value_type;
63 std::vector<value_type> out(in_begin, in_end);
64 auto filter = [](int x) { return x % 2 == 0; };
65 auto begin = make_filter_iterator(filter, out.begin(), out.end());
66 auto end = make_filter_iterator(filter, out.end(), out.end());
67 for (auto it = begin; it != end; ++it)
68 {
69 *it += 100;
70 }
71 shouldEqualSequence(out.begin(), out.end(), expected_begin);
72 }
73
74 void test_filter_iterator_read()
75 {
76 {
77 int in[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
78 int out_expected[] = {0, 2, 4, 6, 8};
79 test_filter_read_mod2(in, in+10, out_expected);
80 }
81 {
82 int in[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
83 int out_expected[] = {0, 2, 4, 6, 8};
84 std::vector<int> v_in(in, in+9);
85 test_filter_read_mod2(v_in.begin(), v_in.end(), out_expected);
86 }
87 {
88 int in[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
89 int out_expected[] = {2, 4, 6, 8};
90 std::vector<int> v_in(in, in+9);
91 test_filter_read_mod2(v_in.cbegin(), v_in.cend(), out_expected);
92 }
93 {
94 int in[] = {1, 2, 3, 4, 5, 6, 7, 8};
95 int out_expected[] = {2, 4, 6, 8};
96 test_filter_read_mod2(in, in+8, out_expected);
97 }
98 }
99
100 void test_filter_iterator_write()
101 {
102 {
103 int in[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
104 int out_expected[] = {100, 1, 102, 3, 104, 5, 106, 7, 108, 9};
105 test_filter_write_mod2(in, in+10, out_expected);
106 }
107 {
108 int in[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
109 int out_expected[] = {100, 1, 102, 3, 104, 5, 106, 7, 108};
110 test_filter_write_mod2(in, in+9, out_expected);
111 }
112 {
113 int in[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
114 int out_expected[] = {1, 102, 3, 104, 5, 106, 7, 108, 9};
115 test_filter_write_mod2(in, in+9, out_expected);
116 }
117 {
118 int in[] = {1, 2, 3, 4, 5, 6, 7, 8};
119 int out_expected[] = {1, 102, 3, 104, 5, 106, 7, 108};
120 test_filter_write_mod2(in, in+8, out_expected);
121 }
122 }
123 };
124
125 struct FilterIteratorTestSuite : public test_suite
126 {
127 FilterIteratorTestSuite()
128 :
129 test_suite("FilterIterator test")
130 {
131 add(testCase(&FilterIteratorTests::test_filter_iterator_read));
132 add(testCase(&FilterIteratorTests::test_filter_iterator_write));
133 }
134 };
135
136 int main(int argc, char** argv)
137 {
138 FilterIteratorTestSuite filter_iterator_test;
139 int failed = filter_iterator_test.run(testsToBeExecuted(argc, argv));
140 std::cout << filter_iterator_test.report() << std::endl;
141 return (failed != 0);
142 }
00 if(FFTW3_FOUND)
1 INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${FFTW3_INCLUDE_DIR})
22
33 VIGRA_CONFIGURE_THREADING()
4
4
55 VIGRA_ADD_TEST(test_fourier test.cxx LIBRARIES vigraimpex ${FFTW3_LIBRARIES} ${THREADING_LIBRARIES})
66
77 VIGRA_COPY_TEST_DATA(ghouse.gif filter.xv gaborresult.xv)
11
22 if(WITH_BOOST_GRAPH)
33 VIGRA_ADD_TEST(test_gridgraph_BGL test.cxx LIBRARIES ${Boost_GRAPH_LIBRARY})
4 INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIR})
4 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${Boost_INCLUDE_DIR})
55 SET_TARGET_PROPERTIES(test_gridgraph_BGL PROPERTIES COMPILE_FLAGS "-DWITH_BOOST_GRAPH")
66 endif()
77
88 if(WITH_LEMON)
99 VIGRA_ADD_TEST(test_gridgraph_LEMON test.cxx LIBRARIES ${LEMON_LIBRARY})
10 INCLUDE_DIRECTORIES(${LEMON_INCLUDE_DIR})
10 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${LEMON_INCLUDE_DIR})
1111 SET_TARGET_PROPERTIES(test_gridgraph_LEMON PROPERTIES COMPILE_FLAGS "-DWITH_LEMON")
1212 endif()
4848
4949 using namespace vigra;
5050
51 #ifdef __GNUC__
52 #pragma GCC diagnostic push
53 #pragma GCC diagnostic ignored "-Wsign-compare"
54 #endif
55
5156 template <unsigned int N>
5257 struct NeighborhoodTests
5358 {
103108 shouldEqual(gridGraphMaxDegree(N, DirectNeighborhood), neighborCount);
104109
105110 Shape pos, neg, strides = cumprod(Shape(3)) / 3;
106 for(int k=0; k<neighborCount; ++k)
111 for(unsigned k=0; k<neighborCount; ++k)
107112 {
108113 shouldEqual(sum(abs(neighborOffsets[k])), 1); // test that it is a direct neighbor
109114
146151 shouldEqual(neighborExists[borderType].size(), neighborCount);
147152 checkNeighborCodes[borderType] = 1;
148153
149 for(int k=0; k<neighborCount; ++k)
154 for(unsigned k=0; k<neighborCount; ++k)
150155 {
151156 // check that neighbors are correctly marked as inside or outside in neighborExists
152157 shouldEqual(va.isInside(vi.point()+neighborOffsets[k]), neighborExists[borderType][k]);
170175 shouldEqual((GridGraphMaxDegree<N, IndirectNeighborhood>::value), neighborCount);
171176 shouldEqual(gridGraphMaxDegree(N, IndirectNeighborhood), neighborCount);
172177
173 for(int k=0; k<neighborCount; ++k)
178 for(unsigned k=0; k<neighborCount; ++k)
174179 {
175180 shouldEqual(abs(neighborOffsets[k]).maximum(), 1); // check that offset is at most 1 in any direction
176181
12991304
13001305 return (failed != 0);
13011306 }
1307
1308 #ifdef __GNUC__
1309 #pragma GCC diagnostic pop
1310 #endif
00 if(HDF5_FOUND)
1 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
2
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
2
33 ADD_DEFINITIONS(${HDF5_CPPFLAGS})
44
55 VIGRA_ADD_TEST(test_hdf5impex test.cxx LIBRARIES vigraimpex ${HDF5_LIBRARIES})
77
88 IF(TIFF_FOUND)
99 ADD_DEFINITIONS(-DHasTIFF)
10 INCLUDE_DIRECTORIES(${TIFF_INCLUDE_DIR})
10 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${TIFF_INCLUDE_DIR})
1111 ENDIF(TIFF_FOUND)
1212
1313 IF(OPENEXR_FOUND)
1414 /* restriction, including without limitation the rights to use, */
1515 /* copy, modify, merge, publish, distribute, sublicense, and/or */
1616 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
1817 /* conditions: */
1918 /* */
2019 /* The above copyright notice and this permission notice shall be */
681680 void testFile(const char* filename)
682681 {
683682 ImageExportInfo exportinfo(filename);
684 FRGBImage img(1, 1);
683 FVector4Image img(1, 1);
685684 img(0, 0) = 1;
686685
687686 const Diff2D position(0, 100);
26752675
26762676 void testDeterminant()
26772677 {
2678 double ds2[] = {1, 2, 2, 1};
2679 double dns2[] = {1, 2, 3, 1};
2680 Matrix ms2(Shape(2,2), ds2);
2681 Matrix mns2(Shape(2,2), dns2);
2682 double eps = 1e-12;
2683 shouldEqualTolerance(determinant(ms2), -3.0, eps);
2684 shouldEqualTolerance(determinant(mns2), -5.0, eps);
2685 shouldEqualTolerance(logDeterminant(transpose(ms2)*ms2), std::log(9.0), eps);
2686 shouldEqualTolerance(logDeterminant(transpose(mns2)*mns2), std::log(25.0), eps);
2687
2688 double ds3[] = {1, 2, 3, 2, 3, 1, 3, 1, 2};
2689 double dns3[] = {1, 2, 3, 5, 3, 1, 3, 1, 2};
2690 Matrix ms3(Shape(3,3), ds3);
2691 Matrix mns3(Shape(3,3), dns3);
2692 shouldEqualTolerance(determinant(ms3), -18.0, eps);
2693 shouldEqualTolerance(determinant(mns3), -21.0, eps);
2694 shouldEqualTolerance(determinant(transpose(ms3)*ms3, "Cholesky"), 324.0, eps);
2695 shouldEqualTolerance(determinant(transpose(mns3)*mns3, "Cholesky"), 441.0, eps);
2696 shouldEqualTolerance(logDeterminant(transpose(ms3)*ms3), std::log(324.0), eps);
2697 shouldEqualTolerance(logDeterminant(transpose(mns3)*mns3), std::log(441.0), eps);
2678 {
2679 double eps = 1e-12;
2680 double ds2[] = {1, 2, 2, 1};
2681 double dns2[] = {1, 2, 3, 1};
2682 Matrix ms2(Shape(2,2), ds2);
2683 Matrix mns2(Shape(2,2), dns2);
2684 shouldEqualTolerance(determinant(ms2), -3.0, eps);
2685 shouldEqualTolerance(determinant(mns2), -5.0, eps);
2686 shouldEqualTolerance(logDeterminant(transpose(ms2)*ms2), std::log(9.0), eps);
2687 shouldEqualTolerance(logDeterminant(transpose(mns2)*mns2), std::log(25.0), eps);
2688 }
2689 {
2690 double eps = 1e-12;
2691 double ds3[] = {1, 2, 3, 2, 3, 1, 3, 1, 2};
2692 double dns3[] = {1, 2, 3, 5, 3, 1, 3, 1, 2};
2693 Matrix ms3(Shape(3,3), ds3);
2694 Matrix mns3(Shape(3,3), dns3);
2695 shouldEqualTolerance(determinant(ms3), -18.0, eps);
2696 shouldEqualTolerance(determinant(mns3), -21.0, eps);
2697 shouldEqualTolerance(determinant(transpose(ms3)*ms3, "Cholesky"), 324.0, eps);
2698 shouldEqualTolerance(determinant(transpose(mns3)*mns3, "Cholesky"), 441.0, eps);
2699 shouldEqualTolerance(logDeterminant(transpose(ms3)*ms3), std::log(324.0), eps);
2700 shouldEqualTolerance(logDeterminant(transpose(mns3)*mns3), std::log(441.0), eps);
2701 }
2702 {
2703 int ds2[] = {1, 2, 2, 1};
2704 int dns2[] = {1, 2, 3, 1};
2705 vigra::Matrix<int> ms2(Shape(2,2), ds2);
2706 vigra::Matrix<int> mns2(Shape(2,2), dns2);
2707 shouldEqual(determinant(ms2), -3);
2708 shouldEqual(determinant(mns2), -5);
2709 }
2710 {
2711 int ds3[] = {1, 2, 3, 2, 3, 1, 3, 1, 2};
2712 int dns3[] = {1, 2, 3, 5, 3, 1, 3, 1, 2};
2713 vigra::Matrix<int> ms3(Shape(3,3), ds3);
2714 vigra::Matrix<int> mns3(Shape(3,3), dns3);
2715 shouldEqual(determinant(ms3), -18);
2716 shouldEqual(determinant(mns3), -21);
2717 }
2718 {
2719 unsigned short ds2[] = {1, 2, 2, 1};
2720 unsigned short dns2[] = {1, 2, 3, 1};
2721 vigra::Matrix<unsigned short> ms2(Shape(2,2), ds2);
2722 vigra::Matrix<unsigned short> mns2(Shape(2,2), dns2);
2723 shouldEqual(determinant(ms2), -3);
2724 shouldEqual(determinant(mns2), -5);
2725 }
2726 {
2727 unsigned short ds3[] = {1, 2, 3, 2, 3, 1, 3, 1, 2};
2728 unsigned short dns3[] = {1, 2, 3, 5, 3, 1, 3, 1, 2};
2729 vigra::Matrix<unsigned short> ms3(Shape(3,3), ds3);
2730 vigra::Matrix<unsigned short> mns3(Shape(3,3), dns3);
2731 shouldEqual(determinant(ms3), -18);
2732 shouldEqual(determinant(mns3), -21);
2733 }
26982734 }
26992735
27002736 void testSVD()
773773 Edge e13 = g.findEdge(n1,n3);
774774 Edge e24 = g.findEdge(n2,n4);
775775 Edge e34 = g.findEdge(n3,n4);
776 ignore_argument(e12,e13,e24,e34);
776777
777778 // get incoming arcs
778779 {
844845 Edge e13 = g.findEdge(n1,n3);
845846 Edge e24 = g.findEdge(n2,n4);
846847 Edge e34 = g.findEdge(n3,n4);
848 ignore_argument(e12,e13,e24,e34);
849
847850
848851
849852 {
1313
1414 # Check cpp version
1515 if(NOT ${VIGRA_CPP_VERSION})
16 message(FATAL_ERROR
16 message(FATAL_ERROR
1717 "cmake error: VIGRA_CPP_VERSION not defined yet. "
1818 "Call VIGRA_DETECT_CPP_VERSION() from the main CMakeLists file." )
1919 endif()
2020
2121 # multiarray/test.cxx uses 'auto' from c++11.
2222 string(COMPARE LESS ${VIGRA_CPP_VERSION} "201103" NO_CXX11)
23 if(NO_CXX11 AND NOT MSVC) # Visual Studio 2010 and 2012 supports enough c++11 features that we can still use it
23 if(NO_CXX11 AND NOT MSVC) # Visual Studio 2010 and 2012 supports enough c++11 features that we can still use it
2424 MESSAGE(STATUS "** WARNING: You are compiling in C++98 mode.")
2525 MESSAGE(STATUS "** Multiarray tests will be skipped.")
2626 MESSAGE(STATUS "** Add -std=c++11 to CMAKE_CXX_FLAGS to enable multiarray tests.")
2727 else()
2828 VIGRA_ADD_TEST(test_multiarray test.cxx LIBRARIES vigraimpex)
29 endif()
29 endif()
3030
3131 # Even with C++11, a working threading implementation is needed for running multiarray_chunked tests.
3232 VIGRA_CONFIGURE_THREADING()
4242
4343 IF(HDF5_FOUND)
4444 ADD_DEFINITIONS(-DHasHDF5 ${HDF5_CPPFLAGS})
45 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
45 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
4646 SET(MULTIARRAY_CHUNKED_LIBRARIES vigraimpex ${HDF5_LIBRARIES} ${MULTIARRAY_CHUNKED_LIBRARIES})
4747 ELSE(TIFF_FOUND)
4848 SET(MULTIARRAY_CHUNKED_LIBRARIES vigraimpex ${MULTIARRAY_CHUNKED_LIBRARIES})
4949 ENDIF()
5050
51 VIGRA_ADD_TEST(test_multiarray_chunked test_chunked.cxx
51 VIGRA_ADD_TEST(test_multiarray_chunked test_chunked.cxx
5252 LIBRARIES ${MULTIARRAY_CHUNKED_LIBRARIES})
5353 endif()
5454
11921192 shouldEqual (countx, 30);
11931193 shouldEqual (county, 15);
11941194 shouldEqual (countz, 5);
1195 shouldEqual (seqi, a3.end());
1195 should (seqi == a3.end());
11961196
11971197 // test direct navigation
11981198 traverser3_t i3 = a3.traverser_begin();
13271327 shouldEqual (countx, 30);
13281328 shouldEqual (county, 15);
13291329 shouldEqual (countz, 5);
1330 shouldEqual (seqi, a3.end());
1330 should (seqi == a3.end());
13311331 //
13321332 //// test direct navigation
13331333 //traverser3_t i3 = a3.traverser_begin();
17351735 static typename Image::value_type data[];
17361736
17371737 ImageViewTest()
1738 : ma(TinyVector<int, 2>(3,3)),
1738 : ma(Shape2(3,3)),
17391739 img(makeBasicImageView(ma))
17401740 {
17411741 typename Image::Accessor acc = img.accessor();
18671867 void testStridedImageView()
18681868 {
18691869 // create stride MultiArrayView
1870 typename MA::difference_type
1871 start(0,0), end(2,2);
1872 MA roi = ma.subarray(start, end);
1870 Shape2 start(0,0), end(2, 2);
1871 typename MA::view_type roi = ma.subarray(start, end);
18731872
18741873 // inspect both the MultiArrayView and the corresponding BasicImageView
18751874 vigra::FindSum<typename Image::value_type> sum1, sum2;
9494 }
9595
9696 static ArrayPtr createArray(Shape3 const & shape,
97 Shape3 const & chunk_shape,
97 Shape3 const & /*chunk_shape*/,
9898 ChunkedArrayFull<3, T> *,
9999 std::string const & = "chunked_test.h5")
100100 {
198198 int dataBytesBefore = array->dataBytes();
199199 array->releaseChunks(Shape3(5, 0, 3), Shape3(shape[0], shape[1], shape[2]-3), true);
200200 if(!isFullArray)
201 should(array->dataBytes() < dataBytesBefore);
201 should(array->dataBytes() < (unsigned)dataBytesBefore);
202202
203203 if(IsSameType<Array, ChunkedArrayLazy<3, T> >::value ||
204204 IsSameType<Array, ChunkedArrayCompressed<3, T> >::value)
0 VIGRA_ADD_TEST(test_objectfeatures test.cxx LIBRARIES vigraimpex)
0 VIGRA_ADD_TEST(test_objectfeatures test.cxx)
1 IF(WITH_LEMON)
2 VIGRA_ADD_TEST(test_objectfeatures_lemon test_lemon.cxx LIBRARIES ${LEMON_LIBRARY})
3 INCLUDE_DIRECTORIES(${LEMON_INCLUDE_DIR})
4 SET_TARGET_PROPERTIES(test_objectfeatures_lemon PROPERTIES COMPILE_FLAGS "-DWITH_LEMON")
5 ENDIF(WITH_LEMON)
16 VIGRA_ADD_TEST(test_stand_alone_acc_chain stand_alone_acc_chain.cxx)
27 VIGRA_COPY_TEST_DATA(of.gif)
38
14891489 shouldEqual(W(3, 0, 1), get<AutoRangeHistogram<3> >(c,3));
14901490 }
14911491 }
1492
1493 void testConvexHullFeatures()
1494 {
1495 using namespace vigra::acc;
1496
1497 int size = 6;
1498 MultiArray<2, int> mask(vigra::Shape2(size, size));
1499
1500 mask(1, 1) = 1;
1501 mask(2, 1) = 1;
1502 mask(2, 2) = 1;
1503 mask(2, 3) = 1;
1504 mask(1, 3) = 1;
1505 mask(3, 1) = 1;
1506 mask(3, 3) = 1;
1507 mask(4, 1) = 1;
1508 mask(4, 3) = 1;
1509
1510 //for(auto i = mask.begin(); i != mask.end(); ++i)
1511 //{
1512 // std::cerr << (*i ? "*" : " ");
1513 // if(i.point()[0] == mask.shape(0)-1)
1514 // std::cerr << "\n";
1515 //}
1516
1517 AccumulatorChainArray<CoupledArrays<2, int>,
1518 Select<LabelArg<1>, ConvexHull> > chf;
1519 chf.ignoreLabel(0);
1520 extractFeatures(mask, chf);
1521
1522 shouldEqual(getAccumulator<ConvexHull>(chf, 1).inputArea(), 8.5);
1523 shouldEqualTolerance(getAccumulator<ConvexHull>(chf, 1).inputPerimeter(), 8.0 + 6.0*M_SQRT2, 1e-15);
1524
1525 typedef TinyVector<double, 2> P;
1526 P ref[] = { P(1.0, 0.5), P(0.5, 1.0), P(0.5, 3.0), P(1.0, 3.5), P(4.0, 3.5),
1527 P(4.5, 3.0), P(4.5, 1.0), P(4.0, 0.5), P(1.0, 0.5) };
1528 shouldEqual(get<ConvexHull>(chf, 1).hull().size(), 9);
1529 shouldEqualSequence(ref, ref+9, get<ConvexHull>(chf, 1).hull().begin());
1530 shouldEqual(get<ConvexHull>(chf, 1).hullArea(), 11.5);
1531 shouldEqualTolerance(get<ConvexHull>(chf, 1).hullPerimeter(), 10.0 + 2.0*M_SQRT2, 1e-15);
1532
1533 shouldEqualTolerance(get<ConvexHull>(chf, 1).convexity(), 8.5 / 11.5, 1e-15);
1534 shouldEqualTolerance(get<ConvexHull>(chf, 1).rugosity(), 1.2850586602653933, 1e-15);
1535
1536 shouldEqual(get<ConvexHull>(chf, 1).convexityDefectCount(), 2);
1537 shouldEqual(get<ConvexHull>(chf, 1).defectAreaList().size(), 2);
1538 shouldEqual(get<ConvexHull>(chf, 1).defectAreaList()[0], 2);
1539 shouldEqual(get<ConvexHull>(chf, 1).defectAreaList()[1], 1);
1540
1541 shouldEqualTolerance(get<ConvexHull>(chf, 1).convexityDefectAreaMean(), 1.5, 1e-15);
1542 shouldEqualTolerance(get<ConvexHull>(chf, 1).convexityDefectAreaVariance(), 0.5, 1e-15);
1543 shouldEqualTolerance(get<ConvexHull>(chf, 1).convexityDefectAreaSkewness(), 0.0, 1e-15);
1544 shouldEqualTolerance(get<ConvexHull>(chf, 1).convexityDefectAreaKurtosis(), 0.0, 1e-15);
1545 shouldEqualTolerance(get<ConvexHull>(chf, 1).meanDefectDisplacement(), 1.185185185185185, 1e-15);
1546
1547 shouldEqualTolerance(P(2.4444444444444444, 2.0), get<ConvexHull>(chf, 1).inputCenter(), P(1e-15));
1548 shouldEqualTolerance(P(2.5, 2.0), get<ConvexHull>(chf, 1).hullCenter(), P(1e-15));
1549 shouldEqualTolerance(P(2.6666666666666667, 2.0), get<ConvexHull>(chf, 1).convexityDefectCenter(), P(1e-15));
1550 }
15511492 };
15521493
15531494 struct FeaturesTestSuite : public vigra::test_suite
15641505 add(testCase(&AccumulatorTest::testHistogram));
15651506 add(testCase(&AccumulatorTest::testRegionAccumulators));
15661507 add(testCase(&AccumulatorTest::testIndexSpecifiers));
1567 add(testCase(&AccumulatorTest::testConvexHullFeatures));
15681508 }
15691509 };
15701510
0 /************************************************************************/
1 /* */
2 /* Copyright 2011-2012 by Ullrich Koethe */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34
35 #include <iostream>
36 #include <sstream>
37 #include <map>
38 #include <set>
39
40 #include <vigra/unittest.hxx>
41 #include <vigra/multi_array.hxx>
42 #include <vigra/accumulator.hxx>
43
44 namespace std {
45
46 template <unsigned int N, class T, class Stride>
47 ostream & operator<<(ostream & o, vigra::MultiArrayView<N, T, Stride> const & m)
48 {
49 for(vigra::MultiArrayIndex k=0; k<m.size(); ++k)
50 o << m[k] << " ";
51 return o;
52 }
53
54 } // namespace std
55
56 using namespace vigra;
57
58 // mask cl.exe shortcomings
59 #if defined(_MSC_VER)
60 #pragma warning( disable : 4503 )
61 #endif
62
63 struct AccumulatorTest
64 {
65 void testConvexHullFeatures2D()
66 {
67 using namespace vigra::acc;
68 std::string prefix("testConvexHullFeatures2D(): ");
69
70 int size = 6;
71 MultiArray<2, int> mask(vigra::Shape2(size, size));
72
73 mask(1, 1) = 1;
74 mask(2, 1) = 1;
75 mask(2, 2) = 1;
76 mask(2, 3) = 1;
77 mask(1, 3) = 1;
78 mask(3, 1) = 1;
79 mask(3, 3) = 1;
80 mask(4, 1) = 1;
81 mask(4, 3) = 1;
82
83 AccumulatorChainArray<
84 CoupledArrays<2, int>,
85 Select<LabelArg<1>, ConvexHullFeatures> > chf;
86 chf.ignoreLabel(0);
87 extractFeatures(mask, chf);
88
89 getAccumulator<ConvexHullFeatures>(chf, 1).finalize();
90
91 {
92 TinyVector<double, 2> ref(2.5 - 1./18., 2.);
93 shouldEqualSequenceTolerance(
94 get<ConvexHullFeatures>(chf, 1).inputCenter().begin(),
95 get<ConvexHullFeatures>(chf, 1).inputCenter().end(),
96 ref.begin(),
97 (std::numeric_limits<double>::epsilon() * 2));
98 }
99 {
100 TinyVector<double, 2> ref(2.5, 2.);
101 shouldEqualSequenceTolerance(
102 get<ConvexHullFeatures>(chf, 1).hullCenter().begin(),
103 get<ConvexHullFeatures>(chf, 1).hullCenter().end(),
104 ref.begin(),
105 (std::numeric_limits<double>::epsilon() * 2));
106 }
107 shouldEqual(get<ConvexHullFeatures>(chf, 1).inputVolume(), 9);
108 shouldEqual(get<ConvexHullFeatures>(chf, 1).hullVolume(), 12);
109 {
110 TinyVector<double, 2> ref(8. / 3., 2.);
111 shouldEqualSequenceTolerance(
112 get<ConvexHullFeatures>(chf, 1).defectCenter().begin(),
113 get<ConvexHullFeatures>(chf, 1).defectCenter().end(),
114 ref.begin(),
115 (std::numeric_limits<double>::epsilon() * 2));
116 }
117 shouldEqual(get<ConvexHullFeatures>(chf, 1).defectCount(), 2);
118 shouldEqualTolerance(
119 get<ConvexHullFeatures>(chf, 1).defectVolumeMean(),
120 1.5,
121 (std::numeric_limits<double>::epsilon() * 2));
122 shouldEqualTolerance(
123 get<ConvexHullFeatures>(chf, 1).defectVolumeVariance(),
124 0.5,
125 (std::numeric_limits<double>::epsilon() * 2));
126 shouldEqualTolerance(
127 get<ConvexHullFeatures>(chf, 1).defectVolumeSkewness(),
128 0.0,
129 (std::numeric_limits<double>::epsilon() * 2));
130 shouldEqualTolerance(
131 get<ConvexHullFeatures>(chf, 1).defectVolumeKurtosis(),
132 0.0,
133 (std::numeric_limits<double>::epsilon() * 2));
134 shouldEqualTolerance(
135 get<ConvexHullFeatures>(chf, 1).defectDisplacementMean(),
136 ((2.5 - 1./18. - 1.) + 2*(3.5 - 2.5 + 1./18.))/3.,
137 (std::numeric_limits<double>::epsilon() * 2));
138 shouldEqualTolerance(
139 get<ConvexHullFeatures>(chf, 1).convexity(),
140 9. / 12.,
141 (std::numeric_limits<double>::epsilon() * 2));
142 }
143
144 void testConvexHullFeatures3D()
145 {
146 using namespace vigra::acc;
147 std::string prefix("testConvexHullFeatures3D(): ");
148
149 int size = 5;
150 MultiArray<3, int> mask(vigra::Shape3(size, size, size), 0);
151 for (int i = 0; i < 9; i++)
152 {
153 mask(i / 3 + 1, i % 3 + 1, 1) = 1;
154 mask(i / 3 + 1, i % 3 + 1, 3) = 1;
155 }
156 mask(3, 1, 2) = 1; mask(3, 2, 2) = 1;
157 mask(1, 3, 2) = 1; mask(2, 3, 2) = 1;
158
159 // z = 0 | z = 1 | z = 2 | z = 3
160 // --------+---------+---------+--------
161 // 0 0 0 0 | 0 0 0 0 | 0 0 0 0 | 0 0 0 0
162 // 0 0 0 0 | 0 x x x | 0 0 0 x | 0 x x x
163 // 0 0 0 0 | 0 x x x | 0 0 0 x | 0 x x x
164 // 0 0 0 0 | 0 x x x | 0 x x 0 | 0 x x x
165
166 AccumulatorChainArray<
167 CoupledArrays<3, int>,
168 Select<LabelArg<1>, ConvexHullFeatures> > chf;
169 chf.ignoreLabel(0);
170 extractFeatures(mask, chf);
171
172 getAccumulator<ConvexHullFeatures>(chf, 1).finalize();
173 {
174 // x and y coordinate: (7*1 + 7*2 + 8*3) / 22 = 45 / 22
175 TinyVector<double, 3> ref(45. / 22., 45. / 22., 2.);
176 shouldEqualSequenceTolerance(
177 get<ConvexHullFeatures>(chf, 1).inputCenter().begin(),
178 get<ConvexHullFeatures>(chf, 1).inputCenter().end(),
179 ref.begin(),
180 (std::numeric_limits<double>::epsilon() * 2));
181 }
182 {
183 TinyVector<double, 3> ref(2., 2., 2.);
184 shouldEqualSequenceTolerance(
185 get<ConvexHullFeatures>(chf, 1).hullCenter().begin(),
186 get<ConvexHullFeatures>(chf, 1).hullCenter().end(),
187 ref.begin(),
188 (std::numeric_limits<double>::epsilon() * 2));
189 }
190 shouldEqual(get<ConvexHullFeatures>(chf, 1).inputVolume(), 22);
191 shouldEqual(get<ConvexHullFeatures>(chf, 1).hullVolume(), 27);
192 {
193 // x and y coordinate: (2*1 + 2*2 + 1*3) / 5 = 9 / 5
194 TinyVector<double, 3> ref(9. / 5., 9. / 5., 2.);
195 shouldEqualSequenceTolerance(
196 get<ConvexHullFeatures>(chf, 1).defectCenter().begin(),
197 get<ConvexHullFeatures>(chf, 1).defectCenter().end(),
198 ref.begin(),
199 (std::numeric_limits<double>::epsilon() * 2));
200 }
201 shouldEqual(get<ConvexHullFeatures>(chf, 1).defectCount(), 2);
202 shouldEqualTolerance(
203 get<ConvexHullFeatures>(chf, 1).defectVolumeMean(),
204 2.5,
205 (std::numeric_limits<double>::epsilon() * 2));
206 shouldEqualTolerance(
207 get<ConvexHullFeatures>(chf, 1).defectVolumeVariance(),
208 9. / 2.,
209 (std::numeric_limits<double>::epsilon() * 2));
210 shouldEqualTolerance(
211 get<ConvexHullFeatures>(chf, 1).defectVolumeSkewness(),
212 0.0,
213 (std::numeric_limits<double>::epsilon() * 2));
214 shouldEqualTolerance(
215 get<ConvexHullFeatures>(chf, 1).defectVolumeKurtosis(),
216 0.0,
217 (std::numeric_limits<double>::epsilon() * 2));
218 // (sqrt(2) * 4 * (45 / 22 - 3 / 2) + sqrt(2) * 1 * (3 - 45 / 22)) / 5
219 // = sqrt(2) / 5 * (90 / 11 - 3 - 45 / 22)
220 shouldEqualTolerance(
221 get<ConvexHullFeatures>(chf, 1).defectDisplacementMean(),
222 sqrt(2.) / 5. * (90. / 11. - 3. - 45. / 22.),
223 (std::numeric_limits<double>::epsilon() * 2));
224 shouldEqualTolerance(
225 get<ConvexHullFeatures>(chf, 1).convexity(),
226 22. / 27.,
227 (std::numeric_limits<double>::epsilon() * 2));
228 }
229 };
230
231 struct FeaturesTestSuite : public vigra::test_suite
232 {
233 FeaturesTestSuite()
234 : vigra::test_suite("FeaturesTestSuite")
235 {
236 add(testCase(&AccumulatorTest::testConvexHullFeatures2D));
237 add(testCase(&AccumulatorTest::testConvexHullFeatures3D));
238 }
239 };
240
241 int main(int argc, char** argv)
242 {
243 FeaturesTestSuite test;
244 const int failed = test.run(vigra::testsToBeExecuted(argc, argv));
245 std::cout << test.report() << std::endl;
246
247 return failed != 0;
248 }
# Register the permutation unit test executable (VIGRA_ADD_TEST is a project
# macro; the empty LIBRARIES list appears to mean "no extra link libraries" --
# compare the test_polytope registration, which passes ${LEMON_LIBRARY}).
VIGRA_ADD_TEST(test_permutation test.cxx LIBRARIES)
1
0 /************************************************************************/
1 /* */
2 /* Copyright 1998-2014 by */
3 /* Ullrich Koethe, */
4 /* Esteban Pardo */
5 /* */
6 /* This file is part of the VIGRA computer vision library. */
7 /* The VIGRA Website is */
8 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
9 /* Please direct questions, bug reports, and contributions to */
10 /* ullrich.koethe@iwr.uni-heidelberg.de or */
11 /* vigra@informatik.uni-hamburg.de */
12 /* */
13 /* Permission is hereby granted, free of charge, to any person */
14 /* obtaining a copy of this software and associated documentation */
15 /* files (the "Software"), to deal in the Software without */
16 /* restriction, including without limitation the rights to use, */
17 /* copy, modify, merge, publish, distribute, sublicense, and/or */
18 /* sell copies of the Software, and to permit persons to whom the */
19 /* Software is furnished to do so, subject to the following */
20 /* conditions: */
21 /* */
22 /* The above copyright notice and this permission notice shall be */
23 /* included in all copies or substantial portions of the */
24 /* Software. */
25 /* */
26 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
27 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
28 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
29 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
30 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
31 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
32 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
33 /* OTHER DEALINGS IN THE SOFTWARE. */
34 /* */
35 /************************************************************************/
36
37 #define VIGRA_CHECK_BOUNDS
38
39 #include <limits>
40 #include <algorithm>
41 #include <vigra/unittest.hxx>
42 #include <vigra/permutation.hxx>
43
44
45 namespace vigra
46 {
47
48 struct PermutationTest
49 {
50 void testN1()
51 {
52 PlainChangesPermutations<1> permutations;
53 shouldEqual(permutations.size(), 1);
54 shouldEqual(permutations[0][0], 0);
55 shouldEqual(permutations[0].sign(), 1);
56 }
57
58 void testN2()
59 {
60 PlainChangesPermutations<2> permutations;
61 shouldEqual(permutations.size(), 2);
62 shouldEqual(permutations[0][0], 0);
63 shouldEqual(permutations[0][1], 1);
64 shouldEqual(permutations[0].sign(), 1);
65 shouldEqual(permutations[1][0], 1);
66 shouldEqual(permutations[1][1], 0);
67 shouldEqual(permutations[1].sign(), -1);
68 }
69
70 void testN3()
71 {
72 PlainChangesPermutations<3> permutations;
73 shouldEqual(permutations.size(), 6);
74 shouldEqual(permutations[0][0], 0);
75 shouldEqual(permutations[0][1], 1);
76 shouldEqual(permutations[0][2], 2);
77 shouldEqual(permutations[0].sign(), 1);
78 shouldEqual(permutations[1][0], 0);
79 shouldEqual(permutations[1][1], 2);
80 shouldEqual(permutations[1][2], 1);
81 shouldEqual(permutations[1].sign(), -1);
82 shouldEqual(permutations[2][0], 2);
83 shouldEqual(permutations[2][1], 0);
84 shouldEqual(permutations[2][2], 1);
85 shouldEqual(permutations[2].sign(), 1);
86 shouldEqual(permutations[3][0], 2);
87 shouldEqual(permutations[3][1], 1);
88 shouldEqual(permutations[3][2], 0);
89 shouldEqual(permutations[3].sign(), -1);
90 shouldEqual(permutations[4][0], 1);
91 shouldEqual(permutations[4][1], 2);
92 shouldEqual(permutations[4][2], 0);
93 shouldEqual(permutations[4].sign(), 1);
94 shouldEqual(permutations[5][0], 1);
95 shouldEqual(permutations[5][1], 0);
96 shouldEqual(permutations[5][2], 2);
97 shouldEqual(permutations[5].sign(), -1);
98 }
99 };
100
101 struct PermutationTestSuite : public vigra::test_suite
102 {
103 PermutationTestSuite() : vigra::test_suite("PermutationTestSuite")
104 {
105 add(testCase(&PermutationTest::testN1));
106 add(testCase(&PermutationTest::testN2));
107 add(testCase(&PermutationTest::testN3));
108 }
109 };
110
111 } // namespace vigra
112
113 int main(int argc, char** argv)
114 {
115 vigra::PermutationTestSuite test;
116 const int failed = test.run(vigra::testsToBeExecuted(argc, argv));
117 std::cerr << test.report() << std::endl;
118
119 return failed != 0;
120 }
121
# Build the polytope test only when LEMON is available; the test source is
# compiled with -DWITH_LEMON so the LEMON-dependent parts are enabled.
# LEMON headers are third-party, so include them with ${SUPPRESS_WARNINGS}
# (expands to SYSTEM when SUPPRESS_3RD_PARTY_WARNINGS is set, matching the
# Boost include handling in the top-level configuration).
IF(WITH_LEMON)
    VIGRA_ADD_TEST(test_polytope test.cxx LIBRARIES ${LEMON_LIBRARY})
    INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${LEMON_INCLUDE_DIR})
    SET_TARGET_PROPERTIES(test_polytope PROPERTIES COMPILE_FLAGS "-DWITH_LEMON")
ENDIF()
5
0 /************************************************************************/
1 /* */
2 /* Copyright 2011-2012 by Ullrich Koethe */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34
35 #include <iostream>
36 #include <chrono>
37
38 #include <vigra/unittest.hxx>
39 #include <vigra/multi_array.hxx>
40 #include <vigra/polytope.hxx>
41 #include <vigra/accumulator.hxx>
42
43 namespace chrono = std::chrono;
44
45 namespace vigra
46 {
47
48 using namespace acc;
49
50 struct ConvexHullBenchmark
51 {
52
53 typedef chrono::steady_clock clock_type;
54
55 template <unsigned int N>
56 void testTypical()
57 {
58 std::cout << "# Benchmark for typical case with dim = " << N << ". ";
59 std::cout << "All time measures in ms." << std::endl;
60 std::cout << "# size, min, mean, max, std" << std::endl;
61 for (int size = 1; size < 10000; size *= 2)
62 {
63 ArrayVector<double> times = constructPolytopeTypical<N>(size, 32);
64 AccumulatorChain<
65 MultiArrayIndex,
66 Select<Minimum, Maximum, Mean, StdDev> > acc_chain;
67 extractFeatures(times.begin(), times.end(), acc_chain);
68 std::cout
69 << size << ", "
70 << get<Minimum>(acc_chain) << ", "
71 << get<Mean >(acc_chain) << ", "
72 << get<Maximum>(acc_chain) << ", "
73 << get<StdDev >(acc_chain) << std::endl;
74 }
75 }
76
77 template <unsigned int N>
78 void testWorst()
79 {
80 std::cout << "# Benchmark for worst case with dim = " << N << ". ";
81 std::cout << "All time measures in ms." << std::endl;
82 std::cout << "# size, min, mean, max, std" << std::endl;
83 for (int size = 1; size < 10000; size *= 2)
84 {
85 ArrayVector<double> times = constructPolytopeWorst<N>(size, 32);
86 AccumulatorChain<
87 MultiArrayIndex,
88 Select<Minimum, Maximum, Mean, StdDev> > acc_chain;
89 extractFeatures(times.begin(), times.end(), acc_chain);
90 std::cout
91 << size << ", "
92 << get<Minimum>(acc_chain) << ", "
93 << get<Mean >(acc_chain) << ", "
94 << get<Maximum>(acc_chain) << ", "
95 << get<StdDev >(acc_chain) << std::endl;
96 }
97 }
98
99 template <unsigned int N>
100 ArrayVector<double> constructPolytopeTypical(int size, int iterations) const
101 {
102 ArrayVector<double> ret;
103 for (int iteration = 0; iteration < iterations; iteration++)
104 {
105 ret.push_back(constructPolytopeTypical<N>(size));
106 }
107 return ret;
108 }
109
110 template <unsigned int N>
111 double constructPolytopeTypical(int size) const
112 {
113 clock_type::time_point start = clock_type::now();
114 // Construct the base polytope
115 ConvexPolytope<N, double> poly;
116 TinyVector<double, N> vec;
117 poly.addVertex(vec);
118 for (int n = 0; n < N; n++)
119 {
120 vec[n] = 1.;
121 if (n > 0)
122 {
123 vec[n-1] = 0.;
124 }
125 poly.addVertex(vec);
126 }
127 poly.close();
128
129 // Add the vertices
130 for (int n = 0; n < size; n++)
131 {
132 do
133 {
134 for (int dim = 0; dim < N; dim++)
135 {
136 vec[dim] = (2*rand() - 1)/static_cast<double>(RAND_MAX);
137 }
138 }
139 while (vec.magnitude() > 1.);
140 poly.addExtremeVertex(vec);
141 }
142 clock_type::time_point stop = clock_type::now();
143 return chrono::duration_cast<chrono::microseconds>(stop - start).count();
144 }
145
146 template <unsigned int N>
147 ArrayVector<double> constructPolytopeWorst(int size, int iterations) const
148 {
149 ArrayVector<double> ret;
150 for (int iteration = 0; iteration < iterations; iteration++)
151 {
152 ret.push_back(constructPolytopeWorst<N>(size));
153 }
154 return ret;
155 }
156
157 template <unsigned int N>
158 double constructPolytopeWorst(int size) const
159 {
160 clock_type::time_point start = clock_type::now();
161 // Construct the base polytope
162 ConvexPolytope<N, double> poly;
163 TinyVector<double, N> vec;
164 poly.addVertex(vec);
165 for (int n = 0; n < N; n++)
166 {
167 vec[n] = 1.;
168 if (n > 0)
169 {
170 vec[n-1] = 0.;
171 }
172 poly.addVertex(vec);
173 }
174 poly.close();
175
176 // Add the vertices
177 for (int n = 0; n < size; n++)
178 {
179 for (int dim = 0; dim < N; dim++)
180 {
181 vec[dim] = (2*rand() - 1)/static_cast<double>(RAND_MAX);
182 }
183 vec /= norm(vec);
184 poly.addExtremeVertex(vec);
185 }
186 clock_type::time_point stop = clock_type::now();
187 return chrono::duration_cast<chrono::microseconds>(stop - start).count();
188 }
189 };
190
// Benchmark-suite wiring: registers the worst-case hull benchmarks for
// dimensions 2, 3, and 4.
struct ConvexHullBenchmarkSuite : public test_suite
{
    ConvexHullBenchmarkSuite()
    : test_suite("ConvexHullBenchmarkSuite")
    {
        // NOTE(review): the typical-case benchmarks are checked in but
        // disabled -- presumably to keep run time down; confirm before
        // re-enabling or deleting.
        // add(testCase(&ConvexHullBenchmark::testTypical<2>));
        // add(testCase(&ConvexHullBenchmark::testTypical<3>));
        // add(testCase(&ConvexHullBenchmark::testTypical<4>));
        add(testCase(&ConvexHullBenchmark::testWorst<2>));
        add(testCase(&ConvexHullBenchmark::testWorst<3>));
        add(testCase(&ConvexHullBenchmark::testWorst<4>));
    }
};
204
205 } // namespace vigra
206
207 int main(int argc, char** argv)
208 {
209 vigra::ConvexHullBenchmarkSuite benchmark;
210 const int failed = benchmark.run(vigra::testsToBeExecuted(argc, argv));
211 std::cout << benchmark.report() << std::endl;
212
213 return failed != 0;
214 }
0 #define VIGRA_CHECK_BOUNDS
1
2 #include <limits>
3 #include <algorithm>
4 #include <cmath>
5 #include <vigra/unittest.hxx>
6 #include <vigra/multi_array.hxx>
7 #include <vigra/polytope.hxx>
8
9
10 namespace vigra
11 {
12
/** Count the occurrences of \a value in \a vec.
 *
 *  Convenience wrapper around std::count over the whole container.
 *  std::count returns the iterator's signed difference_type; since a
 *  count is never negative, it is converted explicitly to unsigned int
 *  (the implicit narrowing conversion in the original triggered
 *  sign-conversion warnings).
 */
template <class Iterable, class T>
unsigned int count(const Iterable & vec, const T & value)
{
    return static_cast<unsigned int>(
            std::count(vec.begin(), vec.end(), value));
}
21
/** Tests for StarPolytope<N, double> in 2D and 3D.
 *
 *  Covers point containment, n-volume, n-surface, closedness bookkeeping,
 *  rasterization (fill), lit-facet queries, and facet-neighbor tracking
 *  (aligns_map_).  All geometric expectations are computed by hand for
 *  small simplices.
 */
struct FloatStarPolytopeTest
{
    typedef TinyVector<double, 2> Vector2;
    typedef TinyVector<double, 3> Vector3;
    typedef StarPolytope<2, double> Polytope2;
    typedef StarPolytope<3, double> Polytope3;

    // Tolerance for the floating-point volume/surface comparisons.
    FloatStarPolytopeTest()
    : eps_(std::numeric_limits<double>::epsilon() * 3)
    {}

    // contains() must accept interior, edge, and vertex points of the
    // triangle and reject points outside it.
    void testContains2D()
    {
        Polytope2 poly(
                Vector2(0. , 0. ),
                Vector2(1. , 0. ),
                Vector2(0. , 1. ),
                Vector2(0.25, 0.25));
        // Internal
        shouldEqual(poly.contains(Vector2(0.25, 0.25)), true);
        // External
        shouldEqual(poly.contains(Vector2(1. , 1. )), false);
        // Edge
        shouldEqual(poly.contains(Vector2(0.5 , 0. )), true);
        shouldEqual(poly.contains(Vector2(0. , 0.5 )), true);
        shouldEqual(poly.contains(Vector2(0.5 , 0.5 )), true);
        // Vertex
        shouldEqual(poly.contains(Vector2(0. , 0. )), true);
        shouldEqual(poly.contains(Vector2(1. , 0. )), true);
        shouldEqual(poly.contains(Vector2(0. , 1. )), true);
    }

    // Same containment checks for the unit tetrahedron: interior, facet,
    // edge, and vertex points are inside; a point beyond the hull is not.
    void testContains3D()
    {
        Polytope3 poly(
                Vector3(0. , 0. , 0. ),
                Vector3(1. , 0. , 0. ),
                Vector3(0. , 1. , 0. ),
                Vector3(0. , 0. , 1. ),
                Vector3(0.25, 0.25, 0.25));
        // Internal
        shouldEqual(poly.contains(Vector3(0.25, 0.25, 0.25)), true);
        // External
        shouldEqual(poly.contains(Vector3(1. , 1. , 1. )), false);
        // Facet
        shouldEqual(poly.contains(Vector3(0. , 0.2 , 0.2)), true);
        shouldEqual(poly.contains(Vector3(0.2 , 0. , 0.2)), true);
        shouldEqual(poly.contains(Vector3(0.2 , 0.2 , 0. )), true);
        shouldEqual(poly.contains(Vector3(0.25, 0.25, 0.5)), true);
        // Edge
        shouldEqual(poly.contains(Vector3(0.5 , 0. , 0. )), true);
        shouldEqual(poly.contains(Vector3(0. , 0.5 , 0. )), true);
        shouldEqual(poly.contains(Vector3(0. , 0. , 0.5)), true);
        shouldEqual(poly.contains(Vector3(0. , 0.5 , 0.5)), true);
        shouldEqual(poly.contains(Vector3(0.5 , 0. , 0.5)), true);
        shouldEqual(poly.contains(Vector3(0.5 , 0.5 , 0.0)), true);
        // Vertex
        shouldEqual(poly.contains(Vector3(0. , 0. , 0. )), true);
        shouldEqual(poly.contains(Vector3(1. , 0. , 0. )), true);
        shouldEqual(poly.contains(Vector3(0. , 1. , 0. )), true);
        shouldEqual(poly.contains(Vector3(0. , 0. , 1. )), true);
    }

    // Area of the unit right triangle is 1/2; of the half-scaled one 1/8.
    void testNVolume2D()
    {
        {
            Polytope2 poly(
                    Vector2(0. , 0. ),
                    Vector2(1. , 0. ),
                    Vector2(0. , 1. ),
                    Vector2(0.25, 0.25));
            shouldEqual(abs(poly.nVolume() - .5) < eps_, true);
        }
        {
            Polytope2 poly(
                    Vector2(0.5 , 0.5 ),
                    Vector2(0.5 , 1. ),
                    Vector2(1. , 0.5 ),
                    Vector2(0.6 , 0.6 ));
            shouldEqual(abs(poly.nVolume() - .125) < eps_, true);
        }
    }

    // Volume of the unit tetrahedron is 1/6; the half-scaled copy has
    // volume 1/6 * (1/2)^3 = 1/48, independent of translation.
    void testNVolume3D()
    {
        {
            Polytope3 poly(
                    Vector3(0. , 0. , 0. ),
                    Vector3(1. , 0. , 0. ),
                    Vector3(0. , 1. , 0. ),
                    Vector3(0. , 0. , 1. ),
                    Vector3(0.25, 0.25, 0.25));
            shouldEqual(abs(poly.nVolume() - 1./6.) < eps_, true);
        }
        {
            Polytope3 poly(
                    Vector3(0.5 , 0.5 , 0.5 ),
                    Vector3(1. , 0.5 , 0.5 ),
                    Vector3(0.5 , 1. , 0.5 ),
                    Vector3(0.5 , 0.5 , 1. ),
                    Vector3(0.6 , 0.6 , 0.6 ));
            shouldEqual(abs(poly.nVolume() - 1./(6.*8.)) < eps_, true);
        }
    }

    // Perimeter of the unit right triangle: two legs of length 1 plus the
    // hypotenuse sqrt(2).
    void testNSurface2D()
    {
        Polytope2 poly(
                Vector2(0. , 0. ),
                Vector2(1. , 0. ),
                Vector2(0. , 1. ),
                Vector2(0.25, 0.25));
        shouldEqual(abs(poly.nSurface() - (2. + sqrt(2.))) < eps_, true);
    }

    // Surface of the unit tetrahedron: three right-triangle facets of area
    // 1/2 plus the diagonal facet of area sqrt(3)/2.
    void testNSurface3D()
    {
        Polytope3 poly(
                Vector3(0. , 0. , 0. ),
                Vector3(1. , 0. , 0. ),
                Vector3(0. , 1. , 0. ),
                Vector3(0. , 0. , 1. ),
                Vector3(0.25, 0.25, 0.25));
        const double surf = (3. + sqrt(3.)) / 2.;
        shouldEqual(abs(poly.nSurface() - surf) < eps_, true);
    }

    // A polytope built from the full facet set reports closed(); one with
    // facets still missing does not (neither globally nor per facet).
    void testClosed2D()
    {
        Polytope2 poly_closed(
                Vector2(0. , 0. ),
                Vector2(1. , 0. ),
                Vector2(0. , 1. ),
                Vector2(0.25, 0.25));
        shouldEqual(poly_closed.closed(), true);

        Polytope2 poly_open(Vector2(0.25, 0.25));
        Polytope2::node_type n1 = poly_open.addVertex(Vector2(0, 0));
        Polytope2::node_type n2 = poly_open.addVertex(Vector2(1, 0));
        Polytope2::node_type n3 = poly_open.addVertex(Vector2(0, 1));
        Polytope2::node_type f1 = poly_open.addFacet(n1, n2);
        Polytope2::node_type f2 = poly_open.addFacet(n1, n3);
        shouldEqual(poly_open.closed(), false);
        shouldEqual(poly_open.closed(f1), false);
        shouldEqual(poly_open.closed(f2), false);
    }

    // closed() flips to true only once the last of the four tetrahedron
    // facets has been added.
    void testClosed3D()
    {
        Polytope3 poly(Vector3(0.1, 0.1, 0.1));
        Polytope3::node_type n1 = poly.addVertex(Vector3(0, 0, 0));
        Polytope3::node_type n2 = poly.addVertex(Vector3(1, 0, 0));
        Polytope3::node_type n3 = poly.addVertex(Vector3(0, 1, 0));
        Polytope3::node_type n4 = poly.addVertex(Vector3(0, 0, 1));
        Polytope3::node_type f1 = poly.addFacet(n2, n3, n4);
        shouldEqual(poly.closed(), false);
        Polytope3::node_type f2 = poly.addFacet(n1, n3, n4);
        shouldEqual(poly.closed(), false);
        Polytope3::node_type f3 = poly.addFacet(n1, n2, n4);
        shouldEqual(poly.closed(), false);
        Polytope3::node_type f4 = poly.addFacet(n1, n2, n3);
        shouldEqual(poly.closed(), true);
    }

    // fill() rasterizes the triangle into a label image using the given
    // offset and scale (image coordinate = (world - offset) / scale --
    // TODO confirm the exact mapping against StarPolytope::fill).
    void testFill2D()
    {
        Polytope2 poly(
                Vector2(0. , 0. ),
                Vector2(1. , 0. ),
                Vector2(0. , 1. ),
                Vector2(0.25, 0.25));
        MultiArray<2, unsigned int> label_image(vigra::Shape2(5, 5));
        for (auto it = label_image.begin(); it != label_image.end(); it++)
        {
            *it = 0;
        }
        unsigned int ref[25] = {
                0, 0, 0, 0, 0,
                0, 0, 0, 0, 0,
                0, 0, 1, 1, 1,
                0, 0, 1, 1, 0,
                0, 0, 1, 0, 0};
        Vector2 offset(-1., -1.);
        Vector2 scale(0.5, 0.5);
        poly.fill(label_image, 1, offset, scale);
        shouldEqualSequence(label_image.begin(), label_image.end(), ref);
    }

    // litFacets(p) returns the facets "visible" from p: none for an
    // interior point, two facets from beyond a vertex, one from beyond an
    // edge.
    void testLitFacets2D()
    {
        Polytope2 poly(Vector2(0.25, 0.25));
        Polytope2::node_type n1 = poly.addVertex(Vector2(0, 0));
        Polytope2::node_type n2 = poly.addVertex(Vector2(1, 0));
        Polytope2::node_type n3 = poly.addVertex(Vector2(0, 1));
        Polytope2::node_type f1 = poly.addFacet(n2, n3);
        Polytope2::node_type f2 = poly.addFacet(n1, n3);
        Polytope2::node_type f3 = poly.addFacet(n1, n2);
        auto lit_v1 = poly.litFacets(Vector2(-1. , -1. ));
        auto lit_v2 = poly.litFacets(Vector2( 2. , -0.5));
        auto lit_v3 = poly.litFacets(Vector2(-0.5, 2. ));
        auto lit_e1 = poly.litFacets(Vector2( 1. , 1. ));
        auto lit_e2 = poly.litFacets(Vector2(-2. , 0.5));
        auto lit_e3 = poly.litFacets(Vector2( 0.5, -2. ));
        auto lit0 = poly.litFacets(Vector2( 0.2, 0.2));
        shouldEqual(lit0.size(), 0);
        shouldEqual(lit_v1.size(), 2);
        shouldEqual(lit_v2.size(), 2);
        shouldEqual(lit_v3.size(), 2);
        shouldEqual(lit_e1.size(), 1);
        shouldEqual(lit_e2.size(), 1);
        shouldEqual(lit_e3.size(), 1);
        shouldEqual(count(lit_v1, f2), 1);
        shouldEqual(count(lit_v1, f3), 1);
        shouldEqual(count(lit_v2, f1), 1);
        shouldEqual(count(lit_v2, f3), 1);
        shouldEqual(count(lit_v3, f1), 1);
        shouldEqual(count(lit_v3, f2), 1);
        shouldEqual(count(lit_e1, f1), 1);
        shouldEqual(count(lit_e2, f2), 1);
        shouldEqual(count(lit_e3, f3), 1);
    }

    // aligns_map_ bookkeeping in 2D: after each addFacet, facets sharing a
    // ridge list each other as neighbors; unmatched ridge slots hold
    // lemon::INVALID.
    void testFindNeighbor2D()
    {
        Polytope2 poly(Vector2(0.25, 0.25));
        Polytope2::node_type n1 = poly.addVertex(Vector2(0, 0));
        Polytope2::node_type n2 = poly.addVertex(Vector2(1, 0));
        Polytope2::node_type n3 = poly.addVertex(Vector2(0, 1));
        Polytope2::node_type f1 = poly.addFacet(n2, n3);
        {
            auto aligns1 = poly.aligns_map_[f1];
            shouldEqual(count(aligns1, lemon::INVALID), 2);
        }
        Polytope2::node_type f2 = poly.addFacet(n1, n3);
        {
            auto aligns1 = poly.aligns_map_[f1];
            auto aligns2 = poly.aligns_map_[f2];
            shouldEqual(count(aligns1, f2), 1);
            shouldEqual(count(aligns1, lemon::INVALID), 1);
            shouldEqual(count(aligns2, f1), 1);
            shouldEqual(count(aligns2, lemon::INVALID), 1);
        }
        Polytope2::node_type f3 = poly.addFacet(n1, n2);
        {
            auto aligns1 = poly.aligns_map_[f1];
            auto aligns2 = poly.aligns_map_[f2];
            auto aligns3 = poly.aligns_map_[f3];
            shouldEqual(count(aligns1, f2), 1);
            shouldEqual(count(aligns1, f3), 1);
            shouldEqual(count(aligns2, f1), 1);
            shouldEqual(count(aligns2, f3), 1);
            shouldEqual(count(aligns3, f2), 1);
            shouldEqual(count(aligns3, f1), 1);
        }
    }

    // Same neighbor bookkeeping for the tetrahedron: each facet has three
    // ridge slots, filled in as the adjacent facets are added.
    void testFindNeighbor3D()
    {
        Polytope3 poly(Vector3(0.1, 0.1, 0.1));
        Polytope3::node_type n1 = poly.addVertex(Vector3(0, 0, 0));
        Polytope3::node_type n2 = poly.addVertex(Vector3(1, 0, 0));
        Polytope3::node_type n3 = poly.addVertex(Vector3(0, 1, 0));
        Polytope3::node_type n4 = poly.addVertex(Vector3(0, 0, 1));
        Polytope3::node_type f1 = poly.addFacet(n2, n3, n4);
        {
            auto aligns1 = poly.aligns_map_[f1];
            shouldEqual(count(aligns1, lemon::INVALID), 3);
        }
        Polytope3::node_type f2 = poly.addFacet(n1, n3, n4);
        {
            auto aligns1 = poly.aligns_map_[f1];
            auto aligns2 = poly.aligns_map_[f2];
            shouldEqual(count(aligns1, f2), 1);
            shouldEqual(count(aligns1, lemon::INVALID), 2);
            shouldEqual(count(aligns2, f1), 1);
            shouldEqual(count(aligns2, lemon::INVALID), 2);
        }
        Polytope3::node_type f3 = poly.addFacet(n1, n2, n4);
        {
            auto aligns1 = poly.aligns_map_[f1];
            auto aligns2 = poly.aligns_map_[f2];
            auto aligns3 = poly.aligns_map_[f3];

            shouldEqual(count(aligns1, f2), 1);
            shouldEqual(count(aligns1, f3), 1);
            shouldEqual(count(aligns1, lemon::INVALID), 1);
            shouldEqual(count(aligns2, f1), 1);
            shouldEqual(count(aligns2, f3), 1);
            shouldEqual(count(aligns2, lemon::INVALID), 1);
            shouldEqual(count(aligns3, f1), 1);
            shouldEqual(count(aligns3, f2), 1);
            shouldEqual(count(aligns3, lemon::INVALID), 1);
        }
        Polytope3::node_type f4 = poly.addFacet(n1, n2, n3);
        {
            auto aligns1 = poly.aligns_map_[f1];
            auto aligns2 = poly.aligns_map_[f2];
            auto aligns3 = poly.aligns_map_[f3];
            auto aligns4 = poly.aligns_map_[f4];
            shouldEqual(count(aligns1, f2), 1);
            shouldEqual(count(aligns1, f3), 1);
            shouldEqual(count(aligns1, f4), 1);
            shouldEqual(count(aligns2, f1), 1);
            shouldEqual(count(aligns2, f3), 1);
            shouldEqual(count(aligns2, f4), 1);
            shouldEqual(count(aligns3, f1), 1);
            shouldEqual(count(aligns3, f2), 1);
            shouldEqual(count(aligns3, f4), 1);
            shouldEqual(count(aligns4, f1), 1);
            shouldEqual(count(aligns4, f2), 1);
            shouldEqual(count(aligns4, f3), 1);
        }
    }

    // Comparison tolerance for the floating-point geometry checks.
    double eps_;
};
338
339 struct IntStarPolytopeTest
340 {
341 typedef TinyVector<int, 2> Vector2;
342 typedef TinyVector<int, 3> Vector3;
343 typedef StarPolytope<2, int> Polytope2;
344 typedef StarPolytope<3, int> Polytope3;
345 typedef NumericTraits<int>::RealPromote RealPromote;
346
347 IntStarPolytopeTest()
348 : eps_(std::numeric_limits<RealPromote>::epsilon() * 2)
349 {}
350
351 void testContains2D()
352 {
353 Polytope2 poly(
354 Vector2(0, 0),
355 Vector2(4, 0),
356 Vector2(0, 2),
357 Vector2(1, 1));
358 // Internal
359 shouldEqual(poly.contains(Vector2( 1, 1)), true);
360 // External
361 shouldEqual(poly.contains(Vector2( 3, 1)), false);
362 shouldEqual(poly.contains(Vector2(-1, 1)), false);
363 shouldEqual(poly.contains(Vector2( 2, -1)), false);
364 // Edge
365 shouldEqual(poly.contains(Vector2( 2, 1)), true);
366 shouldEqual(poly.contains(Vector2( 0, 1)), true);
367 shouldEqual(poly.contains(Vector2( 2, 0)), true);
368 // Vertex
369 shouldEqual(poly.contains(Vector2( 0, 0)), true);
370 shouldEqual(poly.contains(Vector2( 4, 0)), true);
371 shouldEqual(poly.contains(Vector2( 0, 2)), true);
372 }
373
374 void testContains3D()
375 {
376 Polytope3 poly(
377 Vector3( 0, 0, 0),
378 Vector3( 6, 0, 0),
379 Vector3( 0, 6, 0),
380 Vector3( 0, 0, 6),
381 Vector3( 1, 1, 1));
382 // Internal
383 shouldEqual(poly.contains(Vector3( 1, 1, 1)), true);
384 // External
385 shouldEqual(poly.contains(Vector3( 6, 6, 6)), false);
386 // Facet
387 shouldEqual(poly.contains(Vector3( 2, 2, 2)), true);
388 shouldEqual(poly.contains(Vector3( 0, 1, 1)), true);
389 shouldEqual(poly.contains(Vector3( 1, 0, 1)), true);
390 shouldEqual(poly.contains(Vector3( 1, 1, 0)), true);
391 // Edge
392 shouldEqual(poly.contains(Vector3( 1, 0, 0)), true);
393 shouldEqual(poly.contains(Vector3( 0, 1, 0)), true);
394 shouldEqual(poly.contains(Vector3( 0, 0, 1)), true);
395 shouldEqual(poly.contains(Vector3( 0, 3, 3)), true);
396 shouldEqual(poly.contains(Vector3( 3, 0, 3)), true);
397 shouldEqual(poly.contains(Vector3( 3, 3, 0)), true);
398 // Vertex
399 shouldEqual(poly.contains(Vector3( 0, 0, 0)), true);
400 shouldEqual(poly.contains(Vector3( 6, 0, 0)), true);
401 shouldEqual(poly.contains(Vector3( 0, 6, 0)), true);
402 shouldEqual(poly.contains(Vector3( 0, 0, 6)), true);
403 }
404
405 void testNVolume2D()
406 {
407 {
408 Polytope2 poly(
409 Vector2( 0, 0),
410 Vector2( 3, 0),
411 Vector2( 0, 3),
412 Vector2( 1, 1));
413 RealPromote n_volume = poly.nVolume();
414 shouldEqualTolerance(n_volume, 4.5, eps_);
415 }
416 {
417 Polytope2 poly(
418 Vector2( 1, 1),
419 Vector2( 4, 1),
420 Vector2( 1, 4),
421 Vector2( 2, 2));
422 shouldEqualTolerance(poly.nVolume(), 4.5, eps_);
423 }
424 }
425
426 void testNVolume3D()
427 {
428 {
429 Polytope3 poly(
430 Vector3(0, 0, 0),
431 Vector3(6, 0, 0),
432 Vector3(0, 6, 0),
433 Vector3(0, 0, 6),
434 Vector3(1, 1, 1));
435 shouldEqualTolerance(poly.nVolume(), 36., eps_);
436 }
437 {
438 Polytope3 poly(
439 Vector3(1, 1, 1),
440 Vector3(7, 1, 1),
441 Vector3(1, 7, 1),
442 Vector3(1, 1, 7),
443 Vector3(2, 2, 2));
444 shouldEqualTolerance(poly.nVolume(), 36., eps_);
445 }
446 }
447
448 void testNSurface2D()
449 {
450 Polytope2 poly(
451 Vector2(0, 0),
452 Vector2(3, 0),
453 Vector2(0, 3),
454 Vector2(1, 1));
455 const RealPromote surf = 2 * 3 + 3 * sqrt(2);
456 shouldEqualTolerance(poly.nSurface(), surf, eps_);
457 }
458
459 void testNSurface3D()
460 {
461 Polytope3 poly(
462 Vector3(0, 0, 0),
463 Vector3(6, 0, 0),
464 Vector3(0, 6, 0),
465 Vector3(0, 0, 6),
466 Vector3(1, 1, 1));
467 const RealPromote surf = 6 * 6 * (3. + sqrt(3.)) / 2.;
468 shouldEqualTolerance(poly.nSurface(), surf, eps_);
469 }
470
471 void testClosed2D()
472 {
473 Polytope2 poly_closed(
474 Vector2(0, 0),
475 Vector2(3, 0),
476 Vector2(0, 3),
477 Vector2(1, 1));
478 shouldEqual(poly_closed.closed(), true);
479
480 Polytope2 poly_open(Vector2(1, 1));
481 Polytope2::node_type n1 = poly_open.addVertex(Vector2(0, 0));
482 Polytope2::node_type n2 = poly_open.addVertex(Vector2(3, 0));
483 Polytope2::node_type n3 = poly_open.addVertex(Vector2(0, 3));
484 Polytope2::node_type f1 = poly_open.addFacet(n1, n2);
485 Polytope2::node_type f2 = poly_open.addFacet(n1, n3);
486 shouldEqual(poly_open.closed(), false);
487 shouldEqual(poly_open.closed(f1), false);
488 shouldEqual(poly_open.closed(f2), false);
489 }
490
491 void testClosed3D()
492 {
493 Polytope3 poly(Vector3(1, 1, 1));
494 Polytope3::node_type n1 = poly.addVertex(Vector3(0, 0, 0));
495 Polytope3::node_type n2 = poly.addVertex(Vector3(6, 0, 0));
496 Polytope3::node_type n3 = poly.addVertex(Vector3(0, 6, 0));
497 Polytope3::node_type n4 = poly.addVertex(Vector3(0, 0, 6));
498 Polytope3::node_type f1 = poly.addFacet(n2, n3, n4);
499 shouldEqual(poly.closed(), false);
500 Polytope3::node_type f2 = poly.addFacet(n1, n3, n4);
501 shouldEqual(poly.closed(), false);
502 Polytope3::node_type f3 = poly.addFacet(n1, n2, n4);
503 shouldEqual(poly.closed(), false);
504 Polytope3::node_type f4 = poly.addFacet(n1, n2, n3);
505 shouldEqual(poly.closed(), true);
506 }
507
508 // void testLitFacets2D()
509 // {
510 // Polytope2 poly(Vector2(0.25, 0.25));
511 // Polytope2::node_type n1 = poly.addVertex(Vector2(0, 0));
512 // Polytope2::node_type n2 = poly.addVertex(Vector2(1, 0));
513 // Polytope2::node_type n3 = poly.addVertex(Vector2(0, 1));
514 // Polytope2::node_type f1 = poly.addFacet(n2, n3);
515 // Polytope2::node_type f2 = poly.addFacet(n1, n3);
516 // Polytope2::node_type f3 = poly.addFacet(n1, n2);
517 // auto lit_v1 = poly.litFacets(Vector2(-1. , -1. ));
518 // auto lit_v2 = poly.litFacets(Vector2( 2. , -0.5));
519 // auto lit_v3 = poly.litFacets(Vector2(-0.5, 2. ));
520 // auto lit_e1 = poly.litFacets(Vector2( 1. , 1. ));
521 // auto lit_e2 = poly.litFacets(Vector2(-2. , 0.5));
522 // auto lit_e3 = poly.litFacets(Vector2( 0.5, -2. ));
523 // auto lit0 = poly.litFacets(Vector2( 0.2, 0.2));
524 // shouldEqual(lit0.size(), 0);
525 // shouldEqual(lit_v1.size(), 2);
526 // shouldEqual(lit_v2.size(), 2);
527 // shouldEqual(lit_v3.size(), 2);
528 // shouldEqual(lit_e1.size(), 1);
529 // shouldEqual(lit_e2.size(), 1);
530 // shouldEqual(lit_e3.size(), 1);
531 // shouldEqual(std::count(lit_v1.begin(), lit_v1.end(), f2), 1);
532 // shouldEqual(std::count(lit_v1.begin(), lit_v1.end(), f3), 1);
533 // shouldEqual(std::count(lit_v2.begin(), lit_v2.end(), f1), 1);
534 // shouldEqual(std::count(lit_v2.begin(), lit_v2.end(), f3), 1);
535 // shouldEqual(std::count(lit_v3.begin(), lit_v3.end(), f1), 1);
536 // shouldEqual(std::count(lit_v3.begin(), lit_v3.end(), f2), 1);
537 // shouldEqual(std::count(lit_e1.begin(), lit_e1.end(), f1), 1);
538 // shouldEqual(std::count(lit_e2.begin(), lit_e2.end(), f2), 1);
539 // shouldEqual(std::count(lit_e3.begin(), lit_e3.end(), f3), 1);
540 // }
541
542 // void testFindNeighbor2D()
543 // {
544 // Polytope2 poly(Vector2(0.25, 0.25));
545 // Polytope2::node_type n1 = poly.addVertex(Vector2(0, 0));
546 // Polytope2::node_type n2 = poly.addVertex(Vector2(1, 0));
547 // Polytope2::node_type n3 = poly.addVertex(Vector2(0, 1));
548 // Polytope2::node_type f1 = poly.addFacet(n2, n3);
549 // {
550 // auto aligns1 = poly.aligns_map_[f1];
551 // shouldEqual(aligns1.size(), 0);
552 // }
553 // Polytope2::node_type f2 = poly.addFacet(n1, n3);
554 // {
555 // auto aligns1 = poly.aligns_map_[f1];
556 // auto aligns2 = poly.aligns_map_[f2];
557 // shouldEqual(aligns1.size(), 1);
558 // shouldEqual(aligns1.count(f2), 1);
559 // shouldEqual(aligns2.size(), 1);
560 // shouldEqual(aligns2.count(f1), 1);
561 // }
562 // Polytope2::node_type f3 = poly.addFacet(n1, n2);
563 // {
564 // auto aligns1 = poly.aligns_map_[f1];
565 // auto aligns2 = poly.aligns_map_[f2];
566 // auto aligns3 = poly.aligns_map_[f3];
567 // shouldEqual(aligns1.size(), 2);
568 // shouldEqual(aligns1.count(f2), 1);
569 // shouldEqual(aligns1.count(f3), 1);
570 // shouldEqual(aligns2.size(), 2);
571 // shouldEqual(aligns2.count(f1), 1);
572 // shouldEqual(aligns2.count(f3), 1);
573 // shouldEqual(aligns3.size(), 2);
574 // shouldEqual(aligns3.count(f2), 1);
575 // shouldEqual(aligns3.count(f1), 1);
576 // }
577 // }
578
579 // void testFindNeighbor3D()
580 // {
581 // Polytope3 poly(Vector3(0.1, 0.1, 0.1));
582 // Polytope3::node_type n1 = poly.addVertex(Vector3(0, 0, 0));
583 // Polytope3::node_type n2 = poly.addVertex(Vector3(1, 0, 0));
584 // Polytope3::node_type n3 = poly.addVertex(Vector3(0, 1, 0));
585 // Polytope3::node_type n4 = poly.addVertex(Vector3(0, 0, 1));
586 // Polytope3::node_type f1 = poly.addFacet(n2, n3, n4);
587 // {
588 // auto aligns1 = poly.aligns_map_[f1];
589 // shouldEqual(aligns1.size(), 0);
590 // }
591 // Polytope3::node_type f2 = poly.addFacet(n1, n3, n4);
592 // {
593 // auto aligns1 = poly.aligns_map_[f1];
594 // auto aligns2 = poly.aligns_map_[f2];
595 // shouldEqual(aligns1.size(), 1);
596 // shouldEqual(aligns1.count(f2), 1);
597 // shouldEqual(aligns2.size(), 1);
598 // shouldEqual(aligns2.count(f1), 1);
599 // }
600 // Polytope3::node_type f3 = poly.addFacet(n1, n2, n4);
601 // {
602 // auto aligns1 = poly.aligns_map_[f1];
603 // auto aligns2 = poly.aligns_map_[f2];
604 // auto aligns3 = poly.aligns_map_[f3];
605
606 // shouldEqual(aligns1.size(), 2);
607 // shouldEqual(aligns1.count(f2), 1);
608 // shouldEqual(aligns1.count(f3), 1);
609 // shouldEqual(aligns2.size(), 2);
610 // shouldEqual(aligns2.count(f1), 1);
611 // shouldEqual(aligns2.count(f3), 1);
612 // shouldEqual(aligns3.size(), 2);
613 // shouldEqual(aligns3.count(f1), 1);
614 // shouldEqual(aligns3.count(f2), 1);
615 // }
616 // Polytope3::node_type f4 = poly.addFacet(n1, n2, n3);
617 // {
618 // auto aligns1 = poly.aligns_map_[f1];
619 // auto aligns2 = poly.aligns_map_[f2];
620 // auto aligns3 = poly.aligns_map_[f3];
621 // auto aligns4 = poly.aligns_map_[f4];
622 // shouldEqual(aligns1.size(), 3);
623 // shouldEqual(aligns1.count(f2), 1);
624 // shouldEqual(aligns1.count(f3), 1);
625 // shouldEqual(aligns1.count(f4), 1);
626 // shouldEqual(aligns2.size(), 3);
627 // shouldEqual(aligns2.count(f1), 1);
628 // shouldEqual(aligns2.count(f3), 1);
629 // shouldEqual(aligns2.count(f4), 1);
630 // shouldEqual(aligns3.size(), 3);
631 // shouldEqual(aligns3.count(f1), 1);
632 // shouldEqual(aligns3.count(f2), 1);
633 // shouldEqual(aligns3.count(f4), 1);
634 // shouldEqual(aligns4.size(), 3);
635 // shouldEqual(aligns4.count(f1), 1);
636 // shouldEqual(aligns4.count(f2), 1);
637 // shouldEqual(aligns4.count(f3), 1);
638 // }
639 // }
640
641 RealPromote eps_;
642 };
643
644 struct FloatConvexPolytopeTest
645 {
646 typedef TinyVector<double, 2> Vector2;
647 typedef TinyVector<double, 3> Vector3;
648 typedef ConvexPolytope<2, double> Polytope2;
649 typedef ConvexPolytope<3, double> Polytope3;
650
651 FloatConvexPolytopeTest()
652 : eps_(std::numeric_limits<double>::epsilon() * 3)
653 {}
654
655 void testAddExtremeVertex2D()
656 {
657 const int N = 100;
658 Polytope2 poly(
659 Vector2( 1., 0.),
660 Vector2(-1., 0.),
661 Vector2( 0., 1.));
662 poly.addExtremeVertex(Vector2( 0., -1.));
663 shouldEqualTolerance(poly.nVolume(), 2., eps_);
664 shouldEqualTolerance(poly.nSurface(), 4. * std::sqrt(2.), eps_);
665 for (int n = 0; n < N; n++)
666 {
667 Vector2 vec(
668 std::cos(2*M_PI*n/N),
669 std::sin(2*M_PI*n/N));
670 shouldEqualTolerance(vec.magnitude(), 1., eps_);
671 poly.addExtremeVertex(vec);
672 shouldEqual(poly.closed(), true);
673 }
674 const double sur_tgt = 2.*N*std::sin(M_PI / N);
675 shouldEqualTolerance(poly.nSurface(), sur_tgt, eps_);
676 const double vol_tgt = 1.*N*std::sin(M_PI / N) * std::cos(M_PI / N);
677 shouldEqualTolerance(poly.nVolume(), vol_tgt, eps_);
678 for (int n = 0; n < 100; n++)
679 {
680 Vector2 vec(
681 (2*rand() - 1)/static_cast<double>(RAND_MAX),
682 (2*rand() - 1)/static_cast<double>(RAND_MAX));
683 if (vec.magnitude() > 1)
684 {
685 shouldEqual(poly.contains(vec), false);
686 }
687 else if (vec.magnitude() < std::cos(M_PI / N))
688 {
689 shouldEqual(poly.contains(vec), true);
690 }
691 }
692 }
693
694 void testAddExtremeVertex3D()
695 {
696 const int N = 100;
697 Polytope3 poly(
698 Vector3( 1., 0., 1.),
699 Vector3(-1., 0., 1.),
700 Vector3( 0., 1., 1.),
701 Vector3( 0., 0., 0.));
702 for (int n = 0; n < N; n++)
703 {
704 Vector3 vec(
705 std::cos(2*M_PI*n/N),
706 std::sin(2*M_PI*n/N),
707 1.);
708 poly.addExtremeVertex(vec);
709 shouldEqual(poly.closed(), true);
710 }
711 const double sur_tgt = N * std::sin(M_PI / N) * (
712 std::cos(M_PI / N) + std::sqrt(
713 std::cos(M_PI / N) * std::cos(M_PI / N) + 1.));
714 shouldEqualTolerance(poly.nSurface(), sur_tgt, eps_ * N);
715 const double vol_tgt = N * std::sin(M_PI / N) * std::cos(M_PI / N) / 3.;
716 shouldEqualTolerance(poly.nVolume(), vol_tgt, eps_ * N);
717 for (int n = 0; n < 100; n++)
718 {
719 Vector3 vec(
720 (2*rand() - 1)/static_cast<double>(RAND_MAX),
721 (2*rand() - 1)/static_cast<double>(RAND_MAX),
722 rand()/static_cast<double>(RAND_MAX));
723 double dist = std::sqrt(vec[0] * vec[0] + vec[1] * vec[1]);
724 if (dist > vec[2])
725 {
726 shouldEqual(poly.contains(vec), false);
727 }
728 else if (dist < (vec[2] * std::cos(M_PI / N)))
729 {
730 shouldEqual(poly.contains(vec), true);
731 }
732 }
733 }
734
735 void testAddNonExtremeVertex2D()
736 {
737 const int N = 1000;
738 const double eps = 4. / sqrt(N);
739 Polytope2 poly(
740 Vector2( 1., 0.),
741 Vector2(-1., 0.),
742 Vector2( 0., 1.));
743 poly.addExtremeVertex(Vector2(0., -1.));
744 shouldEqual(abs(poly.nVolume() - 2.) < eps_, true);
745 shouldEqual(abs(poly.nSurface() - 4. * sqrt(2.)) < eps, true);
746 for (int n = 0; n < N; n++)
747 {
748 Vector2 vec(
749 (2*rand() - 1)/static_cast<double>(RAND_MAX),
750 (2*rand() - 1)/static_cast<double>(RAND_MAX));
751 if (vec.magnitude() <= 1.)
752 {
753 poly.addExtremeVertex(vec);
754 shouldEqual(poly.closed(), true);
755 }
756 }
757 const double sur_err = (2*M_PI - poly.nSurface()) / (2.*M_PI);
758 shouldEqual(sur_err < eps, true);
759 shouldEqual(sur_err > 0, true);
760 const double vol_err = (M_PI - poly.nVolume()) / (M_PI);
761 shouldEqualTolerance(vol_err, 0, eps);
762 shouldEqual(vol_err > 0, true);
763 for (int n = 0; n < 100; n++)
764 {
765 Vector2 vec(
766 (2*rand() - 1)/static_cast<double>(RAND_MAX),
767 (2*rand() - 1)/static_cast<double>(RAND_MAX));
768 if (abs(vec.magnitude() - 1) > eps)
769 {
770 shouldEqual(poly.contains(vec), vec.magnitude() < 1.);
771 }
772 }
773 }
774
775 void testAddNonExtremeVertex3D()
776 {
777 const int N = 1000;
778 const double eps = 9. / sqrt(N);
779 Polytope3 poly(
780 Vector3( 1., 0., 0.),
781 Vector3(-1., 0., 0.),
782 Vector3( 0., 1., 0.),
783 Vector3( 0., 0., 1.));
784 for (int n = 0; n < N; n++)
785 {
786 Vector3 vec(
787 (2*rand() - 1)/static_cast<double>(RAND_MAX),
788 (2*rand() - 1)/static_cast<double>(RAND_MAX),
789 (2*rand() - 1)/static_cast<double>(RAND_MAX));
790 if (vec.magnitude() <= 1.)
791 {
792 poly.addExtremeVertex(vec);
793 shouldEqual(poly.closed(), true);
794 }
795 }
796 const double sur_err = (4.*M_PI - poly.nSurface()) / (4.*M_PI);
797 shouldEqualTolerance(sur_err, 0, eps);
798 shouldEqual(sur_err > 0, true);
799 const double vol_err = (4./3.*M_PI - poly.nVolume()) / (4./3.*M_PI);
800 shouldEqualTolerance(vol_err, 0, eps);
801 shouldEqual(vol_err > 0, true);
802 for (int n = 0; n < 100; n++)
803 {
804 Vector3 vec(
805 (2*rand() - 1)/static_cast<double>(RAND_MAX),
806 (2*rand() - 1)/static_cast<double>(RAND_MAX),
807 (2*rand() - 1)/static_cast<double>(RAND_MAX));
808 if (abs(vec.magnitude() - 1) > eps)
809 {
810 shouldEqual(poly.contains(vec), vec.magnitude() < 1.);
811 }
812 }
813 }
814
815 double eps_;
816 };
817
818 struct PolytopeTestSuite : public vigra::test_suite
819 {
820 PolytopeTestSuite() : vigra::test_suite("PolytopeTestSuite")
821 {
822 add(testCase(&FloatStarPolytopeTest::testClosed2D));
823 add(testCase(&FloatStarPolytopeTest::testClosed3D));
824 add(testCase(&FloatStarPolytopeTest::testContains2D));
825 add(testCase(&FloatStarPolytopeTest::testContains3D));
826 add(testCase(&FloatStarPolytopeTest::testFill2D));
827 add(testCase(&FloatStarPolytopeTest::testFindNeighbor2D));
828 add(testCase(&FloatStarPolytopeTest::testFindNeighbor3D));
829 add(testCase(&FloatStarPolytopeTest::testLitFacets2D));
830 add(testCase(&FloatStarPolytopeTest::testNSurface2D));
831 add(testCase(&FloatStarPolytopeTest::testNSurface3D));
832 add(testCase(&FloatStarPolytopeTest::testNVolume2D));
833 add(testCase(&FloatStarPolytopeTest::testNVolume3D));
834 add(testCase(&FloatConvexPolytopeTest::testAddExtremeVertex2D));
835 add(testCase(&FloatConvexPolytopeTest::testAddExtremeVertex3D));
836 /*
837 add(testCase(&FloatConvexPolytopeTest::testAddNonExtremeVertex2D));
838 add(testCase(&FloatConvexPolytopeTest::testAddNonExtremeVertex3D));
839 */
840 add(testCase(&IntStarPolytopeTest::testClosed2D));
841 add(testCase(&IntStarPolytopeTest::testClosed3D));
842 add(testCase(&IntStarPolytopeTest::testContains2D));
843 add(testCase(&IntStarPolytopeTest::testContains3D));
844 add(testCase(&IntStarPolytopeTest::testNSurface2D));
845 add(testCase(&IntStarPolytopeTest::testNSurface3D));
846 add(testCase(&IntStarPolytopeTest::testNVolume2D));
847 add(testCase(&IntStarPolytopeTest::testNVolume3D));
848 }
849 };
850
851 } // namespace vigra
852
853 int main(int argc, char** argv)
854 {
855 vigra::PolytopeTestSuite test;
856 const int failed = test.run(vigra::testsToBeExecuted(argc, argv));
857 std::cerr << test.report() << std::endl;
858
859 return failed != 0;
860 }
861
VIGRA_CONFIGURE_THREADING()

# The new random forest is inherently multi-threaded, so the test can only
# run when a threading implementation is available.
if(THREADING_FOUND)
    if(HDF5_FOUND)
        # ${SUPPRESS_WARNINGS} expands to SYSTEM when SUPPRESS_3RD_PARTY_WARNINGS
        # is enabled (see the top-level CMakeLists.txt), matching the convention
        # used for all other third-party includes in this tree.
        INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
        ADD_DEFINITIONS(${HDF5_CPPFLAGS} -DHasHDF5)
        VIGRA_ADD_TEST(test_random_forest_new test.cxx LIBRARIES ${THREADING_LIBRARIES} vigraimpex ${HDF5_LIBRARIES})
    else()
        # Without HDF5 the impex library is not needed, but the threading
        # libraries still are (the HDF5 branch links them as well).
        VIGRA_ADD_TEST(test_random_forest_new test.cxx LIBRARIES ${THREADING_LIBRARIES})
    endif()
else()
    MESSAGE(STATUS "** WARNING: No threading implementation found.")
    MESSAGE(STATUS "** test_random_forest_new will not be executed on this platform.")
endif()

add_subdirectory(data)
# Copy the pre-trained dummy random forest into the build tree so
# test_random_forest_new can open "data/rf.h5" at runtime.
VIGRA_COPY_TEST_DATA(rf.h5)
0 /************************************************************************/
1 /* */
2 /* Copyright 2014-2015 by Ullrich Koethe and Philip Schill */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34 #include <vigra/unittest.hxx>
35 #include <vigra/random_forest_3.hxx>
36 #include <vigra/random.hxx>
37 #ifdef HasHDF5
38 #include <vigra/random_forest_3_hdf5_impex.hxx>
39 #endif
40
41 using namespace vigra;
42 using namespace vigra::rf3;
43
44 struct RandomForestTests
45 {
46 void test_base_class()
47 {
48 typedef BinaryForest Graph;
49 typedef Graph::Node Node;
50 typedef LessEqualSplitTest<double> SplitTest;
51 typedef ArgMaxAcc Acc;
52 typedef RandomForest<MultiArray<2, double>, MultiArray<1, int>, SplitTest, Acc> RF;
53
54 // Build a forest from scratch.
55 Graph gr;
56 RF::NodeMap<SplitTest>::type split_tests;
57 RF::NodeMap<size_t>::type leaf_responses;
58 {
59 Node n0 = gr.addNode();
60 Node n1 = gr.addNode();
61 Node n2 = gr.addNode();
62 Node n3 = gr.addNode();
63 Node n4 = gr.addNode();
64 Node n5 = gr.addNode();
65 Node n6 = gr.addNode();
66 gr.addArc(n0, n1);
67 gr.addArc(n0, n2);
68 gr.addArc(n1, n3);
69 gr.addArc(n1, n4);
70 gr.addArc(n2, n5);
71 gr.addArc(n2, n6);
72
73 split_tests.insert(n0, SplitTest(0, 0.6));
74 split_tests.insert(n1, SplitTest(1, 0.25));
75 split_tests.insert(n2, SplitTest(1, 0.75));
76 leaf_responses.insert(n3, 0);
77 leaf_responses.insert(n3, 0);
78 leaf_responses.insert(n4, 1);
79 leaf_responses.insert(n5, 2);
80 leaf_responses.insert(n6, 3);
81 }
82 std::vector<int> distinct_labels;
83 distinct_labels.push_back(0);
84 distinct_labels.push_back(1);
85 distinct_labels.push_back(-7);
86 distinct_labels.push_back(3);
87 auto const pspec = ProblemSpec<int>().num_features(2).distinct_classes(distinct_labels);
88 RF rf = RF(gr, split_tests, leaf_responses, pspec);
89
90 // Check if the given points are predicted correctly.
91 double test_x_values[] = {
92 0.2, 0.4, 0.2, 0.4, 0.7, 0.8, 0.7, 0.8,
93 0.2, 0.2, 0.7, 0.7, 0.2, 0.2, 0.8, 0.8
94 };
95 MultiArray<2, double> test_x(Shape2(8, 2), test_x_values);
96 int test_y_values[] = {
97 0, 0, 1, 1, -7, -7, 3, 3
98 };
99 MultiArray<1, int> test_y(Shape1(8), test_y_values);
100 MultiArray<1, int> pred_y(Shape1(8));
101 rf.predict(test_x, pred_y, 1);
102 shouldEqualSequence(pred_y.begin(), pred_y.end(), test_y.begin());
103 }
104
105 void test_default_rf()
106 {
107 typedef MultiArray<2, double> Features;
108 typedef MultiArray<1, int> Labels;
109
110 double train_x_values[] = {
111 0.2, 0.4, 0.2, 0.4, 0.7, 0.8, 0.7, 0.8,
112 0.2, 0.2, 0.7, 0.7, 0.2, 0.2, 0.8, 0.8
113 };
114 Features train_x(Shape2(8, 2), train_x_values);
115 int train_y_values[] = {
116 0, 0, 1, 1, -7, -7, 3, 3
117 };
118 Labels train_y(Shape1(8), train_y_values);
119 Features test_x(train_x);
120 Labels test_y(train_y);
121
122 std::vector<RandomForestOptionTags> splits;
123 splits.push_back(RF_GINI);
124 splits.push_back(RF_ENTROPY);
125 splits.push_back(RF_KSD);
126 for (auto split : splits)
127 {
128 RandomForestOptions const options = RandomForestOptions()
129 .tree_count(1)
130 .bootstrap_sampling(false)
131 .split(split)
132 .n_threads(1);
133 auto rf = random_forest(train_x, train_y, options);
134 Labels pred_y(test_y.shape());
135 rf.predict(test_x, pred_y, 1);
136 shouldEqualSequence(pred_y.begin(), pred_y.end(), test_y.begin());
137 }
138 }
139
140 void test_oob_visitor()
141 {
142 // Create a (noisy) grid with datapoints and assign classes as in a 4x4 chessboard.
143 size_t const nx = 100;
144 size_t const ny = 100;
145
146 RandomNumberGenerator<MersenneTwister> rand;
147 MultiArray<2, double> train_x(Shape2(nx*ny, 2));
148 MultiArray<1, int> train_y(Shape1(nx*ny));
149 for (size_t y = 0; y < ny; ++y)
150 {
151 for (size_t x = 0; x < nx; ++x)
152 {
153 train_x(y*nx+x, 0) = x + 2*rand.uniform()-1;
154 train_x(y*nx+x, 1) = y + 2*rand.uniform()-1;
155 if ((x/25+y/25) % 2 == 0)
156 train_y(y*nx+x) = 0;
157 else
158 train_y(y*nx+x) = 1;
159 }
160 }
161
162 RandomForestOptions const options = RandomForestOptions()
163 .tree_count(10)
164 .bootstrap_sampling(true)
165 .n_threads(1);
166 OOBError oob;
167 auto rf = random_forest(train_x, train_y, options, create_visitor(oob));
168 should(oob.oob_err_ > 0.02 && oob.oob_err_ < 0.04); // FIXME: Use a statistical approach here.
169 }
170
171 void test_var_importance_visitor()
172 {
173 // Create a (noisy) grid with datapoints and split the classes according to an oblique line.
174 size_t const nx = 20;
175 size_t const ny = 20;
176
177 RandomNumberGenerator<MersenneTwister> rand;
178 MultiArray<2, double> train_x(Shape2(nx*ny, 2));
179 MultiArray<1, int> train_y(Shape1(nx*ny));
180 for (size_t y = 0; y < ny; ++y)
181 {
182 for (size_t x = 0; x < nx; ++x)
183 {
184 train_x(y*nx+x, 0) = x + 2*rand.uniform()-1;
185 train_x(y*nx+x, 1) = y + 2*rand.uniform()-1;
186 if (x - nx/2.0 + 4*y - 4*ny/2.0 < 0)
187 train_y(y*nx+x) = 0;
188 else
189 train_y(y*nx+x) = 1;
190 }
191 }
192
193 RandomForestOptions const options = RandomForestOptions()
194 .tree_count(10)
195 .bootstrap_sampling(true)
196 .n_threads(1);
197 VariableImportance var_imp;
198 auto rf = random_forest(train_x, train_y, options, create_visitor(var_imp));
199
200 // The permutation importances of feature 1 should be about
201 // 10 times as big as the importances of feature 0.
202 for (size_t i = 0; i < 4; ++i)
203 {
204 should(var_imp.variable_importance_(1, i) > 5 * var_imp.variable_importance_(0, i));
205 }
206 }
207
208 #ifdef HasHDF5
209 void test_import()
210 {
211 typedef float FeatureType;
212 typedef UInt32 LabelType;
213 typedef MultiArray<2, FeatureType> Features;
214 typedef MultiArray<1, LabelType> Labels;
215
216 // Load the dummy random forest. This RF was trained on 500
217 // 2-dimensional points in [0, 1]^2. All points with x<0.5
218 // and y<0.5 have class 0, points with x<0.5 and y>=0.5 have
219 // class 1, points with x>0.5 and y>=0.5 have class 2, and
220 // points with x>=0.5 and y<0.5 have class 3.
221 HDF5File hfile("data/rf.h5", HDF5File::ReadOnly);
222 auto rf = random_forest_import_HDF5<Features, Labels>(hfile);
223
224 // Create some test data.
225 FeatureType test_x_data[] = {
226 0.2f, 0.4f, 0.6f, 0.8f, 0.2f, 0.4f, 0.6f, 0.8f, 0.2f, 0.4f, 0.6f, 0.8f, 0.2f, 0.4f, 0.6f, 0.8f,
227 0.2f, 0.2f, 0.2f, 0.2f, 0.4f, 0.4f, 0.4f, 0.4f, 0.6f, 0.6f, 0.6f, 0.6f, 0.8f, 0.8f, 0.8f, 0.8f
228 };
229 Features test_x(Shape2(16, 2), test_x_data);
230 LabelType test_y_data[] = {
231 0, 0, 3, 3, 0, 0, 3, 3, 1, 1, 2, 2, 1, 1, 2, 2
232 };
233 Labels test_y(Shape1(16), test_y_data);
234 Labels pred_y(Shape1(16));
235
236 // Use the RF to predict the data.
237 rf.predict(test_x, pred_y);
238 for (size_t i = 0; i < (size_t)test_y.size(); ++i)
239 should(test_y(i) == pred_y(i));
240 }
241
242 void test_export()
243 {
244 typedef float FeatureType;
245 typedef UInt32 LabelType;
246 typedef MultiArray<2, FeatureType> Features;
247 typedef MultiArray<1, LabelType> Labels;
248
249 // Load the dummy random forest.
250 HDF5File infile("data/rf.h5", HDF5File::ReadOnly);
251 auto rf = random_forest_import_HDF5<Features, Labels>(infile);
252
253 // Save the dummy random forest.
254 HDF5File outfile("data/rf_out.h5", HDF5File::New);
255 random_forest_export_HDF5(rf, outfile);
256 }
257 #endif
258 };
259
260 struct RandomForestTestSuite : public test_suite
261 {
262 RandomForestTestSuite()
263 :
264 test_suite("RandomForest test")
265 {
266 add(testCase(&RandomForestTests::test_base_class));
267 add(testCase(&RandomForestTests::test_default_rf));
268 add(testCase(&RandomForestTests::test_oob_visitor));
269 add(testCase(&RandomForestTests::test_var_importance_visitor));
270 #ifdef HasHDF5
271 add(testCase(&RandomForestTests::test_import));
272 add(testCase(&RandomForestTests::test_export));
273 #endif
274 }
275 };
276
277 int main(int argc, char** argv)
278 {
279 RandomForestTestSuite forest_test;
280 int failed = forest_test.run(testsToBeExecuted(argc, argv));
281 std::cout << forest_test.report() << std::endl;
282 return (failed != 0);
283 }
00 VIGRA_CONFIGURE_THREADING()
11
22 if(FFTW3_FOUND)
3 INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
3 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${FFTW3_INCLUDE_DIR})
44 ADD_DEFINITIONS(-DHasFFTW3)
55
66 VIGRA_ADD_TEST(test_registration test.cxx LIBRARIES ${FFTW3_LIBRARIES} ${FFTW3F_LIBRARIES} ${THREADING_LIBRARIES} vigraimpex)
651651 RadialBasisFunctor rbf;
652652 Matrix<double> weight_matrix = rbfMatrix2DFromCorrespondingPoints(s_points.begin(), s_points.end(), d_points.begin(),rbf);
653653
654 for(int j=0; j< d_points.size(); j++)
654 for(decltype(d_points.size()) j=0; j< d_points.size(); j++)
655655 {
656656 double x = d_points[j][0];
657657 double y = d_points[j][1];
660660 sy = weight_matrix(point_count,1)+weight_matrix(point_count+1,1)*x+ weight_matrix(point_count+2,1)*y;
661661
662662 //RBS part
663 for(int i=0; i<d_points.size(); i++)
663 for(decltype(d_points.size()) i=0; i<d_points.size(); i++)
664664 {
665665 double weight = rbf(d_points[i], d_points[j]);
666666 sx += weight_matrix(i,0)*weight;
00 if(HDF5_FOUND)
1 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
2
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
2
33 ADD_DEFINITIONS(-DHasHDF5 ${HDF5_CPPFLAGS})
44 endif()
55
143143 SIFImportInfo infoSIF(sifFile);
144144
145145 // compare
146 should (infoSIF.shape()[0] == infoSIF.width());
147 should (infoSIF.shape()[1] == infoSIF.height());
148 should (infoSIF.shape()[2] == infoSIF.stacksize());
146 should (infoSIF.shape()[0] == (unsigned)infoSIF.width());
147 should (infoSIF.shape()[1] == (unsigned)infoSIF.height());
148 should (infoSIF.shape()[2] == (unsigned)infoSIF.stacksize());
149149 for (int i = 0; i < 3; ++i) {
150 should (infoSIF.shape()[i] == infoSIF.shapeOfDimension(i));
150 should (infoSIF.shape()[i] == (unsigned)infoSIF.shapeOfDimension(i));
151151 }
152152 }
153153
00 if(FFTW3_FOUND)
1 INCLUDE_DIRECTORIES(${FFTW3_INCLUDE_DIR})
1 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${FFTW3_INCLUDE_DIR})
22 ADD_DEFINITIONS(-DHasFFTW3)
3
3
44 VIGRA_ADD_TEST(test_simpleanalysis test.cxx LIBRARIES vigraimpex ${FFTW3_LIBRARIES})
55 else()
66 VIGRA_ADD_TEST(test_simpleanalysis test.cxx LIBRARIES vigraimpex)
5454 {
5555 typedef MultiArray<N, float> FArray;
5656 typedef MultiArray<N, RGBValue<float> > FRGBArray;
57 typedef MultiArray<N, int> IArray;
57 typedef MultiArray<N, unsigned int> IArray;
5858 typedef typename MultiArrayShape<N>::type Shape;
5959
6060 ImageImportInfo info;
6565 lennaImage(info.shape())
6666 {
6767 importImage(info, destImage(lennaImage));
68 transformMultiArray(srcMultiArrayRange(lennaImage), destMultiArray(lennaImage), RGBPrime2LabFunctor<float>());
68 transformMultiArray(srcMultiArrayRange(lennaImage), destMultiArray(lennaImage), RGBPrime2LabFunctor<float>());
6969 }
7070
7171 void test_seeding()
5050 for (size_t i = 0; i < v.size(); ++i)
5151 {
5252 pool.enqueue(
53 [&v, i](size_t thread_id)
53 [&v, i](size_t /*thread_id*/)
5454 {
5555 v[i] = 0;
5656 for (size_t k = 0; k < i+1; ++k)
8080 {
8181 futures.emplace_back(
8282 pool.enqueue(
83 [&v, &exception_string, i](size_t thread_id)
83 [&v, &exception_string, i](size_t /*thread_id*/)
8484 {
8585 v[i] = 1;
8686 if (i == 5000)
109109 std::iota(v_in.begin(), v_in.end(), 0);
110110 std::vector<int> v_out(n);
111111 parallel_foreach(4, v_in.begin(), v_in.end(),
112 [&v_out](size_t thread_id, int x)
112 [&v_out](size_t /*thread_id*/, int x)
113113 {
114114 v_out[x] = x*(x+1)/2;
115115 }
133133 try
134134 {
135135 parallel_foreach(4, v_in.begin(), v_in.end(),
136 [&v_out, &exception_string](size_t thread_id, int x)
136 [&v_out, &exception_string](size_t /*thread_id*/, int x)
137137 {
138138 if (x == 5000)
139139 throw std::runtime_error(exception_string);
220220 std::vector<size_t> results(n_threads, 0);
221221 TIC;
222222 parallel_foreach(n_threads, input.begin(), input.end(),
223 [&results](size_t thread_id, size_t x)
223 [&results](size_t thread_id, size_t /*x*/)
224224 {
225225 results[thread_id] += 1;
226226 }
22 # the data from 'example_data.h5' are now in 'test_data.hxx', so HDF5 is
33 # no longer needed for this test
44 # if(HDF5_FOUND)
5 # INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
6
5 # INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
6
77 # ADD_DEFINITIONS(${HDF5_CPPFLAGS})
88
99 # VIGRA_ADD_TEST(test_unsupervised test.cxx LIBRARIES vigraimpex ${HDF5_LIBRARIES})
10
10
1111 # VIGRA_COPY_TEST_DATA(example_data.h5)
1212 # else()
1313 # MESSAGE(STATUS "** WARNING: test_unsupervised will not be executed")
226226 shouldEqualSequence(vector_.begin(), vector_.end(), data);
227227 }
228228
229 void testBackInsertion_failedOnVC14()
229 void testBackInsertionUntilReallocation()
230230 {
231231 // regression test for bug appearing with VC14,
232232 // see https://github.com/ukoethe/vigra/issues/256
11551155 should(typeid(UnqualifiedType<const int*&>::type) == typeid(int));
11561156 }
11571157
1158 #if 0
11591158 struct FinallyTester
11601159 {
1161 mutable int & v_;
1160 int & v_;
11621161
11631162 FinallyTester(int & v)
11641163 : v_(v)
11661165
11671166 void sq() const
11681167 {
1169 v_ = v_*v_;
1168 const_cast<int &>(v_) = v_*v_;
11701169 }
11711170 };
1172 #endif
11731171
11741172 void testFinally()
11751173 {
1176 std::cout << "testFinally() is disabled because many compilers do not yet support it." << std::endl;
1177 #if 0
11781174 int v = 0;
11791175 {
11801176 FinallyTester finally_tester(v);
11961192 }
11971193 catch(std::runtime_error &) {}
11981194 shouldEqual(v, 2);
1199 #endif
12001195 }
12011196 };
12021197
14481443 shouldEqual(a.get<int>(), 10);
14491444 shouldEqual(b.get<int>(), 10);
14501445
1451 b.release();
1446 b.destroy();
14521447 shouldNot(bool(b));
14531448 should(b.empty());
14541449
15321527
15331528 add( testCase( &ArrayVectorTest::testAccessor));
15341529 add( testCase( &ArrayVectorTest::testBackInsertion));
1530 add( testCase( &ArrayVectorTest::testBackInsertionUntilReallocation));
15351531 add( testCase( &ArrayVectorTest::testAmbiguousConstructor));
15361532 add( testCase( &BucketQueueTest::testDescending));
15371533 add( testCase( &BucketQueueTest::testAscending));
44 SET(vigranumpy_tmp_dir "${CMAKE_BINARY_DIR}/vigranumpy/vigra")
55 FILE(MAKE_DIRECTORY ${vigranumpy_tmp_dir})
66
7 INCLUDE_DIRECTORIES(${VIGRANUMPY_INCLUDE_DIRS})
7 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${VIGRANUMPY_INCLUDE_DIRS})
88
99 IF(HDF5_FOUND)
1010 ADD_DEFINITIONS(-DHasHDF5 ${HDF5_CPPFLAGS})
11 INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIR})
11 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${HDF5_INCLUDE_DIR})
1212 SET(VIGRANUMPY_IMPEX_LIBRARIES vigraimpex ${HDF5_LIBRARIES})
1313 ELSE()
1414 SET(VIGRANUMPY_IMPEX_LIBRARIES vigraimpex)
610610 hessian = filters.hessianOfGaussian(image, scale,
611611 sigma_d=sigma_d, step_size=step_size,
612612 window_size=window_size, roi=roi)
613 return filters.tensorEigenvalues(hessian, out=out)
613 if out is None:
614 return filters.tensorEigenvalues(hessian)
615
616 try:
617 return filters.tensorEigenvalues(hessian, out=out)
618 except ValueError:
619 pass
620 # retry without 'out', since its strides might not match
621 out[...] = filters.tensorEigenvalues(hessian)
622 return out
614623
615624 hessianOfGaussianEigenvalues.__module__ = 'vigra.filters'
616625 filters.hessianOfGaussianEigenvalues = hessianOfGaussianEigenvalues
626635 st = filters.structureTensor(image, innerScale, outerScale,
627636 sigma_d=sigma_d, step_size=step_size,
628637 window_size=window_size, roi=roi)
629 return filters.tensorEigenvalues(st, out=out)
638 if out is None:
639 return filters.tensorEigenvalues(st)
640
641 try:
642 return filters.tensorEigenvalues(st, out=out)
643 except ValueError:
644 pass
645 # retry without 'out', since its strides might not match
646 out[...] = filters.tensorEigenvalues(st)
647 return out
630648
631649 structureTensorEigenvalues.__module__ = 'vigra.filters'
632650 filters.structureTensorEigenvalues = structureTensorEigenvalues
668686 analysis.supportedRegionFeatures = supportedRegionFeatures
669687
670688 def supportedConvexHullFeatures(labels):
671 '''Return a list of Convex Hull feature names that are available for the given 2D label array.
672 These Convex Hull feature names are the valid inputs to a call of
673 :func:`extractConvexHullFeatures`. E.g., to compute just the first two features in the
689 '''Return a list of Convex Hull feature names that are available for the given label array.
690 These Convex Hull feature names are the valid inputs to a call with
691 :func:`extract2DConvexHullFeatures` or `extract3DConvexHullFeatures`. E.g., to compute just the first two features in the
674692 list, use::
675693
676694 f = vigra.analysis.supportedConvexHullFeatures(labels)
677695 print("Computing Convex Hull features:", f[:2])
678 r = vigra.analysis.extractConvexHullFeatures(labels, features=f[:2])
696 r = vigra.analysis.extract2DConvexHullFeatures(labels, features=f[:2])
679697 '''
680698 try:
681 return analysis.extractConvexHullFeatures(labels, list_features_only=True)
699 return analysis.extract2DConvexHullFeatures(labels, list_features_only=True)
682700 except:
683 return []
701 try:
702 return analysis.extract3DConvexHullFeatures(labels, list_features_only=True)
703 except:
704 return []
684705
685706 supportedConvexHullFeatures.__module__ = 'vigra.analysis'
686707 analysis.supportedConvexHullFeatures = supportedConvexHullFeatures
12221243 else:
12231244 return graphs._ragEdgeFeatures(self,graph,affiliatedEdges,edgeFeatures,weights,acc,out)
12241245
1246 def accumulateEdgeStatistics(self, edgeFeatures, out=None):
1247 if not isinstance(self, RegionAdjacencyGraph):
1248 raise AttributeError("accumulateEdgeFeaturesNew not implemented for " + type(self))
1249 graph = self.baseGraph
1250 affiliatedEdges = self.affiliatedEdges
1251 out = graphs._ragEdgeStatistics(self, graph, affiliatedEdges, edgeFeatures, out)
1252 return out
12251253
12261254 def accumulateNodeFeatures(self,nodeFeatures,acc='mean',out=None):
12271255 """ accumulate edge features from base graphs edges features
168168 ``taggedView()`` depends on whether ``array`` already has axistags or not.
169169
170170 1. If ``array`` has no axistags or ``force=True`` (i.e. existing axistags
171 shall be ignored) and neither the ``axistags`` nor the ``order`` parameters
172 are given, the function acts as if ``order="C"`` was specified (case 2 below).
173
174 2. If ``array`` has no axistags or ``force=True`` (i.e. existing axistags
171175 shall be ignored) and the ``order`` parameter is given, the function
172176 constructs appropriate axistags via :meth:`~vigra.VigraArray.defaultAxistags`::
173177
174178 >>> view = array.view(VigraArray)
175179 >>> view.axistags = VigraArray.defaultAxistags(view.ndim, order, noChannels)
176180
177 2. If ``array`` has no axistags (or ``force=True``) and the ``axistags`` parameter
181 3. If ``array`` has no axistags (or ``force=True``) and the ``axistags`` parameter
178182 is given, the function transforms this specification into an object of type
179183 :class:`~vigra.AxisTags` and attaches the result to the view::
180184
181185 >>> view = array.view(VigraArray)
182186 >>> view.axistags = makeAxistags(axistags)
183187
184 3. If ``array`` has axistags (and ``force=False``) and the ``order`` parameter is
188 4. If ``array`` has axistags (and ``force=False``) and the ``order`` parameter is
185189 given, the function transposes the array into the desired order::
186190
187191 >>> view = array.transposeToOrder(order)
188192 >>> if noChannels:
189193 ... view = view.dropChannelAxis()
190194
191 4. If ``array`` has axistags (and ``force=False``) and the ``axistags`` parameter
195 5. If ``array`` has axistags (and ``force=False``) and the ``axistags`` parameter
192196 is given, the function calls :meth:`~vigra.VigraArray.withAxes` to transforms
193197 the present axistags into the desired ones::
194198
208212 array = array.withAxes(axistags)
209213 else:
210214 if not axistags:
215 if not order:
216 order = 'C'
211217 axistags = VigraArray.defaultAxistags(array.ndim, order, noChannels)
212218 else:
213219 axistags = makeAxistags(axistags)
775781 clip = False
776782 if m == M:
777783 return res
778 f = 255.0 // (M - m)
784 f = 255.0 / (M - m)
779785 img = f * (img - m)
780786 if clip:
781787 img = numpy.minimum(255.0, numpy.maximum(0.0, img))
3636 ${VIGRANUMPY_THREAD_LIBRARIES}
3737 VIGRANUMPY)
3838
39 VIGRA_ADD_NUMPY_MODULE(analysis SOURCES
40 segmentation.cxx
41 edgedetection.cxx
42 interestpoints.cxx
43 accumulator.cxx
44 accumulator-region-singleband.cxx
45 accumulator-region-multiband.cxx
46 LIBRARIES
47 ${VIGRANUMPY_THREAD_LIBRARIES}
48 VIGRANUMPY)
39 IF(WITH_LEMON)
40 VIGRA_ADD_NUMPY_MODULE(analysis SOURCES
41 segmentation.cxx
42 edgedetection.cxx
43 interestpoints.cxx
44 accumulator.cxx
45 accumulator-region-singleband.cxx
46 accumulator-region-multiband.cxx
47 LIBRARIES
48 ${VIGRANUMPY_THREAD_LIBRARIES}
49 ${LEMON_LIBRARY}
50 VIGRANUMPY)
51 INCLUDE_DIRECTORIES(${LEMON_INCLUDE_DIR})
52 SET_TARGET_PROPERTIES(vigranumpy_analysis PROPERTIES COMPILE_FLAGS "-DWITH_LEMON")
53 ELSE(WITH_LEMON)
54 VIGRA_ADD_NUMPY_MODULE(analysis SOURCES
55 segmentation.cxx
56 edgedetection.cxx
57 interestpoints.cxx
58 accumulator.cxx
59 accumulator-region-singleband.cxx
60 accumulator-region-multiband.cxx
61 LIBRARIES
62 ${VIGRANUMPY_THREAD_LIBRARIES}
63 VIGRANUMPY)
64 ENDIF(WITH_LEMON)
4965
5066 VIGRA_ADD_NUMPY_MODULE(learning SOURCES
5167 random_forest_old.cxx
5268 random_forest.cxx
69 random_forest_3.cxx
5370 learning.cxx
5471 LIBRARIES
55 ${VIGRANUMPY_IMPEX_LIBRARIES}
72 ${VIGRANUMPY_IMPEX_LIBRARIES} ${VIGRANUMPY_THREAD_LIBRARIES}
5673 VIGRANUMPY)
5774
5875 VIGRA_ADD_NUMPY_MODULE(colors SOURCES
4747 #define STR(s) #s
4848 #define XSTR(s) s
4949
50 #ifdef WITH_LEMON
51
5052 template <unsigned int N, class T>
5153 python::object
5254 extractConvexHullFeatures(NumpyArray<N, Singleband<T> > const & labels,
5557 {
5658 using namespace vigra::acc;
5759
58 #define VIGRA_CONVEX_HULL_FEATURE_INPUT_COUNT "Input Count"
59 #define VIGRA_CONVEX_HULL_FEATURE_INPUT_PERIMETER "Input Perimeter"
60 #define VIGRA_CONVEX_HULL_FEATURE_INPUT_AREA "Input Area"
61 #define VIGRA_CONVEX_HULL_FEATURE_AREA "Area"
62 #define VIGRA_CONVEX_HULL_FEATURE_PERIMETER "Perimeter"
63 #define VIGRA_CONVEX_HULL_FEATURE_RUGOSITY "Rugosity"
60 #define VIGRA_CONVEX_HULL_FEATURE_INPUT_VOLUME "InputVolume"
61 #define VIGRA_CONVEX_HULL_FEATURE_HULL_VOLUME "HullVolume"
6462 #define VIGRA_CONVEX_HULL_FEATURE_CONVEXITY "Convexity"
65 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_COUNT "Defect Count"
66 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_MEAN_DISPLACEMENT "Defect Mean Displacement"
67 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_LIST "Defect Area List"
68 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_MEAN "Defect Area Mean"
69 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_VARIANCE "Defect Area Variance"
70 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_SKEWNESS "Defect Area Skewness"
71 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_KURTOSIS "Defect Area Kurtosis"
72 #define VIGRA_CONVEX_HULL_FEATURE_POLYGON "Polygon"
73
74 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_INPUT_CENTER "Input Center"
75 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_CENTER "Center"
76 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT_CENTER "Defect Center"
63 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_MEAN "DefectVolumeMean"
64 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_VARIANCE "DefectVolumeVariance"
65 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_SKEWNESS "DefectVolumeSkewness"
66 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_KURTOSIS "DefectVolumeKurtosis"
67 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_COUNT "DefectCount"
68 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT_DISPLACEMENT_MEAN "DefectDisplacementMean"
69
70 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_INPUT_CENTER "InputCenter"
71 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_HULL_CENTER "HullCenter"
72 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT_CENTER "DefectCenter"
7773
7874 if(list_features_only)
7975 {
80
8176 python::list res;
82 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_INPUT_COUNT));
83 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_INPUT_PERIMETER));
84 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_INPUT_AREA));
85 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_AREA));
86 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_PERIMETER));
87 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_RUGOSITY));
77 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_INPUT_VOLUME));
78 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_HULL_VOLUME));
8879 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_CONVEXITY));
89 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_POLYGON));
90
80 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_MEAN));
81 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_VARIANCE));
82 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_SKEWNESS));
83 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_KURTOSIS));
9184 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_COUNT));
92 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_MEAN_DISPLACEMENT));
93 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_LIST));
94 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_MEAN));
95 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_VARIANCE));
96 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_SKEWNESS));
97 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_KURTOSIS));
85 res.append(XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_DISPLACEMENT_MEAN));
9886
9987 res.append(XSTR(VIGRA_CONVEX_HULL_VECTOR_FEATURE_INPUT_CENTER));
100 res.append(XSTR(VIGRA_CONVEX_HULL_VECTOR_FEATURE_CENTER));
101
88 res.append(XSTR(VIGRA_CONVEX_HULL_VECTOR_FEATURE_HULL_CENTER));
10289 res.append(XSTR(VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT_CENTER));
10390
10491 return res;
10693
10794 TinyVector<npy_intp, N> permutation = labels.template permuteLikewise<N>();
10895
109 AccumulatorChainArray<CoupledArrays<N, T>,
110 Select<ConvexHull, DataArg<1>, LabelArg<1> >
111 > acc;
96 AccumulatorChainArray<
97 CoupledArrays<N, T>,
98 Select<ConvexHullFeatures, DataArg<1>, LabelArg<1> > > acc;
11299
113100 MultiArrayIndex ignored_label = -1;
114101 if(ignore_label != python::object())
119106
120107 {
121108 PyAllowThreads _pythread;
122
123109 extractFeatures(labels, acc);
124110 }
125
111
126112 int size = acc.maxRegionLabel()+1;
113
114 // finalize the calculations
115 for (int k = 0; k < size; ++k)
116 {
117 if (k != ignored_label && get<Count>(acc, k) != 0)
118 {
119 getAccumulator<ConvexHullFeatures>(acc, k).finalize();
120 }
121 }
122
123 // initialize return dict
127124 python::dict res;
128 {
129 NumpyArray<1, npy_uint32> array((Shape1(size)));
130 for(int k=0; k<size; ++k)
131 {
132 if(k == ignored_label)
133 continue;
134 array(k) = get<Count>(acc, k);
135 }
136 res[XSTR(VIGRA_CONVEX_HULL_FEATURE_INPUT_COUNT)] = array;
137 }
138
125
139126 #define VIGRA_CONVEX_HULL_FEATURE(TYPE, NAME, FUNCTION) \
140127 { \
141128 NumpyArray<1, TYPE> array((Shape1(size))); \
143130 { \
144131 if(k == ignored_label || get<Count>(acc, k) == 0) \
145132 continue; \
146 array(k) = get<ConvexHull>(acc, k).FUNCTION(); \
133 array(k) = get<ConvexHullFeatures>(acc, k).FUNCTION(); \
147134 } \
148135 res[XSTR(NAME)] = array; \
149136 }
150
151 #define VIGRA_CONVEX_HULL_FEATURE_DEFECT(TYPE, NAME, FUNCTION) \
152 { \
153 NumpyArray<1, double> array((Shape1(size))); \
154 for(int k=0; k<size; ++k) \
155 { \
156 if(k == ignored_label || get<Count>(acc, k) == 0) \
157 continue; \
158 array(k) = get<ConvexHull>(acc, k).meanDefectDisplacement(); \
159 } \
160 res[XSTR(NAME)] = array; \
161 }
162
163 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_INPUT_PERIMETER, inputPerimeter)
164 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_INPUT_AREA, inputArea)
165 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_PERIMETER, hullPerimeter)
166 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_AREA, hullArea)
137
138 VIGRA_CONVEX_HULL_FEATURE(npy_uint32, VIGRA_CONVEX_HULL_FEATURE_INPUT_VOLUME, inputVolume)
139 VIGRA_CONVEX_HULL_FEATURE(npy_uint32, VIGRA_CONVEX_HULL_FEATURE_HULL_VOLUME, hullVolume)
167140 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_CONVEXITY, convexity)
168 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_RUGOSITY, rugosity)
169 VIGRA_CONVEX_HULL_FEATURE(npy_uint32, VIGRA_CONVEX_HULL_FEATURE_DEFECT_COUNT, convexityDefectCount)
170
171 VIGRA_CONVEX_HULL_FEATURE_DEFECT(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_MEAN, convexityDefectAreaMean)
172 VIGRA_CONVEX_HULL_FEATURE_DEFECT(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_MEAN_DISPLACEMENT, meanDefectDisplacement)
173 VIGRA_CONVEX_HULL_FEATURE_DEFECT(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_VARIANCE, convexityDefectAreaVariance)
174 VIGRA_CONVEX_HULL_FEATURE_DEFECT(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_SKEWNESS, convexityDefectAreaSkewness)
175 VIGRA_CONVEX_HULL_FEATURE_DEFECT(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_KURTOSIS, convexityDefectAreaKurtosis)
176
141 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_MEAN, defectVolumeMean)
142 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_VARIANCE, defectVolumeVariance)
143 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_SKEWNESS, defectVolumeSkewness)
144 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_VOLUME_KURTOSIS, defectVolumeKurtosis)
145 VIGRA_CONVEX_HULL_FEATURE(npy_uint32, VIGRA_CONVEX_HULL_FEATURE_DEFECT_COUNT, defectCount)
146 VIGRA_CONVEX_HULL_FEATURE(double, VIGRA_CONVEX_HULL_FEATURE_DEFECT_DISPLACEMENT_MEAN, defectDisplacementMean)
147
177148 #undef VIGRA_CONVEX_HULL_FEATURE
178
179 {
180 python::list hulls;
181 for(int k=0; k<size; ++k)
182 {
183 if(k == ignored_label || get<Count>(acc, k) == 0)
184 {
185 hulls.append(python::object());
186 continue;
187 }
188 int hull_size = get<ConvexHull>(acc, k).hull().size();
189 NumpyArray<2, double> array(Shape2(hull_size, N));
190 Polygon<TinyVector<double, 2> > poly = (permutation == Shape2(0,1))
191 ? get<ConvexHull>(acc, k).hull()
192 : reverse(transpose(get<ConvexHull>(acc, k).hull()));
193 for(int p=0; p<hull_size; ++p)
194 {
195 for(int j=0; j<N; ++j)
196 array(p, j) = poly[p][j];
197 }
198 hulls.append(array);
199 }
200 res[XSTR(VIGRA_CONVEX_HULL_FEATURE_POLYGON)] = hulls;
201 }
202
203 {
204 NumpyArray<2, double> array(Shape2(size, 3));
205 for(int k=0; k<size; ++k)
206 {
207 if(k == ignored_label || get<Count>(acc, k) == 0)
208 continue;
209 int defects = min<int>(3, get<ConvexHull>(acc, k).convexityDefectCount());
210 for(int j=0; j<defects; ++j)
211 array(k, j) = get<ConvexHull>(acc, k).defectAreaList()[j];
212 }
213 res[XSTR(VIGRA_CONVEX_HULL_FEATURE_DEFECT_AREA_LIST)] = array;
214 }
215
149
216150 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE(NAME, FUNCTION) \
217151 { \
218152 NumpyArray<2, double> array(Shape2(size, N)); \
220154 { \
221155 if(k == ignored_label || get<Count>(acc, k) == 0) \
222156 continue; \
223 for(int j=0; j<N; ++j) \
224 array(k, permutation[j]) = get<ConvexHull>(acc, k).FUNCTION()[j]; \
157 for(unsigned j=0; j<N; ++j) \
158 array(k, permutation[j]) = get<ConvexHullFeatures>(acc, k).FUNCTION()[j]; \
225159 } \
226160 res[XSTR(NAME)] = array; \
227161 }
228
229 #define VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT(NAME, FUNCTION) \
230 { \
231 NumpyArray<2, double> array(Shape2(size, N)); \
232 for(int k=0; k<size; ++k) \
233 { \
234 if(k == ignored_label || get<Count>(acc, k) == 0) \
235 continue; \
236 for(int j=0; j<N; ++j) \
237 array(k, permutation[j]) = get<ConvexHull>(acc, k).FUNCTION()[j]; \
238 } \
239 res[XSTR(NAME)] = array; \
240 }
241
162
242163 VIGRA_CONVEX_HULL_VECTOR_FEATURE(VIGRA_CONVEX_HULL_VECTOR_FEATURE_INPUT_CENTER, inputCenter)
243 VIGRA_CONVEX_HULL_VECTOR_FEATURE(VIGRA_CONVEX_HULL_VECTOR_FEATURE_CENTER, hullCenter)
244
245 VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT(VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT_CENTER, convexityDefectCenter)
164 VIGRA_CONVEX_HULL_VECTOR_FEATURE(VIGRA_CONVEX_HULL_VECTOR_FEATURE_HULL_CENTER, hullCenter)
165 VIGRA_CONVEX_HULL_VECTOR_FEATURE(VIGRA_CONVEX_HULL_VECTOR_FEATURE_DEFECT_CENTER, defectCenter)
246166
247167 #undef VIGRA_CONVEX_HULL_VECTOR_FEATURE
248168
249169 return res;
250170 }
171
172 #endif // WITH_LEMON
251173
252174 template <unsigned int N, class T>
253175 python::object
263185 #define VIGRA_SKELETON_FEATURE_AVERAGE_LENGTH "Average Length"
264186 #define VIGRA_SKELETON_FEATURE_BRANCH_COUNT "Branch Count"
265187 #define VIGRA_SKELETON_FEATURE_HOLE_COUNT "Hole Count"
266 #define VIGRA_SKELETON_VECTOR_FEATURE_CENTER "Center"
188 #define VIGRA_SKELETON_VECTOR_FEATURE_CENTER "Skeleton Center"
267189 #define VIGRA_SKELETON_VECTOR_FEATURE_TERMINAL_1 "Terminal 1"
268190 #define VIGRA_SKELETON_VECTOR_FEATURE_TERMINAL_2 "Terminal 2"
269191
321243 NumpyArray<2, double> array(Shape2(size, N)); \
322244 for(int k=0; k<size; ++k) \
323245 { \
324 for(int j=0; j<N; ++j) \
246 for(unsigned j=0; j<N; ++j) \
325247 array(k, permutation[j]) = features[k].ATTRIBUTE[j]; \
326248 } \
327249 res[XSTR(NAME)] = array; \
354276 definePythonAccumulatorArraySingleband<2, float, ScalarRegionAccumulators>();
355277 definePythonAccumulatorArraySingleband<3, float, ScalarRegionAccumulators>();
356278
357 def("extractConvexHullFeatures",
358 registerConverters(&extractConvexHullFeatures<2, npy_uint32>),
359 (arg("labels"),
360 arg("ignoreLabel")=python::object(),
361 arg("list_features_only")=false),
362 "\nExtract convex hull features for each region of a labeled 2D image\n"
363 "(with dtype=numpy.uint32) and return a dictionary holding the\n"
364 "resulting feature arrays. Argument 'ignoreLabel' can be used to specify\n"
365 "an optional background label that is to be skipped. Note that the\n"
366 "convex hull itself and its features are computed from the interpixel\n"
367 "contour around each region. In the following, 'convexity defects'\n"
368 "are defined as the connected components of the set difference\n"
369 "between the convex hull and the original region.\n\n"
370 "The result dictionary holds the following keys:\n\n"
371 " - 'InputCount': the number of pixels in the original region\n\n"
372 " - 'InputPerimeter': the perimeter of the original interpixel contour\n\n"
373 " - 'InputArea': the areay enclosed by the original interpixel contour\n\n"
374 " - 'InputCenter': the centroid of the original interpixel contour polygon\n\n"
375 " - 'Perimeter': the perimeter of the convex hull polygon\n\n"
376 " - 'Area': the area enclosed by the convex hull polygon\n\n"
377 " - 'Center': the centroid of the convex hull polygon\n\n"
378 " - 'Rugosity': ratio between original perimeter and hull perimeter (>= 1)\n\n"
379 " - 'Convexity': the ratio between hull area and original area (<= 1)\n\n"
380 " - 'DefectCount': the number of convexity defects\n\n"
381 " - 'DefectCenter': the combined centroid of the defects\n\n"
382 " - 'MeanDefectDisplacement': mean distance between the centroids of the\n"
383 " original object and the centroids of the defects,\n"
384 " weighted by defect area\n\n"
385 " - 'DefectAreaList': the area of the three largest convexity defects\n\n"
386 " - 'DefectAreaMean': mean of the convexity defect areas\n\n"
387 " - 'DefectAreaVariance': variance of the convexity defect areas\n\n"
388 " - 'DefectAreaSkewness': skewness of the convexity defect areas\n\n"
389 " - 'DefectAreaKurtosis': kurtosis of the convexity defect areas\n\n"
390 " - 'Polygon': the convex hull polygon\n\n");
391
279 #ifdef WITH_LEMON
280 def("extract2DConvexHullFeatures",
281 registerConverters(&extractConvexHullFeatures<2, npy_uint32>),
282 ( arg("labels"),
283 arg("ignoreLabel")=python::object(),
284 arg("list_features_only")=false),
285 "\nExtract convex hull features for each region of a labeled 2D image (with\n"
286 "dtype=numpy.uint32) and return a dictionary holding the resulting feature\n"
287 "arrays. The argument 'ignoreLabel' can be used to specify an optional\n"
288 "background label that is to be skipped. Note that the convex hull itself and\n"
289 "its features are computed from the interpixel contour around each region. In\n"
290 "the following, 'convexity defects' are the connected components of the set\n"
291 "difference between the convex hull and the original region.\n"
292 "The result dictionary holds the following keys:\n\n"
293 " - InputVolume : the number of pixels in the original region\n\n"
294 " - HullVolume : the number of pixels in the convex hull\n\n"
295 " - Convexity : the ratio between the convex hull volume and the input\n"
296 " volume\n\n"
297 " - DefectVolumeMean : mean of the volumes of the convexity defects\n\n"
298 " - DefectVolumeVariance : variance of the volumes of the convexity\n"
299 " defects\n\n"
300 " - DefectVolumeSkewness : skewness of the volumes of the convexity\n"
301 " defects\n\n"
302 " - DefectVolumeKurtosis : kurtosis of the volumes of the convexity\n"
303 " defects\n\n"
304 " - DefectCount : number of convexity defects\n\n"
305 " - DefectDisplacementMean : mean distance between the center of the input\n"
306 " region and the center of the defects, weighted by the defect volumes\n\n"
307 " - InputCenter : center of the input region\n\n"
308 " - HullCenter : center of the convex hull\n\n"
309 " - DefectCenter : center of the defects\n\n");
310
311 def("extract3DConvexHullFeatures",
312 registerConverters(&extractConvexHullFeatures<3, npy_uint32>),
313 ( arg("labels"),
314 arg("ignoreLabel")=python::object(),
315 arg("list_features_only")=false),
316 "\nExtract convex hull features for each region of a labeled 3D image (with\n"
317 "dtype=numpy.uint32) and return a dictionary holding the resulting feature\n"
318 "arrays. The argument 'ignoreLabel' can be used to specify an optional\n"
319 "background label that is to be skipped. Note that the convex hull itself and\n"
320 "its features are computed from the interpixel contour around each region. In\n"
321 "the following, 'convexity defects' are the connected components of the set\n"
322 "difference between the convex hull and the original region.\n"
323 "The result dictionary holds the following keys:\n\n"
324 " - InputVolume : the number of pixels in the original region\n\n"
325 " - HullVolume : the number of pixels in the convex hull\n\n"
326 " - Convexity : the ratio between the convex hull volume and the input\n"
327 " volume\n\n"
328 " - DefectVolumeMean : mean of the volumes of the convexity defects\n\n"
329 " - DefectVolumeVariance : variance of the volumes of the convexity\n"
330 " defects\n\n"
331 " - DefectVolumeSkewness : skewness of the volumes of the convexity\n"
332 " defects\n\n"
333 " - DefectVolumeKurtosis : kurtosis of the volumes of the convexity\n"
334 " defects\n\n"
335 " - DefectCount : number of convexity defects\n\n"
336 " - DefectDisplacementMean : mean distance between the center of the input\n"
337 " region and the center of the defects, weighted by the defect volumes\n\n"
338 " - InputCenter : center of the input region\n\n"
339 " - HullCenter : center of the convex hull\n\n"
340 " - DefectCenter : center of the defects\n\n");
341 #endif // WITH_LEMON
342
392343 def("extractSkeletonFeatures",
393344 registerConverters(&pyExtractSkeletonFeatures<2, npy_uint32>),
394345 (arg("labels"),
367367 void pythonGray2QImage_ARGB32Premultiplied(
368368 const NumpyArray<2, Singleband<T> >& image,
369369 NumpyArray<3, Multiband<npy_uint8> > qimageView,
370 NumpyArray<1, T> normalize = boost::python::object()
370 NumpyArray<1, float> normalize
371371 )
372372 {
373373 vigra_precondition(image.isUnstrided() || image.transpose().isUnstrided(),
438438 const NumpyArray<2, Singleband<T> >& image,
439439 NumpyArray<3, Multiband<npy_uint8> > qimageView,
440440 NumpyArray<1, float> tintColor,
441 NumpyArray<1, T> normalize
441 NumpyArray<1, float> normalize
442442 )
443443 {
444444 vigra_precondition(image.isUnstrided() || image.transpose().isUnstrided(),
4141 #include <vigra/numpy_array_converters.hxx>
4242 #include <boost/python.hpp>
4343 #include <boost/python/to_python_converter.hpp>
44 #include <numpy/arrayscalars.h>
4445
4546 namespace python = boost::python;
4647
4950 #define VIGRA_NUMPY_TYPECHECKER(type) \
5051 if(python::object((python::detail::new_reference)PyArray_TypeObjectFromType(type)).ptr() == obj) \
5152 return obj;
52
53
5354 #define VIGRA_NUMPY_TYPECONVERTER(type) \
5455 if(python::object((python::detail::new_reference)PyArray_TypeObjectFromType(type)).ptr() == obj) \
5556 typeID = type;
56
57
5758 struct NumpyTypenumConverter
5859 {
5960 NumpyTypenumConverter()
6061 {
61 python::converter::registry::insert(&convertible, &construct,
62 python::converter::registry::insert(&convertible, &construct,
6263 python::type_id<NPY_TYPES>());
6364 python::to_python_converter<NPY_TYPES, NumpyTypenumConverter>();
6465 }
65
66
6667 static void* convertible(PyObject* obj)
6768 {
6869 // FIXME: there should be a more elegant way to program this...
8182 VIGRA_NUMPY_TYPECHECKER(NPY_UINT32)
8283 VIGRA_NUMPY_TYPECHECKER(NPY_INT)
8384 VIGRA_NUMPY_TYPECHECKER(NPY_UINT)
84 VIGRA_NUMPY_TYPECHECKER(NPY_INT64)
85 VIGRA_NUMPY_TYPECHECKER(NPY_INT64)
8586 VIGRA_NUMPY_TYPECHECKER(NPY_UINT64)
8687 VIGRA_NUMPY_TYPECHECKER(NPY_FLOAT32)
8788 VIGRA_NUMPY_TYPECHECKER(NPY_FLOAT64)
9394 }
9495
9596 // from Python
96 static void construct(PyObject* obj,
97 static void construct(PyObject* obj,
9798 python::converter::rvalue_from_python_stage1_data* data)
9899 {
99 void* const storage =
100 void* const storage =
100101 ((python::converter::rvalue_from_python_storage<NumpyAnyArray>* ) data)->storage.bytes;
101102
102103 // FIXME: there should be a more elegant way to program this...
112113 VIGRA_NUMPY_TYPECONVERTER(NPY_UINT32)
113114 VIGRA_NUMPY_TYPECONVERTER(NPY_INT)
114115 VIGRA_NUMPY_TYPECONVERTER(NPY_UINT)
115 VIGRA_NUMPY_TYPECONVERTER(NPY_INT64)
116 VIGRA_NUMPY_TYPECONVERTER(NPY_INT64)
116117 VIGRA_NUMPY_TYPECONVERTER(NPY_UINT64)
117118 VIGRA_NUMPY_TYPECONVERTER(NPY_FLOAT32)
118119 VIGRA_NUMPY_TYPECONVERTER(NPY_FLOAT64)
140141 {
141142 NumpyAnyArrayConverter()
142143 {
143 python::converter::registry::insert(&convertible, &construct,
144 python::converter::registry::insert(&convertible, &construct,
144145 python::type_id<NumpyAnyArray>());
145146 python::to_python_converter<NumpyAnyArray, NumpyAnyArrayConverter>();
146147 }
147
148
148149 static void* convertible(PyObject* obj)
149150 {
150151 return obj && (obj == Py_None || PyArray_Check(obj))
153154 }
154155
155156 // from Python
156 static void construct(PyObject* obj,
157 static void construct(PyObject* obj,
157158 python::converter::rvalue_from_python_stage1_data* data)
158159 {
159 void* const storage =
160 void* const storage =
160161 ((python::converter::rvalue_from_python_storage<NumpyAnyArray>* ) data)->storage.bytes;
161162
162163 if(obj == Py_None)
163164 obj = 0;
164
165
165166 new (storage) NumpyAnyArray(obj);
166167
167168 data->convertible = storage;
168169 }
169
170
170171 static PyObject* convert(NumpyAnyArray const& a)
171172 {
172173 return returnNumpyArray(a);
212213 {
213214
214215 typedef typename detail::MultiArrayShapeConverterTraits<M, T>::ShapeType ShapeType;
215
216
216217 MultiArrayShapeConverter()
217218 {
218 python::converter::registry::insert(&convertible, &construct,
219 python::converter::registry::insert(&convertible, &construct,
219220 python::type_id<ShapeType>());
220221 python::to_python_converter<ShapeType, MultiArrayShapeConverter>();
221222 }
222
223
223224 static void* convertible(PyObject* obj)
224225 {
225226 if(obj == 0)
235236 }
236237
237238 // from Python
238 static void construct(PyObject* obj,
239 static void construct(PyObject* obj,
239240 python::converter::rvalue_from_python_stage1_data* data)
240241 {
241 void* const storage =
242 void* const storage =
242243 ((python::converter::rvalue_from_python_storage<ShapeType>* ) data)->storage.bytes;
243244
244245 detail::MultiArrayShapeConverterTraits<M, T>::construct(storage, obj);
284285 //from python
285286 static void construct(PyObject* obj, python::converter::rvalue_from_python_stage1_data* data)
286287 {
287 void* const storage =
288 void* const storage =
288289 ((python::converter::rvalue_from_python_storage<Point2D>*) data)->storage.bytes;
289290 new (storage) Point2D(python::extract<int>(PySequence_Fast_GET_ITEM(obj,0)),
290291 python::extract<int>(PySequence_Fast_GET_ITEM(obj,1)));
333334 }
334335
335336 #if 0 // FIXME: reimplement to replace the Python versions for consistence?
336 PyObject *
337 constructNumpyArrayFromShape(python::object type, ArrayVector<npy_intp> const & shape,
337 PyObject *
338 constructNumpyArrayFromShape(python::object type, ArrayVector<npy_intp> const & shape,
338339 unsigned int spatialDimensions, unsigned int channels,
339340 NPY_TYPES typeCode, std::string order, bool init)
340341 {
344345 return detail::constructNumpyArrayImpl((PyTypeObject *)obj, shape, spatialDimensions, channels, typeCode, order, init).release();
345346 }
346347
347 PyObject *
348 constructNumpyArrayFromArray(python::object type, NumpyAnyArray array,
348 PyObject *
349 constructNumpyArrayFromArray(python::object type, NumpyAnyArray array,
349350 unsigned int spatialDimensions, unsigned int channels,
350351 NPY_TYPES typeCode, std::string order, bool init)
351352 {
352353 PyObject * obj = type.ptr();
353354 vigra_precondition(obj && PyType_Check(obj) && PyType_IsSubtype((PyTypeObject *)obj, &PyArray_Type),
354355 "constructNumpyArray(type, ...): first argument was not an array type.");
355 PyObject * res = detail::constructNumpyArrayImpl((PyTypeObject *)obj, array.shape(), spatialDimensions, channels,
356 PyObject * res = detail::constructNumpyArrayImpl((PyTypeObject *)obj, array.shape(), spatialDimensions, channels,
356357 typeCode, order, false, array.strideOrdering()).release();
357358 if(init)
358359 {
363364 }
364365 #endif
365366
366 PyObject *
367 constructArrayFromAxistags(python::object type, ArrayVector<npy_intp> const & shape,
367 PyObject *
368 constructArrayFromAxistags(python::object type, ArrayVector<npy_intp> const & shape,
368369 NPY_TYPES typeCode, AxisTags const & axistags, bool init)
369370 {
370371 PyAxisTags pyaxistags(python_ptr(python::object(axistags).ptr()));
371
372
372373 ArrayVector<npy_intp> norm_shape(shape);
373374 if(pyaxistags.size() > 0)
374375 {
375376 ArrayVector<npy_intp> permutation(pyaxistags.permutationToNormalOrder());
376377 applyPermutation(permutation.begin(), permutation.end(), shape.begin(), norm_shape.begin());
377378 }
378
379
379380 TaggedShape tagged_shape(norm_shape, pyaxistags);
380
381
381382 // FIXME: check that type is an array class?
382383 return constructArray(tagged_shape, typeCode, init, python_ptr(type.ptr()));
383384 }
386387 struct MatrixConverter
387388 {
388389 typedef linalg::Matrix<T> ArrayType;
389
390
390391 MatrixConverter();
391
392
392393 // to Python
393394 static PyObject* convert(ArrayType const& a)
394395 {
400401 MatrixConverter<T>::MatrixConverter()
401402 {
402403 using namespace boost::python;
403
404
404405 converter::registration const * reg = converter::registry::query(type_id<ArrayType>());
405
406
406407 // register the to_python_converter only once
407408 if(!reg || !reg->rvalue_chain)
408409 {
409410 to_python_converter<ArrayType, MatrixConverter>();
410411 }
411412 }
413
414 template <typename ScalarType>
415 struct NumpyScalarConverter
416 {
417 NumpyScalarConverter()
418 {
419 using namespace boost::python;
420 converter::registry::push_back( &convertible, &construct, type_id<ScalarType>());
421 }
422
423 // Determine if obj_ptr is a supported numpy.number
424 static void* convertible(PyObject* obj_ptr)
425 {
426 if (PyArray_IsScalar(obj_ptr, Float32) ||
427 PyArray_IsScalar(obj_ptr, Float64) ||
428 PyArray_IsScalar(obj_ptr, Int8) ||
429 PyArray_IsScalar(obj_ptr, Int16) ||
430 PyArray_IsScalar(obj_ptr, Int32) ||
431 PyArray_IsScalar(obj_ptr, Int64) ||
432 PyArray_IsScalar(obj_ptr, UInt8) ||
433 PyArray_IsScalar(obj_ptr, UInt16) ||
434 PyArray_IsScalar(obj_ptr, UInt32) ||
435 PyArray_IsScalar(obj_ptr, UInt64))
436 {
437 return obj_ptr;
438 }
439 return 0;
440 }
441
442 static void construct( PyObject* obj_ptr, boost::python::converter::rvalue_from_python_stage1_data* data)
443 {
444 using namespace boost::python;
445
446 // Grab pointer to memory into which to construct the C++ scalar
447 void* storage = ((converter::rvalue_from_python_storage<ScalarType>*) data)->storage.bytes;
448
449 // in-place construct the new scalar value
450 ScalarType * scalar = new (storage) ScalarType;
451
452 if (PyArray_IsScalar(obj_ptr, Float32))
453 (*scalar) = PyArrayScalar_VAL(obj_ptr, Float32);
454 else if (PyArray_IsScalar(obj_ptr, Float64))
455 (*scalar) = PyArrayScalar_VAL(obj_ptr, Float64);
456 else if (PyArray_IsScalar(obj_ptr, Int8))
457 (*scalar) = PyArrayScalar_VAL(obj_ptr, Int8);
458 else if (PyArray_IsScalar(obj_ptr, Int16))
459 (*scalar) = PyArrayScalar_VAL(obj_ptr, Int16);
460 else if (PyArray_IsScalar(obj_ptr, Int32))
461 (*scalar) = PyArrayScalar_VAL(obj_ptr, Int32);
462 else if (PyArray_IsScalar(obj_ptr, Int64))
463 (*scalar) = PyArrayScalar_VAL(obj_ptr, Int64);
464 else if (PyArray_IsScalar(obj_ptr, UInt8))
465 (*scalar) = PyArrayScalar_VAL(obj_ptr, UInt8);
466 else if (PyArray_IsScalar(obj_ptr, UInt16))
467 (*scalar) = PyArrayScalar_VAL(obj_ptr, UInt16);
468 else if (PyArray_IsScalar(obj_ptr, UInt32))
469 (*scalar) = PyArrayScalar_VAL(obj_ptr, UInt32);
470 else if (PyArray_IsScalar(obj_ptr, UInt64))
471 (*scalar) = PyArrayScalar_VAL(obj_ptr, UInt64);
472
473 // Stash the memory chunk pointer for later use by boost.python
474 data->convertible = storage;
475 }
476 };
477
412478
413479 void registerNumpyArrayConverters()
414480 {
418484 NumpyAnyArrayConverter();
419485 MatrixConverter<float>();
420486 MatrixConverter<double>();
421
487
488 NumpyScalarConverter<signed char>();
489 NumpyScalarConverter<short>();
490 NumpyScalarConverter<int>();
491 NumpyScalarConverter<long>();
492 NumpyScalarConverter<long long>();
493 NumpyScalarConverter<unsigned char>();
494 NumpyScalarConverter<unsigned short>();
495 NumpyScalarConverter<unsigned int>();
496 NumpyScalarConverter<unsigned long>();
497 NumpyScalarConverter<unsigned long long>();
498 NumpyScalarConverter<float>();
499 NumpyScalarConverter<double>();
500
422501 python::docstring_options doc_options(true, true, false);
423
502
424503 doc_options.disable_all();
425504 python::def("constructArrayFromAxistags", &constructArrayFromAxistags);
426505 // python::def("constructNumpyArray", &constructNumpyArrayFromArray);
3838 #include <vigra/numpy_array.hxx>
3939 #include <vigra/numpy_array_converters.hxx>
4040 #include <vigra/edgedetection.hxx>
41 #include <vigra/multi_convolution.hxx>
42 #include <vigra/tensorutilities.hxx>
4143
4244 namespace python = boost::python;
4345
6971 e.y = Edgel::value_type(v);
7072 }
7173
72 unsigned int Edgel__len__(Edgel const & e)
74 unsigned int Edgel__len__(Edgel const &)
7375 {
7476 return 2;
7577 }
177179 PyAllowThreads _pythread;
178180 cannyEdgeImage(srcImageRange(image), destImage(res),
179181 scale, threshold, edgeMarker);
182 }
183
184 return res;
185 }
186
187 template < class SrcPixelType, typename DestPixelType >
188 NumpyAnyArray
189 pythonCannyEdgeImageColor(NumpyArray<2, RGBValue<SrcPixelType> > image,
190 double scale, double threshold, DestPixelType edgeMarker,
191 NumpyArray<2, Singleband<DestPixelType> > res = python::object())
192 {
193 std::string description("Canny edges, scale=");
194 description += asString(scale) + ", threshold=" + asString(threshold);
195
196 res.reshapeIfEmpty(image.taggedShape().setChannelDescription(description),
197 "cannyEdgeImage(): Output array has wrong shape.");
198
199 {
200 PyAllowThreads _pythread;
201 MultiArray<2, TinyVector<float, 2>> gradient(image.shape());
202 MultiArray<2, TinyVector<float, 3>> tmp(image.shape()),
203 gradient_tensor(image.shape());
204 for(int k=0; k<3; ++k)
205 {
206 gaussianGradientMultiArray(image.bindElementChannel(k), gradient, scale);
207 vectorToTensor(gradient, tmp);
208 gradient_tensor += tmp;
209 }
210 tensorEigenRepresentation(gradient_tensor, tmp);
211 transformMultiArray(tmp, gradient, [](TinyVector<float, 3> const & v) {
212 return TinyVector<float, 2>(std::cos(v[2])*sqrt(v[0]), std::sin(v[2])*sqrt(v[0]));
213 });
214 cannyEdgeImageFromGradWithThinning(gradient, res,
215 threshold, edgeMarker, false);
180216 }
181217
182218 return res;
400436 "Detect and mark edges in an edge image using Canny's algorithm.\n\n"
401437 "For details see cannyEdgeImage_ in the vigra C++ documentation.\n");
402438
439 def("cannyEdgeImage",
440 registerConverters(&pythonCannyEdgeImageColor<float, UInt8>),
441 (arg("image"), arg("scale"), arg("threshold"), arg("edgeMarker"),arg("out")=python::object()),
442 "Detect and mark edges in an edge image using Canny's algorithm.\n\n"
443 "For details see cannyEdgeImage_ in the vigra C++ documentation.\n");
444
403445 def("cannyEdgeImageWithThinning",
404446 registerConverters(&pythonCannyEdgeImageWithThinning<float, UInt8>),
405447 (arg("image"), arg("scale"), arg("threshold"), arg("edgeMarker"),
2525
2626
2727 template<class GRAPH>
28 class LemonGraphAlgorithmVisitor
28 class LemonGraphAlgorithmVisitor
2929 : public boost::python::def_visitor<LemonGraphAlgorithmVisitor<GRAPH> >
3030 {
3131 public:
3636
3737 typedef LemonGraphAlgorithmVisitor<GRAPH> VisitorType;
3838 // Lemon Graph Typedefs
39
39
4040 typedef typename Graph::index_type index_type;
4141 typedef typename Graph::Edge Edge;
4242 typedef typename Graph::Node Node;
183183 )
184184 );
185185
186
186
187187
188188 python::def("nodeGtToEdgeGt",registerConverters(&pyNodeGtToEdgeGt),
189189 (
243243
244244 std::string clsName_;
245245 template <class classT>
246 void visit(classT& c) const
247 {
246 void visit(classT& /*c*/) const
247 {
248248 // - watersheds-segmentation
249249 // - carving-segmentation
250250 // - felzenwalb-segmentation
282282
283283 find3Cycles(graph, cyclesNodes);
284284 cyclesEdges.reshapeIfEmpty(cyclesNodes.shape());
285
285
286286 Node nodes[3];
287287 Edge edges[3];
288288
304304 static NumpyAnyArray pyCyclesEdges(
305305 const GRAPH & graph,
306306 NumpyArray<1, vigra::TinyVector<Int32, 3> > cycles,
307 NumpyArray<1, vigra::TinyVector<Int32, 3> > edgesOut
307 NumpyArray<1, vigra::TinyVector<Int32, 3> > edgesOut
308308 ){
309309
310310 Node nodes[3];
321321 for(size_t j=0; j<3; ++j){
322322 edgesOut(i)[j] = graph.id(edges[j]);
323323 }
324 }
324 }
325325 return edgesOut;
326326 }
327327
328
328
329329
330330 static NumpyAnyArray pyWardCorrection(
331331 const Graph & g,
368368
369369 NumpyArray<2,UInt32> vis (( typename NumpyArray<2,UInt64>::difference_type(g.edgeNum(),2)));
370370 NumpyArray<1,float > weights (( typename NumpyArray<1,double>::difference_type(g.edgeNum() )));
371
371
372372 size_t denseIndex = 0 ;
373373 for(NodeIt iter(g);iter!=lemon::INVALID;++iter){
374374 toDenseArrayMap[*iter]=denseIndex;
436436 out(i)=nodeLabelArrayMap[g.nodeFromId(nodeIds(i))];
437437 return out;
438438 }
439
439
440440 static NumpyAnyArray pyNodeIdsFeatures(
441441 const GRAPH & g,
442442 NumpyArray<1,Singleband<UInt32> > nodeIds,
509509 // numpy arrays => lemon maps
510510 FloatNodeArrayMap nodeFeatureArrayMap(g,nodeFeaturesArray);
511511 FloatEdgeArrayMap edgeWeightsArrayMap(g,edgeWeightsArray);
512
512
513513 for(EdgeIt e(g);e!=lemon::INVALID;++e){
514514 const Edge edge(*e);
515515 const Node u=g.u(edge);
532532 // numpy arrays => lemon maps
533533 MultiFloatNodeArrayMap nodeFeatureArrayMap(g,nodeFeaturesArray);
534534 FloatEdgeArrayMap edgeWeightsArrayMap(g,edgeWeightsArray);
535
535
536536 for(EdgeIt e(g);e!=lemon::INVALID;++e){
537537 const Edge edge(*e);
538538 const Node u=g.u(edge);
548548 UInt32NodeArray seedsArray,
549549 UInt32NodeArray labelsArray
550550 ){
551 // resize output ?
551 // resize output ?
552552 labelsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
553553
554554 // numpy arrays => lemon maps
571571 UInt32NodeArray labelsArray
572572 ){
573573
574 // resize output ?
574 // resize output ?
575575 labelsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
576576
577577
590590 //lemon_graph::graph_detail::generateWatershedSeeds(g, nodeWeightsArrayMap, labelsArrayMap, watershedsOption.seed_options);
591591 lemon_graph::watershedsGraph(g, nodeWeightsArrayMap, labelsArrayMap, watershedsOption);
592592 //lemon_graph::graph_detail::seededWatersheds(g, nodeWeightsArrayMap, seedsArrayMap, watershedsOption);
593
593
594594 return labelsArray;
595595 }
596596
604604 UInt32NodeArray seedsArray
605605 ){
606606 const std::string method="regionGrowing";
607 // resize output ?
607 // resize output ?
608608 seedsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
609609
610610 WatershedOptions watershedsOption;
629629 const float noBiasBelow,
630630 UInt32NodeArray labelsArray
631631 ){
632 // resize output ?
632 // resize output ?
633633 labelsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
634634
635635 // numpy arrays => lemon maps
652652 UInt32NodeArray labelsArray
653653 ){
654654
655 // resize output ?
655 // resize output ?
656656 labelsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
657657
658658 // numpy arrays => lemon maps
667667 shortestPathSegmentation<
668668 Graph,FloatEdgeArrayMap, FloatNodeArrayMap, UInt32NodeArrayMap, float
669669 >(g, edgeWeightsArrayMap, nodeWeightsArrayMap, labelsArrayMap);
670
671
670
671
672672 return labelsArray;
673673 }
674674
681681 const int nodeNumStop,
682682 UInt32NodeArray labelsArray
683683 ){
684 // resize output ?
684 // resize output ?
685685 labelsArray.reshapeIfEmpty( IntrinsicGraphShape<Graph>::intrinsicNodeMapShape(g) );
686686
687687 // numpy arrays => lemon maps
732732
733733
734734 template<class GRAPH>
735 class LemonGridGraphAlgorithmAddonVisitor
735 class LemonGridGraphAlgorithmAddonVisitor
736736 : public boost::python::def_visitor<LemonGridGraphAlgorithmAddonVisitor<GRAPH> >
737737 {
738738 public:
743743
744744 typedef LemonGraphAlgorithmVisitor<GRAPH> VisitorType;
745745 // Lemon Graph Typedefs
746
746
747747 typedef typename Graph::index_type index_type;
748748 typedef typename Graph::Edge Edge;
749749 typedef typename Graph::Node Node;
784784 typedef typename GraphDescriptorToMultiArrayIndex<Graph>::IntrinsicNodeMapShape NodeCoordinate;
785785 typedef NumpyArray<1,NodeCoordinate> NodeCoorinateArray;
786786
787 LemonGridGraphAlgorithmAddonVisitor(const std::string & clsName){}
787 LemonGridGraphAlgorithmAddonVisitor(const std::string & /*clsName*/){}
788788
789789
790790 template <class classT>
791791 void visit(classT& c) const
792 {
792 {
793793
794794 // - edge weights from interpolated image
795795 exportMiscAlgorithms(c);
798798
799799 template <class classT>
800800 void exportMiscAlgorithms(classT & c)const{
801
801
802802
803803
804804 python::def("edgeFeaturesFromInterpolatedImage",registerConverters(&pyEdgeWeightsFromInterpolatedImage),
807807 python::arg("image"),
808808 python::arg("out")=python::object()
809809 ),
810 "convert an image with with ``shape = graph.shape*2 - 1`` to an edge weight array"
810 "convert an image with ``shape = graph.shape*2 - 1`` to an edge weight array"
811811 );
812812
813813 python::def("edgeFeaturesFromImage",registerConverters(&pyEdgeWeightsFromImage),
816816 python::arg("image"),
817817 python::arg("out")=python::object()
818818 ),
819 "convert an image with with shape = graph.shape OR shape = graph.shape *2 -1 to an edge weight array"
819 "convert an image with shape = graph.shape OR shape = graph.shape *2 -1 to an edge weight array"
820820 );
821821
822822 python::def("edgeFeaturesFromImage",registerConverters(&pyEdgeWeightsFromImageMb),
825825 python::arg("image"),
826826 python::arg("out")=python::object()
827827 ),
828 "convert an image with with shape = graph.shape OR shape = graph.shape *2 -1 to an edge weight array"
828 "convert an image with shape = graph.shape OR shape = graph.shape *2 -1 to an edge weight array"
829829 );
830830
831831
844844 //' python::arg("image"),
845845 //' python::arg("out")=python::object()
846846 //' ),
847 //' "convert an image with with shape = graph.shape *2 -1 to an edge weight array"
847 //' "convert an image with shape = graph.shape *2 -1 to an edge weight array"
848848 //' ""
849849 //');
850850
852852 }
853853
854854
855
855
856856
857857
858858 static size_t pyAffiliatedEdgesSerializationSize(
881881 topologicalShape=false;
882882 }
883883 }
884
884
885885 if(regularShape)
886886 return pyEdgeWeightsFromOrginalSizeImage(g,image,edgeWeightsArray);
887887 else if(topologicalShape)
911911 topologicalShape=false;
912912 }
913913 }
914
914
915915 if(regularShape)
916916 return pyEdgeWeightsFromOrginalSizeImageMb(g,image,edgeWeightsArray);
917917 else if(topologicalShape)
997997
998998 edgeWeightsArray.reshapeIfEmpty( MultiFloatEdgeArray::ArrayTraits::taggedShape(outShape,"nc") );
999999
1000
1000
10011001 // numpy arrays => lemon maps
10021002 MultiFloatEdgeArrayMap edgeWeightsArrayMap(g,edgeWeightsArray);
10031003 typedef typename FloatNodeArray::difference_type CoordType;
2828
2929
3030 template<class GRAPH>
31 class LemonGraphHierachicalClusteringVisitor
31 class LemonGraphHierachicalClusteringVisitor
3232 : public boost::python::def_visitor<LemonGraphHierachicalClusteringVisitor<GRAPH> >
3333 {
3434 public:
4040
4141 typedef LemonGraphHierachicalClusteringVisitor<GRAPH> VisitorType;
4242 // Lemon Graph Typedefs
43
43
4444 typedef typename Graph::index_type index_type;
4545 typedef typename Graph::Edge Edge;
4646 typedef typename Graph::Node Node;
9494
9595 typedef cluster_operators::PythonOperator<MergeGraph> PythonClusterOperator;
9696
97
97
9898
9999
100100 LemonGraphHierachicalClusteringVisitor(const std::string clsName)
124124
125125 ;
126126
127 python::def("__mergeGraph",&pyMergeGraphConstructor ,
127 python::def("__mergeGraph",&pyMergeGraphConstructor ,
128128 python::with_custodian_and_ward_postcall< 0,1 ,
129 python::return_value_policy< python::manage_new_object > >()
129 python::return_value_policy< python::manage_new_object > >()
130130 )
131131 ;
132132 }
133133
134 static void setLiftedEdges(DefaultClusterOperator & op,
135 NumpyArray<1, uint32_t> liftedEdgeIds
136 ){
137 op.setLiftedEdges(liftedEdgeIds.begin(), liftedEdgeIds.end());
138 }
139
134140 void exportHierarchicalClusteringOperators()const{
135 {
141 {
136142 const std::string operatorName = clsName_ + std::string("MergeGraph") + std::string("MinEdgeWeightNodeDistOperator");
137143 python::class_<DefaultClusterOperator >(operatorName.c_str(),python::no_init)
138144 .def("__init__", python::make_constructor(&pyEdgeWeightNodeFeaturesConstructor))
145 .def("setLiftedEdges",registerConverters(&setLiftedEdges))
146 .def("enableStopWeight",&DefaultClusterOperator::enableStopWeight)
139147 ;
140148 python::def("__minEdgeWeightNodeDistOperator",registerConverters(&pyEdgeWeightNodeFeaturesConstructor),
141149 python::with_custodian_and_ward_postcall< 0,1 ,
145153 python::with_custodian_and_ward_postcall< 0 ,5,
146154 python::with_custodian_and_ward_postcall< 0 ,6,
147155 python::with_custodian_and_ward_postcall< 0 ,7,
148 python::return_value_policy< python::manage_new_object
149 > > > > > > > >()
156 python::return_value_policy< python::manage_new_object
157 > > > > > > > >()
150158 );
151159
152160 }
153 //{
161 //{
154162 // const std::string operatorName = clsName_ + std::string("MergeGraph") + std::string("NeuroOperator");
155163 // python::class_<NeuroClusterOperator >(operatorName.c_str(),python::no_init)
156164 // .def("__init__", python::make_constructor(&pyNeuroConstructor))
162170 // python::with_custodian_and_ward_postcall< 0 ,4,
163171 // python::with_custodian_and_ward_postcall< 0 ,5,
164172 // python::with_custodian_and_ward_postcall< 0 ,6,
165 // python::return_value_policy< python::manage_new_object
166 // > > > > > > >()
173 // python::return_value_policy< python::manage_new_object
174 // > > > > > > >()
167175 // );
168176 //}
169177 {
176184 python::def("__pythonClusterOperator",registerConverters(&pyPythonOperatorConstructor),
177185 python::with_custodian_and_ward_postcall< 0,1 ,
178186 python::with_custodian_and_ward_postcall< 0,2 ,
179 python::return_value_policy< python::manage_new_object > > >()
187 python::return_value_policy< python::manage_new_object > > >()
180188 );
181189 }
182190 }
184192 template<class CLUSTER_OPERATOR>
185193 void exportHierarchicalClustering(const std::string & opClsName)const{
186194 typedef CLUSTER_OPERATOR ClusterOperator;
187 typedef HierarchicalClustering<ClusterOperator> HCluster;
195 typedef HierarchicalClusteringImpl<ClusterOperator> HCluster;
188196
189197 const std::string clsName = std::string("HierarchicalClustering")+ opClsName;
190198 python::class_<HCluster,boost::noncopyable>(
192200 )
193201 .def("cluster",&HCluster::cluster)
194202 .def("reprNodeIds",registerConverters(&pyReprNodeIds<HCluster>))
203 .def("ucmTransform",registerConverters(&pyUcmTransform<HCluster>))
195204 .def("resultLabels",registerConverters(&pyResultLabels<HCluster>),
196205 (
197206 python::arg("out")=python::object()
202211 // free function
203212 python::def("__hierarchicalClustering",registerConverters(&pyHierarchicalClusteringConstructor<ClusterOperator>),
204213 python::with_custodian_and_ward_postcall< 0,1 ,
205 python::return_value_policy< python::manage_new_object > >()
214 python::return_value_policy< python::manage_new_object > >()
206215 );
207216 }
208217
238247
239248
240249 template <class classT>
241 void visit(classT& c) const
242 {
250 void visit(classT& /*c*/) const
251 {
243252 // the merge graph itself and factory functions to get a merge graph
244253 exportMergeGraph();
245254
277286 ){
278287 return EdgeHolder<MergeGraph>(mg,mg.reprEdge(edge));
279288 }
280
289
281290
282291 static void pyContractEdgeA(
283292 MergeGraph & mg,
303312
304313
305314 template<class CLUSTER_OP>
306 static HierarchicalClustering<CLUSTER_OP> * pyHierarchicalClusteringConstructor(
315 static HierarchicalClusteringImpl<CLUSTER_OP> * pyHierarchicalClusteringConstructor(
307316 CLUSTER_OP & clusterOp,
308317 const size_t nodeNumStopCond,
309318 const bool buildMergeTreeEncoding
310319
311320
312321 ){
313 typename HierarchicalClustering<CLUSTER_OP>::Parameter param;
322 typename HierarchicalClusteringImpl<CLUSTER_OP>::Parameter param;
314323 param.nodeNumStopCond_=nodeNumStopCond;
315324 param.buildMergeTreeEncoding_=buildMergeTreeEncoding;
316325 param.verbose_=true;
317 return new HierarchicalClustering<CLUSTER_OP>(clusterOp,param);
318 }
319
320
321
322
323 static DefaultClusterOperator *
326 return new HierarchicalClusteringImpl<CLUSTER_OP>(clusterOp,param);
327 }
328
329
330
331
332 static DefaultClusterOperator *
324333 pyEdgeWeightNodeFeaturesConstructor(
325334 MergeGraph & mergeGraph,
326335 FloatEdgeArray edgeIndicatorMapArray,
336345 ){
337346
338347 FloatEdgeArrayMap edgeIndicatorMap(mergeGraph.graph(),edgeIndicatorMapArray);
339 FloatEdgeArrayMap edgeSizeMap(mergeGraph.graph(),edgeSizeMapArray);
348 FloatEdgeArrayMap edgeSizeMap(mergeGraph.graph(),edgeSizeMapArray);
340349 MultiFloatNodeArrayMap nodeFeatureMap(mergeGraph.graph(),nodeFeatureMapArray);
341350 FloatNodeArrayMap nodeSizeMap(mergeGraph.graph(),nodeSizeMapArray);
342351 FloatEdgeArrayMap edgeMinWeightMap(mergeGraph.graph(),edgeMinWeightMapArray);
356365
357366
358367
359 static PythonClusterOperator *
368 static PythonClusterOperator *
360369 pyPythonOperatorConstructor(
361370 MergeGraph & mergeGraph,
362371 python::object object,
365374 const bool useEraseEdgeCallback
366375 ){
367376 return new PythonClusterOperator(mergeGraph,object,useMergeNodeCallback,useMergeEdgesCallback,useEraseEdgeCallback);
368 }
377 }
369378
370379
371380
398407 //TOC;
399408 return resultArray;
400409 }
410
411 template<class HCLUSTER>
412 static void pyUcmTransform(
413 const HCLUSTER & hcluster,
414 FloatEdgeArray inputArray
415 ){
416 FloatEdgeArrayMap inputArrayMap(hcluster.graph(),inputArray);
417 hcluster.ucmTransform(inputArrayMap);
418 }
419
401420
402421 template<class HCLUSTER>
403422 static python::tuple mergeTreeEncodingAsNumpyArray(const HCLUSTER & hcluster) {
416435 indices(m,2)=encoding[m].r_;
417436 }
418437 return python::make_tuple(indices,w);
419 }
438 }
420439
421440
422441 template<class HCLUSTER>
424443 const HCLUSTER & hcluster,
425444 const typename HCLUSTER::MergeGraphIndexType treeNodeId,
426445 NumpyArray<1,UInt32> leafes = (NumpyArray<1,UInt32>())
427 ) {
446 ) {
428447 leafes.reshapeIfEmpty( typename NumpyArray<1,UInt32>::difference_type( hcluster.graph().nodeNum()) );
429448 if(leafes.shape(0)!=hcluster.graph().nodeNum()){
430449 throw std::runtime_error("out.shape(0) must be equal nodeNum");
433452 // todo make leafes size check
434453 const size_t leafNum=hcluster.leafNodeIds(treeNodeId,leafes.begin());
435454 return python::make_tuple(leafes,leafNum);
436 }
455 }
437456
438457 /*
439458
448467 ){
449468 // reshape out ( last argument (out) will be reshaped if empty, and #channels is taken from second argument)
450469 reshapeNodeMapIfEmpty(graph,ragNodeFeaturesArray,graphNodeFeaturesArray);
451 // numpy arrays => lemon maps
470 // numpy arrays => lemon maps
452471 typename PyNodeMapTraits<Graph, UInt32>::Map labelsWhichGeneratedRagArrayMap(graph, labelsWhichGeneratedRagArray);
453472 typename PyNodeMapTraits<RagGraph,T >::Map ragNodeFeaturesArrayMap(rag,ragNodeFeaturesArray);
454473 typename PyNodeMapTraits<Graph, T >::Map graphNodeFeaturesArrayMap(graph,graphNodeFeaturesArray);
2020 #include <vigra/multi_gridgraph.hxx>
2121 #include <vigra/error.hxx>
2222 #include <vigra/graph_rag_project_back.hxx>
23 #include <vigra/threadpool.hxx>
24
25 #include <vigra/accumulator.hxx>
26
2327 namespace python = boost::python;
2428
2529 namespace vigra{
121125
122126
123127 template <class classT>
124 void visit(classT& c) const
128 void visit(classT& /*c*/) const
125129 {
126130
127131 // something like RagEdgeMap< std::vector< Edge > >
157161 python::arg("out")=python::object()
158162 )
159163 );
164 python::def("_ragEdgeStatistics",
165 registerConverters(
166 &pyRagEdgeFeaturesFromImplicit< float, float, ImplicitEdgeMap >
167 ),
168 (
169 python::arg("rag"),
170 python::arg("graph"),
171 python::arg("affiliatedEdges"),
172 python::arg("edgeFeatures"),
173 python::arg("out")=python::object()
174 )
175 );
176
160177
161178 }
162179
563580 template<class T_PIXEL, class T, class OTF_EDGES>
564581 static NumpyAnyArray pyRagEdgeMeanFromImplicit(
565582 const RagGraph & rag,
566 const Graph & graph,
583 const Graph & /*graph*/,
567584 const RagAffiliatedEdges & affiliatedEdges,
568585 const OTF_EDGES & otfEdgeMap,
569586 const std::string & accumulator,
622639
623640 }
624641
625
642
643 template<class T_PIXEL, class T, class OTF_EDGES>
644 static NumpyAnyArray pyRagEdgeFeaturesFromImplicit(
645 const RagGraph & rag,
646 const Graph & /*graph*/,
647 const RagAffiliatedEdges & affiliatedEdges,
648 const OTF_EDGES & otfEdgeMap,
649 NumpyArray<RagEdgeMapDim+1, T> ragEdgeFeaturesArray
650 ){
651
652 // preconditions
653 vigra_precondition(rag.edgeNum()>=1,"rag.edgeNum()>=1 is violated");
654
655 using namespace vigra::acc;
656
657 const size_t NFeatures = 12;
658
659 // resize out
660 typename MultiArray<RagEdgeMapDim+1,int>::difference_type outShape;
661 for(size_t d=0;d<RagEdgeMapDim;++d){
662 outShape[d]=IntrinsicGraphShape<RagGraph>::intrinsicEdgeMapShape(rag)[d];
663 }
664 outShape[RagEdgeMapDim]= NFeatures;
665
666 ragEdgeFeaturesArray.reshapeIfEmpty(outShape);
667
668 // define histogram for quantiles
669 typedef StandardQuantiles<AutoRangeHistogram<0> > Quantiles;
670 size_t n_bins_min = 2;
671 size_t n_bins_max = 64;
672
673 //in parallel with threadpool
674 // -1 = use all cores
675 parallel_foreach( -1, rag.edgeNum(),
676 [&](size_t /*thread_id*/, int id)
677 {
678 auto feat = ragEdgeFeaturesArray.bindInner(id);
679 // init the accumulator chain with the appropriate statistics
680 AccumulatorChain<double,
681 Select<Mean, Sum, Minimum, Maximum, Variance, Skewness, Kurtosis, Quantiles> > a;
682 const std::vector<Edge> & affEdges = affiliatedEdges[id];
683
684 // set n_bins = ceil( n_values**1./2.5 ) , clipped to [2,64]
685 // turned out to be suitable empirically
686 // see https://github.com/consti123/quantile_tests
687 size_t n_bins = std::pow( affiliatedEdges.size(), 1. / 2.5);
688 n_bins = std::max( n_bins_min, std::min(n_bins, n_bins_max) );
689 a.setHistogramOptions(HistogramOptions().setBinCount(n_bins));
690
691 // accumulate the values of this edge
692 for(unsigned int k=1; k <= a.passesRequired(); ++k)
693 for(size_t i=0;i<affEdges.size();++i)
694 a.updatePassN( otfEdgeMap[affEdges[i]], k );
695
696 feat[0] = get<Mean>(a);
697 feat[1] = get<Sum>(a);
698 feat[2] = get<Minimum>(a);
699 feat[3] = get<Maximum>(a);
700 feat[4] = get<Variance>(a);
701 feat[5] = get<Skewness>(a);
702 feat[6] = get<Kurtosis>(a);
703 // get quantiles, keep only the ones we care for
704 TinyVector<double, 7> quant = get<Quantiles>(a);
705 // we keep: 0.1, 0.25, 05 (median), 0.75 and 0.9 quantile
706 feat[7] = quant[1];
707 feat[8] = quant[2];
708 feat[9] = quant[3];
709 feat[10] = quant[4];
710 feat[11] = quant[5];
711 }
712 );
713
714 return ragEdgeFeaturesArray;
715
716 }
626717
627718
628719 template<class T>
205205
206206 std::string clsName_;
207207 template <class classT>
208 void visit(classT& c) const
208 void visit(classT& /*c*/) const
209209 {
210210 // - Dijkstra
211211 exportShortestPathAlgorithms();
212212 }
213213
214 static ShortestPathDijkstraType * pyShortestPathDijkstraTypeFactory(const Graph & g){
214 static ShortestPathDijkstraType * pyShortestPathDijkstraTypeFactory(const Graph & g){
215215 return new ShortestPathDijkstraType(g);
216216 }
217217
267267 // comput length of the path
268268 const size_t length = pathLength(Node(source),Node(target),predMap);
269269 nodeIdPath.reshapeIfEmpty(typename NumpyArray<1,Singleband<UInt32> >::difference_type(length));
270 pathIds(sp.graph(),source,target,predMap,nodeIdPath);
270 {
271 PyAllowThreads _pythread;
272 pathIds(sp.graph(),source,target,predMap,nodeIdPath);
273 }
271274 return nodeIdPath;
272275
273276 }
282285 // comput length of the path
283286 const size_t length = pathLength(Node(source),Node(target),predMap);
284287 nodeCoordinates.reshapeIfEmpty(typename NumpyArray<1,Singleband<UInt32> >::difference_type(length));
285 pathCoordinates(sp.graph(),source,target,predMap,nodeCoordinates);
288 {
289 PyAllowThreads _pythread;
290 pathCoordinates(sp.graph(),source,target,predMap,nodeCoordinates);
291 }
286292 return nodeCoordinates;
287293 }
288294
292298 PyNode source,
293299 PyNode target
294300 ){
295 // numpy arrays => lemon maps
296 FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
297
298 // run algorithm itself
299 sp.run(edgeWeightsArrayMap,source,target);
301 {
302 PyAllowThreads _pythread;
303 // numpy arrays => lemon maps
304 FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
305 // run algorithm itself
306 sp.run(edgeWeightsArrayMap,source,target);
307 }
300308 }
301309
302310 static void runShortestPathNoTarget(
304312 FloatEdgeArray edgeWeightsArray,
305313 PyNode source
306314 ){
307 // numpy arrays => lemon maps
308 FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
309
310 // run algorithm itself
311 sp.run(edgeWeightsArrayMap,source);
312 }
313
314
315 static void runShortestPathImplicit(
315 {
316 PyAllowThreads _pythread;
317 // numpy arrays => lemon maps
318 FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
319 // run algorithm itself
320 sp.run(edgeWeightsArrayMap,source);
321 }
322 }
323
324
325 static void runShortestPathImplicit(
316326 ShortestPathDijkstraType & sp,
317327 const ImplicitEdgeMap & edgeWeights,
318328 PyNode source,
321331 // numpy arrays => lemon maps
322332 //FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
323333
324 // run algorithm itself
325 sp.run(edgeWeights,source,target);
334 {
335 PyAllowThreads _pythread;
336 // run algorithm itself
337 sp.run(edgeWeights,source,target);
338 }
326339 }
327340
328341 static void runShortestPathNoTargetImplicit(
334347 //FloatEdgeArrayMap edgeWeightsArrayMap(sp.graph(),edgeWeightsArray);
335348
336349 // run algorithm itself
337 sp.run(edgeWeights,source);
350 {
351 PyAllowThreads _pythread;
352 sp.run(edgeWeights,source);
353 }
338354 }
339355
340356 };
451451 }
452452
453453 template<class ITEM>
454 static bool eqToInvalid(const ITEM & item,const lemon::Invalid iv){
454 static bool eqToInvalid(const ITEM & item,const lemon::Invalid /*iv*/){
455455 return item.graph_==NULL || item==lemon::INVALID;
456456 }
457457
458458 template<class ITEM>
459 static bool neqToInvalid(const ITEM & item,const lemon::Invalid iv){
459 static bool neqToInvalid(const ITEM & item,const lemon::Invalid /*iv*/){
460460 return item.graph_!=NULL && item!=lemon::INVALID;
461461 }
462462
355355 }
356356
357357 AxisTags
358 pythonGetAxisTags(const ImageImportInfo& info)
358 pythonGetAxisTags(const ImageImportInfo& /*info*/)
359359 {
360360 return AxisTags(AxisInfo::x(), AxisInfo::y(), AxisInfo::c());
361361 }
132132 void defineRandomForest();
133133 void defineRandomForestOld();
134134
135 namespace rf3 {
136 void exportRandomForest3();
137 }
138
135139 } // namespace vigra
136140
137141
144148 defineUnsupervised();
145149 defineRandomForest();
146150 defineRandomForestOld();
151 rf3::exportRandomForest3();
147152 }
148153
149154
563563 {
564564 res.reshapeIfEmpty(image.taggedShape(),
565565 "eccentricityTransform(): Output array has wrong shape.");
566 eccentricityTransformOnLabels(image, res);
566 {
567 PyAllowThreads _pythread;
568 eccentricityTransformOnLabels(image, res);
569 }
567570 return res;
568571 }
569572
575578 {
576579 typedef typename MultiArrayShape<N>::type Point;
577580 ArrayVector<Point> centers;
578 eccentricityCenters(image, centers);
581 {
582 PyAllowThreads _pythread;
583 eccentricityCenters(image, centers);
584 }
579585
580586 python::list centerlist = python::list();
581 for (int i=0; i<centers.size(); ++i) {
587 for (decltype(centers.size()) i=0; i<centers.size(); ++i) {
582588 centerlist.append(centers[i]);
583589 }
584590 return centerlist;
595601 res.reshapeIfEmpty(image.taggedShape(),
596602 "eccentricityTransformWithCenters(): Output array has wrong shape.");
597603 ArrayVector<Point> centers;
598 eccentricityTransformOnLabels(image, res, centers);
604 {
605 PyAllowThreads _pythread;
606 eccentricityTransformOnLabels(image, res, centers);
607 }
599608
600609 python::list centerlist = python::list();
601 for (int i=0; i<centers.size(); ++i) {
610 for (decltype(centers.size()) i=0; i<centers.size(); ++i) {
602611 centerlist.append(centers[i]);
603612 }
604613 return python::make_tuple(res, centerlist);
260260 else
261261 at = AxisTags(python::extract<AxisTags const &>(axistags)());
262262 int N = Array::shape_type::static_size;
263 vigra_precondition(at.size() == 0 || at.size() == N,
263 vigra_precondition(at.size() == 0 || at.size() == unsigned(N),
264264 "ChunkedArray(): axistags have invalid length.");
265 if(at.size() == N)
265 if(at.size() == unsigned(N))
266266 {
267267 int res = PyObject_SetAttrString(py_array, "axistags", python::object(at).ptr());
268268 pythonToCppException(res != 0);
6868 double averagingQuantile=0.8,
6969 double noiseEstimationQuantile=1.5,
7070 double noiseVarianceInitialGuess=10.0,
71 NumpyArray<3, Multiband<PixelType> > res=python::object())
71 NumpyArray<3, Multiband<PixelType> > /*res*/=python::object())
7272 {
7373 NoiseNormalizationOptions noiseNormalizationOptions;
7474 noiseNormalizationOptions
9898 double averagingQuantile=0.8,
9999 double noiseEstimationQuantile=1.5,
100100 double noiseVarianceInitialGuess=10.0,
101 NumpyArray<3, Multiband<PixelType> > res=python::object())
101 NumpyArray<3, Multiband<PixelType> > /*res*/=python::object())
102102 {
103103 NoiseNormalizationOptions noiseNormalizationOptions;
104104 noiseNormalizationOptions
6161 {}
6262
6363 template <class Permutation>
64 GetTag_Visitor(Permutation const & p)
64 GetTag_Visitor(Permutation const &)
6565 {}
6666
6767 python::object to_python(signed char t) const { return python::object(t); }
190190 struct ToPythonArray<TAG, Error__Attempt_to_access_inactive_statistic<T>, Accu>
191191 {
192192 template <class Permutation>
193 static python::object exec(Accu & a, Permutation const & p)
193 static python::object exec(Accu &, Permutation const &)
194194 {
195195 vigra_precondition(false, "PythonAccumulator::get(): Attempt to access inactive statistic.");
196196 return python::object();
201201 struct ToPythonArray<TAG, std::pair<T1, T2>, Accu>
202202 {
203203 template <class Permutation>
204 static python::object exec(Accu & a, Permutation const & p)
204 static python::object exec(Accu &, Permutation const &)
205205 {
206206 vigra_precondition(false, "PythonAccumulator::get(): Export for this statistic is not implemented, sorry.");
207207 return python::object();
282282
283283 struct PythonFeatureAccumulator
284284 {
285 virtual void activate(std::string const & tag) { throw std::runtime_error("abstract function called."); }
286 virtual bool isActive(std::string const & tag) const { throw std::runtime_error("abstract function called."); return false; }
285 virtual void activate(std::string const &) { throw std::runtime_error("abstract function called."); }
286 virtual bool isActive(std::string const &) const { throw std::runtime_error("abstract function called."); return false; }
287287 virtual python::list activeNames() const { throw std::runtime_error("abstract function called."); return python::list(); }
288288 virtual python::list names() const { throw std::runtime_error("abstract function called."); return python::list(); }
289 virtual python::object get(std::string const & tag) { throw std::runtime_error("abstract function called."); return python::object(); }
290 virtual void merge(PythonFeatureAccumulator const & o) { throw std::runtime_error("abstract function called."); }
289 virtual python::object get(std::string const &) { throw std::runtime_error("abstract function called."); return python::object(); }
290 virtual void merge(PythonFeatureAccumulator const &) { throw std::runtime_error("abstract function called."); }
291291 virtual PythonFeatureAccumulator * create() const { throw std::runtime_error("abstract function called."); return 0; }
292292 virtual ~PythonFeatureAccumulator() {}
293293
329329 : public PythonFeatureAccumulator
330330 {
331331 virtual MultiArrayIndex maxRegionLabel() { throw std::runtime_error("abstract function called."); }
332 virtual void mergeAll(PythonRegionFeatureAccumulator const & o) { throw std::runtime_error("abstract function called."); }
333 virtual void remappingMerge(PythonFeatureAccumulator const & o, NumpyArray<1, npy_uint32> labelMapping) { throw std::runtime_error("abstract function called."); }
334 virtual void mergeRegions(npy_uint32 i, npy_uint32 j) { throw std::runtime_error("abstract function called."); }
332 virtual void mergeAll(PythonRegionFeatureAccumulator const &) { throw std::runtime_error("abstract function called."); }
333 virtual void remappingMerge(PythonRegionFeatureAccumulator const &, NumpyArray<1, npy_uint32>) { throw std::runtime_error("abstract function called."); }
334 virtual void mergeRegions(npy_uint32, npy_uint32) { throw std::runtime_error("abstract function called."); }
335335 virtual PythonRegionFeatureAccumulator * create() const { throw std::runtime_error("abstract function called."); return 0; }
336336
337337 static void definePythonClass()
454454 merge(o);
455455 }
456456
457 void remappingMerge(PythonFeatureAccumulator const & o, NumpyArray<1, npy_uint32> labelMapping)
457 void remappingMerge(PythonRegionFeatureAccumulator const & o, NumpyArray<1, npy_uint32> labelMapping)
458458 {
459459 PythonAccumulator const * p = dynamic_cast<PythonAccumulator const *>(&o);
460460 if(p == 0)
1717 /* Software is furnished to do so, subject to the following */
1818 /* conditions: */
1919 /* */
20 /* The above copyrigfht notice and this permission notice shall be */
20 /* The above copyright notice and this permission notice shall be */
2121 /* included in all copies or substantial portions of the */
2222 /* Software. */
2323 /* */
167167 double
168168 pythonLearnRandomForest(RandomForest<LabelType> & rf,
169169 NumpyArray<2,FeatureType> trainData,
170 NumpyArray<2,LabelType> trainLabels,
170 NumpyArray<2,LabelType> trainLabels, // FIXME why are the labels 2d ?!
171171 UInt32 randomSeed=0,
172172 int maxdepth=-1,
173173 int minsize=0)
0 /************************************************************************/
1 /* */
2 /* Copyright 2009 by Ullrich Koethe */
3 /* */
4 /* This file is part of the VIGRA computer vision library. */
5 /* The VIGRA Website is */
6 /* http://hci.iwr.uni-heidelberg.de/vigra/ */
7 /* Please direct questions, bug reports, and contributions to */
8 /* ullrich.koethe@iwr.uni-heidelberg.de or */
9 /* vigra@informatik.uni-hamburg.de */
10 /* */
11 /* Permission is hereby granted, free of charge, to any person */
12 /* obtaining a copy of this software and associated documentation */
13 /* files (the "Software"), to deal in the Software without */
14 /* restriction, including without limitation the rights to use, */
15 /* copy, modify, merge, publish, distribute, sublicense, and/or */
16 /* sell copies of the Software, and to permit persons to whom the */
17 /* Software is furnished to do so, subject to the following */
18 /* conditions: */
19 /* */
20 /* The above copyright notice and this permission notice shall be */
21 /* included in all copies or substantial portions of the */
22 /* Software. */
23 /* */
24 /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND */
25 /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES */
26 /* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND */
27 /* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT */
28 /* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, */
29 /* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING */
30 /* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR */
31 /* OTHER DEALINGS IN THE SOFTWARE. */
32 /* */
33 /************************************************************************/
34
35 #define PY_ARRAY_UNIQUE_SYMBOL vigranumpylearning_PyArray_API
36 #define NO_IMPORT_ARRAY
37
38 #include <vigra/numpy_array.hxx>
39 #include <vigra/numpy_array_converters.hxx>
40 #include <vigra/random_forest_3.hxx>
41
42 #ifdef HasHDF5
43 # include <vigra/random_forest_3_hdf5_impex.hxx>
44 #endif
45
46 #include <boost/python.hpp>
47
48 //#include <vigra/timing.hxx>
49 //#include <vigra/random.hxx>
50 //#include <set>
51 //#include <cmath>
52 //#include <memory>
53
54 namespace python = boost::python;
55
56 namespace vigra
57 {
58 namespace rf3
59 {
60
// Fixed feature/label types used by the Python bindings of random_forest_3.
typedef float FeatureType;
typedef uint32_t LabelType;
typedef NumpyArray<2,FeatureType> FeatureArrayType;   // (n_samples, n_features)
typedef NumpyArray<1,LabelType> LabelArrayType; // FIXME TODO confirm whether this needs to be 1 or 2d
typedef DefaultRF<FeatureArrayType,LabelArrayType>::type RandomForestType;
66
67 // random forest constructor
68 RandomForestType *
69 pythonConstructRandomForest3(
70 FeatureArrayType features,
71 LabelArrayType labels,
72 int treeCount,
73 int mtry,
74 int min_split_node_size,
75 bool sample_with_replacement,
76 bool sample_classes_individually,
77 size_t resample_count,
78 size_t max_depth,
79 double tau,
80 size_t n_threads
81 )
82 {
83
84 RandomForestOptions rf_opts;
85
86 rf_opts.tree_count(treeCount);
87
88 if(mtry > 0)
89 rf_opts.features_per_node(mtry);
90
91 // TODO training_set_size -> can't find the corresponding parameter, afaik this is the number of bootstrap samples used
92
93 rf_opts.bootstrap_sampling(sample_with_replacement);
94 rf_opts.min_num_instances(min_split_node_size);
95 rf_opts.use_stratification(sample_classes_individually);
96
97 // this is the number of instances that is resampled in each split / this disables bootstrap sampling and
98 rf_opts.resample_count(resample_count);
99
100 // expose the max tree depth, not in old rf afaik
101 rf_opts.max_depth(max_depth);
102
103 //expose node complexity, not in okd rf afaik
104 rf_opts.node_complexity_tau(tau);
105
106 // expose n_threads for multithreading
107 rf_opts.n_threads(n_threads);
108
109 // TODO expose class_weights
110 // class_weights(std::vector<double> const & v)
111
112 // TODO expose the split criterion (GINI, ENTTROPY, KSD), not in old rf afaik
113 // split(RandomForestOptionTags p_split)
114
115 // return pointer to the random forest
116 PyAllowThreads _pythread;
117 RandomForestType rf_tmp = random_forest(features, labels, rf_opts);
118 RandomForestType * rf = new RandomForestType(rf_tmp);
119 return rf;
120 }
121
122 // prediction
123 NumpyAnyArray
124 pythonPredictProbabilities(const RandomForestType & rf,
125 FeatureArrayType features,
126 size_t n_threads,
127 NumpyArray<2,float> res) {
128
129 res.reshapeIfEmpty(MultiArrayShape<2>::type(features.shape(0), rf.num_classes()),
130 "RandomForest.predictProbabilities(): Output array has wrong dimensions.");
131 {
132 PyAllowThreads _pythread;
133 rf.predict_probabilities(features, res, n_threads);
134 }
135 return res;
136 }
137
138 NumpyAnyArray
139 pythonPredictLabels(const RandomForestType & rf,
140 FeatureArrayType features,
141 size_t n_threads,
142 NumpyArray<1,LabelType> res) {
143
144 res.reshapeIfEmpty(MultiArrayShape<1>::type(features.shape(0)),
145 "RandomForest.predictProbabilities(): Output array has wrong dimensions.");
146 {
147 PyAllowThreads _pythread;
148 rf.predict(features, res, n_threads);
149 }
150 return res;
151 }
152
153 #ifdef HasHDF5
154 RandomForestType *
155 pythonImportFromHDF5(const std::string & filename, const std::string & pathname )
156 {
157 HDF5File h5ctx(filename);
158 RandomForestType rf_tmp = random_forest_import_HDF5<FeatureArrayType,LabelArrayType>(h5ctx, pathname);
159 RandomForestType * rf = new RandomForestType(rf_tmp);
160 return rf;
161 }
162
163 void pythonExportHDF5(const RandomForestType & rf,
164 const std::string & filename,
165 const std::string & pathname)
166 {
167 HDF5File h5ctx(filename, HDF5File::ReadWrite );
168 random_forest_export_HDF5(rf, h5ctx, pathname);
169 }
170 #endif // HasHDF5
171
// expose the rf 3 to python
// expose the rf 3 as a python class + basic API
void exportRandomForest3()
{
    using namespace python;

    // Docstring display: user-defined docstrings and Python signatures on,
    // C++ signatures off.
    docstring_options doc_options(true, true, false);

    // Strategies for choosing how many features are tested at each split.
    enum_<RandomForestOptionTags>("RF3_MTRY_SWITCH")
        .value("RF3_MTRY_LOG", vigra::rf3::RF_LOG)
        .value("RF3_MTRY_SQRT",vigra::rf3::RF_SQRT)
        .value("RF3_MTRY_ALL", vigra::rf3::RF_ALL);

    // no_init: instances are created only through the __init__ overloads
    // registered below (training constructor and, if available, HDF5 import).
    class_<RandomForestType> rfclass3("RandomForest3",python::no_init);
    //class_<RandomForestType> rfclass3("RandomForest3");

    rfclass3
        // Training constructor; see pythonConstructRandomForest3 for the
        // meaning of the individual options.
        .def("__init__",
             python::make_constructor(registerConverters(&pythonConstructRandomForest3),
                                      boost::python::default_call_policies(),
                                      (arg("features"),
                                       arg("labels"),
                                       arg("treeCount")=255,
                                       arg("mtry")= -1,
                                       arg("min_split_node_size")=1,
                                       arg("sample_with_replacement")=true,
                                       arg("sample_classes_individually")=false,
                                       arg("resample_count")=0,
                                       arg("max_depth")=0,
                                       arg("tau")=-1,
                                       arg("n_threads")=1)
                                      ),
             "\nConstruct a new random forest::\n\n"
             "   RandomForest(features, labels, treeCount = 255, mtry=RF_SQRT, min_split_node_size=1,\n"
             "                training_set_size=0, training_set_proportions=1.0,\n"
             "                sample_with_replacement=True, sample_classes_individually=False,\n"
             "                )\n\n"
             "treeCount:\n"
             "  controls the number of trees that are created.\n"
             "See RandomForest_ and RandomForestOptions_ in the C++ documentation "
             "for the meaning of the other parameters.\n")
#ifdef HasHDF5
        // Constructor overload that loads a previously stored forest.
        .def("__init__",python::make_constructor(&pythonImportFromHDF5,
                                                 boost::python::default_call_policies(),
                                                 ( arg("filename"),
                                                   arg("pathInFile"))),
             "\nLoad from HDF5 file::\n\n"
             "   RandomForest(filename, pathInFile)\n\n")
        .def("writeHDF5", &pythonExportHDF5,
             boost::python::default_call_policies(),
             ( arg("filename"),
               arg("pathInFile")),
             "Store the random forest in the given HDF5 file 'filename' under the internal\n"
             "path 'pathInFile'.\n")
#endif // HasHDF5
        // Simple introspection accessors forwarded to the C++ object.
        .def("featureCount",
             &RandomForestType::num_features,
             "Returns the number of features the RandomForest works with.\n")
        .def("labelCount",
             &RandomForestType::num_classes,
             "Returns the number of labels, the RandomForest knows.\n")
        .def("treeCount",
             &RandomForestType::num_trees,
             "Returns the 'treeCount', that was set when constructing the RandomForest.\n")
        // Prediction methods; both release the GIL internally.
        .def("predictProbabilities",
             registerConverters(&pythonPredictProbabilities),
             (
                 arg("features"),
                 arg("n_threads")=1,
                 arg("out")=object()
             ),
             "Predict probabilities for different classes on 'testData'.\n\n"
             "The output is an array containing a probability for every test sample and class.\n")
        .def("predictLabels",
             registerConverters(&pythonPredictLabels),
             (
                 arg("features"),
                 arg("n_threads")=1,
                 arg("out")=object()),
             "Predict labels on 'features'.\n\n"
             "The output is an array containing a label for every test samples.\n")
        ;
}
255
256
257
258
259 } // end namespace rf3
260 } // end namespace vigra
261
262
5050 #include <vigra/multi_convolution.hxx>
5151 #include <vigra/slic.hxx>
5252 #include <vigra/seg_to_seeds.hxx>
53
53 #include <vigra/multi_pointoperators.hxx>
5454
5555 #include <string>
5656 #include <cmath>
57 #include <unordered_set>
58 #include <unordered_map>
59 #include <algorithm>
60
61 #include <boost/python/stl_iterator.hpp>
5762
5863 #include "tws.hxx"
5964
10761081 .blockShape(blockShape));
10771082 return python::make_tuple(out, nSeg);
10781083 }
1084
1085 /** \brief Map all values in src to new values using the given mapping (a dict).
1086 * See python docstring for details.
1087 */
1088 template <unsigned int NDIM, class SrcVoxelType, class DestVoxelType>
1089 NumpyAnyArray
1090 pythonApplyMapping(NumpyArray<NDIM, Singleband<SrcVoxelType> > src,
1091 python::dict mapping,
1092 bool allow_incomplete_mapping = false,
1093 NumpyArray<NDIM, Singleband<DestVoxelType> > res = NumpyArray<NDIM, Singleband<SrcVoxelType> >())
1094 {
1095 using namespace boost::python;
1096
1097 res.reshapeIfEmpty(src.taggedShape(), "applyMapping(): Output array has wrong shape.");
1098
1099 // Copy dict into a c++ unordered_map of ints,
1100 // which is ~10x faster than using a Python dict
1101 typedef std::unordered_map<SrcVoxelType, DestVoxelType> labelmap_t;
1102 labelmap_t labelmap(2*len(mapping)); // Using 2*N buckets seems to speed things up by 10%
1103
1104 typedef stl_input_iterator<tuple> dict_iter_t;
1105
1106 #if PY_MAJOR_VERSION < 3
1107 dict_iter_t map_iter = mapping.iteritems();
1108 #else
1109 dict_iter_t map_iter = mapping.items();
1110 #endif
1111
1112 for (; map_iter != dict_iter_t(); ++map_iter)
1113 {
1114 object key = (*map_iter)[0];
1115 object value = (*map_iter)[1];
1116 labelmap[extract<SrcVoxelType>(key)] = extract<DestVoxelType>(value);
1117 }
1118
1119 // Enforce const capture in the lambda below.
1120 labelmap_t const & _labelmap = labelmap;
1121
1122 {
1123 std::unique_ptr<PyAllowThreads> pythread_ptr(new PyAllowThreads);
1124
1125 transformMultiArray(src, res,
1126 [&_labelmap, allow_incomplete_mapping, &pythread_ptr](SrcVoxelType px) -> DestVoxelType {
1127 typename labelmap_t::const_iterator iter = _labelmap.find(px);
1128
1129 if (iter != _labelmap.end())
1130 {
1131 return iter->second;
1132 }
1133
1134 if (allow_incomplete_mapping)
1135 {
1136 // Key is missing. Return the original value.
1137 return static_cast<DestVoxelType>(px);
1138 }
1139
1140 // Reclaim the GIL before setting the error string.
1141 pythread_ptr.reset();
1142
1143 std::ostringstream err_msg;
1144 err_msg << "Key not found in mapping: " << +px;
1145 PyErr_SetString( PyExc_KeyError, err_msg.str().c_str() );
1146 python::throw_error_already_set();
1147
1148 return 0; // unreachable line
1149 });
1150 }
1151
1152 return res;
1153 }
1154
1155 // Unfortunately, can't use this macro because the template args uses TWO dtypes
1156 //VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyApplyMapping, pythonApplyMapping)
1157
1158
1159 /** \brief Find unique values in the given array.
1160 */
1161 template <class VoxelType, unsigned int NDIM>
1162 NumpyAnyArray
1163 pythonUnique(NumpyArray<NDIM, Singleband<VoxelType> > src, bool sort=true)
1164 {
1165 std::unordered_set<VoxelType> labelset;
1166 auto f = [&labelset](VoxelType px) { labelset.insert(px); };
1167 inspectMultiArray(src, f);
1168
1169 NumpyArray<1, VoxelType> result;
1170 result.reshape( Shape1(labelset.size()) );
1171 std::copy( labelset.begin(), labelset.end(), result.begin() );
1172
1173 if (sort)
1174 {
1175 std::sort( result.begin(), result.end() );
1176 }
1177 return result;
1178 }
1179
1180 VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyUnique, pythonUnique)
1181
1182
1183 /** \brief Relabel an array such that all labels are consecutive
1184 * (i.e. there are no gaps in the label values used by the array)
1185 * See python docstring below for details.
1186 */
1187 template <unsigned int NDIM, class SrcVoxelType, class DestVoxelType>
1188 boost::python::tuple
1189 pythonRelabelConsecutive(NumpyArray<NDIM, Singleband<SrcVoxelType> > src,
1190 DestVoxelType start_label = 1,
1191 bool keep_zeros = true,
1192 NumpyArray<NDIM, Singleband<DestVoxelType> > res = NumpyArray<NDIM, Singleband<SrcVoxelType> >())
1193 {
1194 using namespace boost::python;
1195 res.reshapeIfEmpty(src.taggedShape(), "relabelConsecutive(): Output array has wrong shape.");
1196
1197 std::unordered_map<SrcVoxelType, DestVoxelType> labelmap;
1198 if (keep_zeros)
1199 {
1200 vigra_precondition(!keep_zeros || start_label > 0,
1201 "relabelConsecutive(): start_label must be non-zero if using keep_zeros=True");
1202
1203 // pre-initialize the mapping to keep zeros unchanged
1204 labelmap[0] = 0;
1205 }
1206
1207 {
1208 PyAllowThreads _pythread;
1209
1210 transformMultiArray(src, res,
1211 [&](SrcVoxelType px) -> DestVoxelType {
1212 auto iter = labelmap.find(px);
1213 if (iter != labelmap.end())
1214 {
1215 return iter->second;
1216 }
1217 // We haven't seen this label yet.
1218 // Create a new entry in the hash table.
1219 DestVoxelType newlabel = labelmap.size() - int(keep_zeros) + start_label;
1220 labelmap[px] = newlabel;
1221 return newlabel;
1222 });
1223 }
1224
1225 // Convert labelmap to dict
1226 dict labelmap_dict;
1227 for (auto old_new_pair : labelmap)
1228 {
1229 labelmap_dict[old_new_pair.first] = old_new_pair.second;
1230 }
1231
1232 DestVoxelType max_label = labelmap.size() - int(keep_zeros) - 1 + start_label;
1233 return make_tuple(res, max_label, labelmap_dict);
1234 }
1235
1236 // Unfortunately, can't use this macro because the template args uses TWO dtypes
1237 //VIGRA_PYTHON_MULTITYPE_FUNCTOR_NDIM(pyRelabelConsecutive, pythonRelabelConsecutive)
1238
10791239
10801240 void defineSegmentation()
10811241 {
14511611 "\n"
14521612 "The function returns a Python tuple (labelImage, maxRegionLabel)\n"
14531613 "\n");
1614
1615 multidef("unique",
1616 pyUnique<1,5,npy_uint8, npy_uint32, npy_uint64, npy_int64>(),
1617 (arg("arr"), arg("sort")=true),
1618 "Find unique values in the given label array.\n"
1619 "If ``sort`` is True, then the output is sorted.\n"
1620 "Much faster then ``numpy.unique()``.\n");
1621
1622 //-- 3D relabelConsecutive
1623 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<3, npy_uint64, npy_uint32>),
1624 (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()),
1625 "Relabel the given label image to have consecutive label values.\n"
1626 "Note: The relative order between label values will not necessarily be preserved.\n"
1627 "\n"
1628 "Parameters\n"
1629 "----------\n"
1630 "labels: ndarray\n"
1631 "start_label: The lowest label of the output array.\n"
1632 "keep_zeros: Don't relabel zero-valued items.\n"
1633 "out: ndarray to hold the data. If None, it will be allocated for you.\n"
1634 " A combination of uint64 labels and uint32 'out' is permitted.\n"
1635 "\n"
1636 "Returns a tuple of ``(newlabels, maxlabel, mapping)``, where:\n"
1637 "``maxlabel`` is the maximum label of the new labels, and\n"
1638 "``mapping`` is a dict showing how the old labels were converted to the new label values.\n"
1639 "\n"
1640 "Note: As with other vigra functions, you should provide accurate axistags for optimal performance.\n");
1641 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<3, npy_uint64, npy_uint64>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1642 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<3, npy_uint32, npy_uint32>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1643 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<3, npy_uint8, npy_uint8>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1644
1645 //-- 2D relabelConsecutive
1646 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<2, npy_uint64, npy_uint32>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1647 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<2, npy_uint64, npy_uint64>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1648 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<2, npy_uint32, npy_uint32>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1649 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<2, npy_uint8, npy_uint8>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1650
1651 //-- 1D relabelConsecutive
1652 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<1, npy_uint64, npy_uint32>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1653 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<1, npy_uint64, npy_uint64>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1654 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<1, npy_uint32, npy_uint32>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1655 def("relabelConsecutive", registerConverters(&pythonRelabelConsecutive<1, npy_uint8, npy_uint8>), (arg("labels"), arg("start_label")=1, arg("keep_zeros")=true, arg("out")=python::object()));
1656
1657
1658 // Lots of overloads here to allow mapping between arrays of different dtypes.
1659 // -- 3D
1660 // 64 --> 32
1661 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint64, npy_uint32>),
1662 (arg("labels"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()),
1663 "Map all values in `labels` to new values using the given mapping (a dict).\n"
1664 "Useful for maps with large values, for which a numpy index array would need too much RAM.\n"
1665 "To relabel in-place, set `out=labels`.\n"
1666 "\n"
1667 "Parameters\n"
1668 "----------\n"
1669 "labels: ndarray\n"
1670 "mapping: dict of ``{old_label : new_label}``\n"
1671 "allow_incomplete_mapping: If True, then any voxel values in the original data that are missing\n"
1672 " from the mapping dict will be copied (and casted) into the output.\n"
1673 " Otherwise, an ``IndexError`` will be raised if the map is incomplete\n"
1674 " for the input data.\n"
1675 "out: ndarray to hold the data. If None, it will be allocated for you.\n"
1676 " The dtype of ``out`` is allowed to be smaller (or bigger) than the dtype of ``labels``.\n"
1677 "\n"
1678 "Note: As with other vigra functions, you should provide accurate axistags for optimal performance.\n");
1679
1680 // 8 <--> 32
1681 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint8, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1682 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint32, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1683
1684 // 32 <--> 64
1685 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint32, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1686 //def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint64, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1687
1688 // 8 <--> 64
1689 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint8, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1690 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint64, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1691
1692 // Cases for same input/output dtypes must come last, so they are chosen by default!
1693 // 8 <--> 8, 32 <--> 32, 64 <--> 64
1694 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint64, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1695 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint32, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1696 def("applyMapping", registerConverters(&pythonApplyMapping<3, npy_uint8, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1697
1698 // -- 2D
1699 // 8 <--> 32
1700 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint8, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1701 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint32, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1702
1703 // 32 <--> 64
1704 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint32, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1705 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint64, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1706
1707 // 8 <--> 64
1708 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint8, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1709 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint64, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1710
1711 // Cases for same input/output dtypes must come last, so they are chosen by default!
1712 // 8 <--> 8, 32 <--> 32, 64 <--> 64
1713 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint32, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1714 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint64, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1715 def("applyMapping", registerConverters(&pythonApplyMapping<2, npy_uint8, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1716
1717 // -- 1D
1718
1719 // 8 <--> 32
1720 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint8, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1721 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint32, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1722
1723 // 32 <--> 64
1724 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint32, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1725 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint64, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1726
1727 // 8 <--> 64
1728 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint8, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1729 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint64, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1730
1731 // Cases for same input/output dtypes must come last, so they are chosen by default!
1732 // 8 <--> 8, 32 <--> 32, 64 <--> 64
1733 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint32, npy_uint32>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1734 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint64, npy_uint64>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
1735 def("applyMapping", registerConverters(&pythonApplyMapping<1, npy_uint8, npy_uint8>), (arg("src"), arg("mapping"), arg("allow_incomplete_mapping")=false, arg("out")=python::object()));
14541736 }
14551737
14561738 void defineEdgedetection();
0 INCLUDE_DIRECTORIES(${VIGRANUMPY_INCLUDE_DIRS} ${FFTW3_INCLUDE_DIR})
0 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${VIGRANUMPY_INCLUDE_DIRS} ${FFTW3_INCLUDE_DIR})
11
22 VIGRA_ADD_NUMPY_MODULE(fourier
33 SOURCES
0 INCLUDE_DIRECTORIES(${VIGRANUMPY_INCLUDE_DIRS})
0 INCLUDE_DIRECTORIES(${SUPPRESS_WARNINGS} ${VIGRANUMPY_INCLUDE_DIRS})
11
22 SET(TEST_SCRIPTS
33 test_arraytypes.py
3232 _impl_test_labelMultiArray(numpy.uint8)
3333 _impl_test_labelMultiArray(numpy.uint32)
3434 _impl_test_labelMultiArray(numpy.float32)
35
36
def _impl_test_applyMapping(dtype):
    """Exercise vigra.analysis.applyMapping for one source dtype."""
    src = numpy.arange(100, dtype=dtype).reshape(10, 10)
    lut = {k: k + 100 for k in src.flat[:]}

    # Out-of-place call: dtype must be preserved and every value shifted.
    shifted = vigra.analysis.applyMapping(src, lut)
    assert shifted.dtype == src.dtype, "Default output dtype did not match input dtype!"
    assert (shifted == src + 100).all()

    # In-place call: write the result back into the input array.
    scratch = src.copy()
    vigra.analysis.applyMapping(scratch, lut, out=scratch)
    assert (scratch == src + 100).all()

    # Mixed dtypes: map into a wider output type...
    lut = {k: v for k, v in zip(src.flat[:], (src.flat[:] + 100).astype(numpy.uint64))}
    out64 = numpy.zeros_like(src, dtype=numpy.uint64)
    vigra.analysis.applyMapping(src, lut, out=out64)
    assert (out64 == src + 100).all()

    # ...and into a narrower one.
    lut = {k: v for k, v in zip(src.flat[:], (src.flat[:] + 100).astype(numpy.uint8))}
    out8 = numpy.zeros_like(src, dtype=numpy.uint8)
    vigra.analysis.applyMapping(src, lut, out=out8)
    assert (out8 == src + 100).all()

    # Remove the first row's keys to make the mapping incomplete.
    for key in range(10):
        del lut[key]

    shifted = vigra.analysis.applyMapping(src, lut, allow_incomplete_mapping=True)
    assert (shifted[0] == src[0]).all()
    assert (shifted[1:] == src[1:] + 100).all()

    # Without the flag, an incomplete mapping must raise KeyError.
    try:
        shifted = vigra.analysis.applyMapping(src, lut, allow_incomplete_mapping=False)
    except KeyError:
        pass
    else:
        assert False, "Expected to get an exception due to the incomplete mapping!"
78
def test_applyMapping():
    # Run the applyMapping checks for each supported source dtype.
    for dt in (numpy.uint8, numpy.uint32, numpy.uint64):
        _impl_test_applyMapping(dt)
83
def _impl_test_unique(dtype):
    """unique() must collapse duplicates; with sort=True the output is ascending."""
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    duplicated = numpy.array(primes * 2, dtype=dtype)
    u = vigra.analysis.unique(duplicated, sort=True)
    assert (u == primes).all()
88
def test_unique():
    # Run the unique() checks for each supported dtype.
    for dt in (numpy.uint8, numpy.uint32, numpy.uint64):
        _impl_test_unique(dt)
93
def _impl_relabelConsecutive(dtype):
    """Exercise vigra.analysis.relabelConsecutive for one dtype."""
    start = 17
    # 100x100 random labels in [start, start+100); the last row guarantees
    # that every value in the range actually occurs.
    labels = numpy.random.randint(start, start + 100, size=(100, 100)).astype(dtype)
    labels[-1] = numpy.arange(start, start + 100, dtype=dtype)
    labels[:] *= 3  # introduce gaps between the label values

    consecutive, maxlabel, mapping = vigra.analysis.relabelConsecutive(labels, start)

    assert consecutive.dtype == labels.dtype, "Default output dtype did not match input dtype!"
    assert maxlabel == consecutive.max()
    assert (vigra.analysis.applyMapping(labels, mapping) == consecutive).all()

    assert (numpy.unique(consecutive) == numpy.arange(start, start + 100)).all(), \
        "relabeled array does not have consecutive labels"

    # The relabeling must preserve the partition of the array into regions.
    assert ((labels == labels[0, 0]) == (consecutive == consecutive[0, 0])).all()

    # Repeat the checks with an in-place relabeling.
    saved = labels.copy()
    consecutive, maxlabel, mapping = vigra.analysis.relabelConsecutive(labels, start, out=labels)
    assert consecutive is labels
    assert maxlabel == consecutive.max()
    assert (vigra.analysis.applyMapping(saved, mapping) == consecutive).all()

    assert (numpy.unique(labels) == numpy.arange(start, start + 100)).all(), \
        "relabeled array does not have consecutive labels"

    assert ((saved == saved[0, 0]) == (labels == labels[0, 0])).all()
125
def test_relabelConsecutive():
    # Run the relabelConsecutive checks for each supported dtype.
    for dt in (numpy.uint8, numpy.uint32, numpy.uint64):
        _impl_relabelConsecutive(dt)
130
def test_relabelConsecutive_keep_zeros():
    """relabelConsecutive must honour the keep_zeros flag."""
    labels = numpy.arange(1, 10, dtype=numpy.uint8)
    labels[1::2] = 0  # zero out every second entry

    # keep_zeros=True: zeros stay zero, other values become consecutive from 100.
    consecutive, maxlabel, mapping = vigra.analysis.relabelConsecutive(labels, start_label=100, keep_zeros=True)
    assert (consecutive[1::2] == 0).all(), \
        "Zeros were not left untouched!"
    assert set(consecutive[0::2]) == set(range(100, 100 + len(consecutive[0::2]))), \
        "Non-zero items were not correctly consecutivized!"
    assert maxlabel == consecutive.max()
    assert (vigra.analysis.applyMapping(labels, mapping) == consecutive).all()

    # keep_zeros=False: zeros are relabeled like any other value.
    consecutive, maxlabel, mapping = vigra.analysis.relabelConsecutive(labels, start_label=100, keep_zeros=False)
    assert (numpy.unique(consecutive) == numpy.arange(100, 100 + 1 + len(labels[::2]))).all(), \
        "relabeled array does not have consecutive labels: {}".format(consecutive)
    assert maxlabel == consecutive.max()
    assert (vigra.analysis.applyMapping(labels, mapping) == consecutive).all()
150
36151 def ok_():
37152 print(".", file=sys.stderr)