From e5664f476cbd375f4fc53abe055f2552d8e87a02 Mon Sep 17 00:00:00 2001
From: oskar-maier-ms
Date: Fri, 15 Dec 2023 11:51:02 +0000
Subject: [PATCH 1/3] Introduced pre-commit hooks

---
 .pre-commit-config.yaml | 33 +++++++++++++++++++++++++++++++++
 README.md | 6 ++++++
 2 files changed, 39 insertions(+)
 create mode 100644 .pre-commit-config.yaml

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..c44cabe6
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+default_stages: [commit]
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: debug-statements
+
+  - repo: https://github.com/pycqa/isort
+    rev: "5.13.2"
+    hooks:
+      - id: isort
+        args: ["--profile", "black", "--line-length=88"]
+
+  - repo: https://github.com/psf/black
+    rev: 23.12.0
+    hooks:
+      - id: black
+
+  - repo: https://github.com/hadialqattan/pycln
+    rev: "v2.4.0"
+    hooks:
+      - id: pycln
+        args: [--all]
+
+  - repo: https://github.com/Yelp/detect-secrets
+    rev: v1.4.0
+    hooks:
+      - id: detect-secrets
diff --git a/README.md b/README.md
index 64fc6121..ced727e3 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,12 @@ MedPy is an image processing library and collection of scripts targeted towards
 - Download (development version): https://github.com/loli/medpy
 - HTML documentation and installation instruction (development version): create this from doc/ folder following instructions in contained README file
 
+## Contribute
+
+- Clone the `master` branch from [github](https://github.com/loli/medpy)
+- Install the [pre-commit](https://pre-commit.com/) hooks
+- Submit your change as a pull request
+
 ## Python 2 version
 
 Python 2 is no longer supported. But you can still use the older releases `<=0.3.0`.

From 1a2ae6e8b87a197bf76e950c02c55b56fa6cff4c Mon Sep 17 00:00:00 2001
From: oskar-maier-ms
Date: Fri, 15 Dec 2023 11:55:57 +0000
Subject: [PATCH 2/3] Remove travis build plan

---
 .travis.yml | 28 ----------------------------
 1 file changed, 28 deletions(-)
 delete mode 100644 .travis.yml

diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 88a94f2a..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# config for travis-ci
-language: python
-sudo: false
-dist: trusty
-python:
-  - "2.7"
-  - "3.4"
-  - "3.5"
-  - "3.6"
-install:
-  # gcut version
-  - "ls /usr/lib/x86_64-linux-gnu/libboost_*"
-  - "pip install -r requirements-dev.txt"
-  - "pip install -v -e ."
-script: true
-addons:
-  apt:
-    packages:
-      - build-essential
-      - libboost-python-dev
-cache:
-- apt
-- directories:
-  - "$HOME/.cache/pip"
-script:
-  - nosetests tests/filter_/
-# - nosetests tests/io_/
-# - nosetests tests/features_/

From 292b68dbd84cd47a2850b2ed7339eee696fea29d Mon Sep 17 00:00:00 2001
From: oskar-maier-ms
Date: Fri, 15 Dec 2023 12:23:06 +0000
Subject: [PATCH 3/3] Code formatting and cleaning.

---
 .gitignore | 39 +-
 .pre-commit-config.yaml | 3 +-
 README_PYPI.md | 2 +-
 bin/medpy_anisotropic_diffusion.py | 104 ++-
 bin/medpy_apparent_diffusion_coefficient.py | 109 ++-
 bin/medpy_binary_resampling.py | 112 ++-
 bin/medpy_convert.py | 65 +-
 bin/medpy_create_empty_volume_by_example.py | 54 +-
 bin/medpy_dicom_slices_to_volume.py | 65 +-
 bin/medpy_dicom_to_4D.py | 114 ++-
 bin/medpy_diff.py | 86 +-
 bin/medpy_extract_contour.py | 105 ++-
 bin/medpy_extract_min_max.py | 89 +-
 bin/medpy_extract_sub_volume.py | 139 ++-
 bin/medpy_extract_sub_volume_auto.py | 159 ++--
 bin/medpy_extract_sub_volume_by_example.py | 162 ++--
 bin/medpy_fit_into_shape.py | 84 +-
 bin/medpy_gradient.py | 79 +-
 bin/medpy_graphcut_label.py | 162 ++--
 bin/medpy_graphcut_label_bgreduced.py | 205 +++--
 bin/medpy_graphcut_label_w_regional.py | 199 +++--
 bin/medpy_graphcut_label_wsplit.py | 113 ++-
 bin/medpy_graphcut_voxel.py | 210 +++--
 bin/medpy_grid.py | 180 ++--
 bin/medpy_info.py | 80 +-
 bin/medpy_intensity_range_standardization.py | 199 +++--
 bin/medpy_intersection.py | 102 ++-
 bin/medpy_join_masks.py | 106 ++-
 bin/medpy_join_xd_to_xplus1d.py | 130 ++-
 bin/medpy_label_count.py | 65 +-
 bin/medpy_label_fit_to_mask.py | 62 +-
 bin/medpy_label_superimposition.py | 158 +++-
 bin/medpy_merge.py | 70 +-
 bin/medpy_morphology.py | 121 ++-
 bin/medpy_resample.py | 102 ++-
 bin/medpy_reslice_3d_to_4d.py | 95 ++-
 bin/medpy_set_pixel_spacing.py | 52 +-
 bin/medpy_shrink_image.py | 91 +-
 bin/medpy_split_xd_to_xminus1d.py | 87 +-
 bin/medpy_stack_sub_volumes.py | 119 ++-
 bin/medpy_swap_dimensions.py | 85 +-
 bin/medpy_watershed.py | 60 +-
 bin/medpy_zoom_image.py | 112 ++-
 doc/README | 13 +-
 doc/numpydoc/LICENSE.txt | 1 -
 doc/numpydoc/numpydoc/__init__.py | 4 +-
 doc/numpydoc/numpydoc/comment_eater.py | 73 +-
 doc/numpydoc/numpydoc/compiler_unparse.py | 245 +++---
 doc/numpydoc/numpydoc/docscrape.py | 346 ++++----
 doc/numpydoc/numpydoc/docscrape_sphinx.py | 200 +++--
 doc/numpydoc/numpydoc/linkcode.py | 47 +-
 doc/numpydoc/numpydoc/numpydoc.py | 141 +--
 doc/numpydoc/numpydoc/phantom_import.py | 112 ++-
 doc/numpydoc/numpydoc/plot_directive.py | 287 ++++---
 doc/numpydoc/numpydoc/tests/test_docscrape.py | 807 ------------------
 doc/numpydoc/numpydoc/tests/test_linkcode.py | 5 -
 .../numpydoc/tests/test_phantom_import.py | 12 -
 .../numpydoc/tests/test_plot_directive.py | 11 -
 doc/numpydoc/numpydoc/tests/test_traitsdoc.py | 11 -
 doc/numpydoc/numpydoc/traitsdoc.py | 107 ++-
 doc/numpydoc/setup.py | 17 +-
 doc/scipy-sphinx-theme/README.rst | 2 +-
 .../_theme/scipy/layout.html | 1 -
 .../_theme/scipy/static/js/copybutton.js | 1 -
 .../scipy/static/less/bootstrap/close.less | 2 +-
 .../scipy/static/less/bootstrap/code.less | 2 +-
 .../scipy/static/less/bootstrap/layouts.less | 2 +-
 .../scipy/static/less/bootstrap/pager.less | 2 +-
 .../less/bootstrap/responsive-navbar.less | 4 +-
 .../scipy/static/less/spc-bootstrap.less | 2 +-
 .../_theme/scipy/static/less/spc-content.less | 10 +-
 .../_theme/scipy/static/less/spc-extend.less | 2 +-
 .../_theme/scipy/static/less/spc-footer.less | 2 +-
 .../_theme/scipy/static/less/spc-header.less | 6 +-
 .../scipy/static/less/spc-rightsidebar.less | 2 +-
 .../_theme/scipy/static/scipy.css_t | 2 +-
 doc/scipy-sphinx-theme/conf.py | 85 +-
 doc/scipy-sphinx-theme/index.rst | 1 -
 doc/scipy-sphinx-theme/test_autodoc_3.rst | 1 -
 doc/source/conf.py | 202 ++---
 doc/source/features.rst | 1 -
 doc/source/filter.rst | 1 -
 doc/source/graphcut.rst | 1 -
 doc/source/index.rst | 11 +-
 .../information/commandline_tools_listing.rst | 12 +-
 doc/source/information/imageformats.rst | 4 +-
 doc/source/installation/asroot.rst | 11 +-
 doc/source/installation/asuser.rst | 12 +-
 doc/source/installation/conda.rst | 2 +-
 doc/source/installation/fastpath.rst | 1 -
 doc/source/installation/graphcutsupport.rst | 2 +-
 doc/source/io.rst | 1 -
 doc/source/iterators.rst | 1 -
 doc/source/metric.rst | 1 -
 doc/source/neighbours.rst | 1 -
 doc/source/utilities.rst | 1 -
 lib/maxflow/src/BUILD | 2 -
 lib/maxflow/src/CMakeLists.txt | 3 +-
 lib/maxflow/src/Jamroot | 2 +-
 lib/maxflow/src/block.h | 3 +-
 lib/maxflow/src/get_edge_test.py | 114 +--
 lib/maxflow/src/graph.cpp | 26 +-
 lib/maxflow/src/graph.h | 82 +-
 lib/maxflow/src/instances.inc | 5 +-
 lib/maxflow/src/maxflow.cpp | 28 +-
 lib/maxflow/src/pythongraph.h | 1 -
 lib/maxflow/src/sum_edge_test.py | 115 +--
 lib/maxflow/src/wrapper.cpp | 6 +-
 medpy/__init__.py | 2 +-
 medpy/core/__init__.py | 22 +-
 medpy/core/exceptions.py | 42 +-
 medpy/core/logger.py | 90 +-
 medpy/features/__init__.py | 50 +-
 medpy/features/histogram.py | 313 ++++---
 medpy/features/intensity.py | 302 +++++--
 medpy/features/texture.py | 158 ++--
 medpy/features/utilities.py | 13 +-
 medpy/filter/IntensityRangeStandardization.py | 162 ++--
 medpy/filter/__init__.py | 47 +-
 medpy/filter/binary.py | 16 +-
 medpy/filter/houghtransform.py | 73 +-
 medpy/filter/image.py | 229 +++--
 medpy/filter/label.py | 92 +-
 medpy/filter/noise.py | 102 ++-
 medpy/filter/smoothing.py | 35 +-
 medpy/filter/utilities.py | 227 +++--
 medpy/graphcut/__init__.py | 11 +-
 medpy/graphcut/energy_label.py | 266 +++---
 medpy/graphcut/energy_voxel.py | 323 +++----
 medpy/graphcut/generate.py | 257 +++---
 medpy/graphcut/graph.py | 289 ++++---
 medpy/graphcut/wrapper.py | 207 +++--
 medpy/graphcut/write.py | 61 +-
 medpy/io/__init__.py | 26 +-
 medpy/io/header.py | 114 ++-
 medpy/io/load.py | 39 +-
 medpy/io/save.py | 42 +-
 medpy/iterators/__init__.py | 19 +-
 medpy/iterators/patchwise.py | 214 +++--
 medpy/metric/__init__.py | 38 +-
 medpy/metric/binary.py | 136 ++-
 medpy/metric/histogram.py | 683 ++++++++------
 medpy/metric/image.py | 62 +-
 medpy/neighbours/__init__.py | 17 +-
 medpy/neighbours/knn.py | 17 +-
 medpy/utilities/__init__.py | 6 +-
 medpy/utilities/argparseu.py | 72 +-
 setup.py | 245 +++---
 tests/__init__.py | 2 +-
 tests/features_/__init__.py | 8 +-
 tests/features_/histogram.py | 333 ++++++-
 tests/features_/intensity.py | 705 +++++++++------
 tests/features_/texture.py | 172 ++--
 .../filter_/IntensityRangeStandardization.py | 176 ++--
 tests/filter_/__init__.py | 8 +-
 tests/filter_/anisotropic_diffusion.py | 20 +-
 tests/filter_/houghtransform.py | 392 ++++++---
 tests/filter_/image.py | 351 +++----
 tests/filter_/utilities.py | 174 ++--
 tests/graphcut_/__init__.py | 10 +-
 tests/graphcut_/cut.py | 205 +++--
 tests/graphcut_/energy_label.py | 347 +++++---
 tests/graphcut_/energy_voxel.py | 210 +++--
 tests/graphcut_/graph.py | 47 +-
 tests/io_/__init__.py | 6 +-
 tests/io_/loadsave.py | 376 +++++---
 tests/io_/metadata.py | 437 ++++++---
 tests/metric_/histogram.py | 52 +-
 tests/support.py | 22 +-
 169 files changed, 9844 insertions(+), 7083 deletions(-)
 delete mode 100644 doc/numpydoc/numpydoc/tests/test_docscrape.py
 delete mode 100644 doc/numpydoc/numpydoc/tests/test_linkcode.py
 delete mode 100644 doc/numpydoc/numpydoc/tests/test_phantom_import.py
 delete mode 100644 doc/numpydoc/numpydoc/tests/test_plot_directive.py
 delete mode 100644 doc/numpydoc/numpydoc/tests/test_traitsdoc.py

diff --git a/.gitignore b/.gitignore
index 4d105b96..0788d4c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,38 +1,34 @@
 TODO.txt
 
-# Images #
-##########
+# Images
 *.nii
 *.mhd
 *.raw
 
-# DOC dirs #
-############
+# Local virtual envs
+.venv/
+
+# DOC dirs
 doc/build/
 doc/generated/
 doc/source/generated/
 
-# Notebooks dirs #
-##################
+# Notebooks dirs
 .ipynb_checkpoints
 
-# BUILD dirs #
-##############
+# BUILD dirs
 build/
 dist/
 MedPy.egg-info/
 
-# Only locally used, temporary .py scripts. #
-#############################################
+# Only locally used, temporary .py scripts.
 _*.py
 !__init__.py
 
-# Backup files #
-################
+# Backup files
 *.bak
 
-# Compiled source #
-###################
+# Compiled source
 *.com
 *.class
 *.dll
@@ -42,8 +38,7 @@ _*.py
 *.pyc
 *.pyo
 
-# Packages #
-############
+# Packages
 # it's better to unpack these files and commit the raw source
 # git has its own built in compression methods
 *.7z
@@ -55,29 +50,25 @@ _*.py
 *.tar
 *.zip
 
-# Logs and databases #
-######################
+# Logs and databases
 *.log
 *.sql
 *.sqlite
 
-# OS generated files #
-######################
+# OS generated files
 .DS_Store*
 ehthumbs.db
 Icon?
 Thumbs.db
 *~
 
-# Eclipse and PyDev project files #
-###################################
+# Eclipse and PyDev project files
 .project
 .pydevproject
 .settings/
 .metadata/
 
-# Suggestions by GitHub for Python projects #
-#############################################
+# Suggestions by GitHub for Python projects
 # Packages
 *.egg
 *.egg-info
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c44cabe6..2d9fc208 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -25,9 +25,10 @@ repos:
     rev: "v2.4.0"
     hooks:
       - id: pycln
-        args: [--all]
+        args: ["--all"]
 
   - repo: https://github.com/Yelp/detect-secrets
     rev: v1.4.0
     hooks:
       - id: detect-secrets
+        args: ["--exclude-files", ".*\\.ipynb"]
diff --git a/README_PYPI.md b/README_PYPI.md
index fe6abd9b..e936bd4e 100644
--- a/README_PYPI.md
+++ b/README_PYPI.md
@@ -123,7 +123,7 @@ The supported image file formats should include at least the following. Note tha
 * Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
 * Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
 * Digital Imaging and Communications in Medicine (DICOM) series (/)
-* Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) 
+* Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
 * Medical Imaging NetCDF (MINC) (.mnc, .MNC)
 * Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
 
diff --git a/bin/medpy_anisotropic_diffusion.py b/bin/medpy_anisotropic_diffusion.py
index 3f96daeb..d2b32b04 100755
--- a/bin/medpy_anisotropic_diffusion.py
+++ b/bin/medpy_anisotropic_diffusion.py
@@ -24,15 +24,16 @@
 import logging
 import os
 
+from medpy.core import Logger
+from medpy.filter.smoothing import anisotropic_diffusion
+
+# own modules
+from medpy.io import get_pixel_spacing, load, save
+
 # third-party modules
 
 # path changes
 
-# own modules
-from medpy.io import load, save, get_pixel_spacing
-from medpy.core import Logger
-from medpy.filter.smoothing import anisotropic_diffusion
-
 
 # information
 __author__ = "Oskar Maier"
@@ -42,61 +43,106 @@
 __description__ = """
                   Executes gradient anisotropic diffusion filter over an image.
                   This smoothing algorithm is edges preserving.
-                  
+
                   Note that the images voxel-spacing will be taken into account.
-                  
+
                   Copyright (C) 2013 Oskar Maier
                   This program comes with ABSOLUTELY NO WARRANTY; This is free software,
                   and you are welcome to redistribute it under certain conditions; see
-                  the LICENSE file or for details. 
""" + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensity, a initial check can save frustration) if not args.force: if os.path.exists(args.output): - raise parser.error('The output image {} already exists.'.format(args.output)) - + raise parser.error( + "The output image {} already exists.".format(args.output) + ) + # loading image data_input, header_input = load(args.input) - + # apply the watershed - logger.info('Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...'.format(args.iterations, args.kappa, args.gamma)) - data_output = anisotropic_diffusion(data_input, args.iterations, args.kappa, args.gamma, get_pixel_spacing(header_input)) + logger.info( + "Applying anisotropic diffusion with settings: niter={} / kappa={} / gamma={}...".format( + args.iterations, args.kappa, args.gamma + ) + ) + data_output = anisotropic_diffusion( + data_input, + args.iterations, + args.kappa, + args.gamma, + get_pixel_spacing(header_input), + ) # save file save(data_output, args.output, header_input, args.force) - - logger.info('Successfully terminated.') + + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-i', '--iterations', type=int, default=1, help='The number of smoothing iterations. Strong parameter.') - parser.add_argument('-k', '--kappa', type=int, default=50, help='The algorithms kappa parameter. The higher the more edges are smoothed over.') - parser.add_argument('-g', '--gamma', type=float, default=0.1, help='The algorithms gamma parameter. The higher, the stronger the plateaus between edges are smeared.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-i", + "--iterations", + type=int, + default=1, + help="The number of smoothing iterations. Strong parameter.", + ) + parser.add_argument( + "-k", + "--kappa", + type=int, + default=50, + help="The algorithms kappa parameter. The higher the more edges are smoothed over.", + ) + parser.add_argument( + "-g", + "--gamma", + type=float, + default=0.1, + help="The algorithms gamma parameter. The higher, the stronger the plateaus between edges are smeared.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." 
+ ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser - + + if __name__ == "__main__": main() diff --git a/bin/medpy_apparent_diffusion_coefficient.py b/bin/medpy_apparent_diffusion_coefficient.py index 14df7088..ddfd9e20 100755 --- a/bin/medpy_apparent_diffusion_coefficient.py +++ b/bin/medpy_apparent_diffusion_coefficient.py @@ -25,17 +25,16 @@ # third-party modules import numpy -from scipy.ndimage import binary_fill_holes, binary_dilation,\ - binary_erosion - -# path changes +from scipy.ndimage import binary_dilation, binary_erosion, binary_fill_holes # own modules from medpy.core import Logger -from medpy.io import load, save, header -from medpy.filter import otsu from medpy.core.exceptions import ArgumentError +from medpy.filter import otsu from medpy.filter.binary import largest_connected_component +from medpy.io import header, load, save + +# path changes # information @@ -84,14 +83,17 @@ the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # loading input images b0img, b0hdr = load(args.b0image) @@ -103,28 +105,42 @@ def main(): # check if image are compatible if not b0img.shape == bximg.shape: - raise ArgumentError('The input images shapes differ i.e. {} != {}.'.format(b0img.shape, bximg.shape)) + raise ArgumentError( + "The input images shapes differ i.e. {} != {}.".format( + b0img.shape, bximg.shape + ) + ) if not header.get_pixel_spacing(b0hdr) == header.get_pixel_spacing(bxhdr): - raise ArgumentError('The input images voxel spacing differs i.e. {} != {}.'.format(header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr))) + raise ArgumentError( + "The input images voxel spacing differs i.e. {} != {}.".format( + header.get_pixel_spacing(b0hdr), header.get_pixel_spacing(bxhdr) + ) + ) # check if supplied threshold value as well as the b value is above 0 if args.threshold is not None and not args.threshold >= 0: - raise ArgumentError('The supplied threshold value must be greater than 0, otherwise a division through 0 might occur.') + raise ArgumentError( + "The supplied threshold value must be greater than 0, otherwise a division through 0 might occur." + ) if not args.b > 0: - raise ArgumentError('The supplied b-value must be greater than 0.') + raise ArgumentError("The supplied b-value must be greater than 0.") # compute threshold value if not supplied if args.threshold is None: - b0thr = otsu(b0img, 32) / 4. # divide by 4 to decrease impact - bxthr = otsu(bximg, 32) / 4. + b0thr = otsu(b0img, 32) / 4.0 # divide by 4 to decrease impact + bxthr = otsu(bximg, 32) / 4.0 if 0 >= b0thr: - raise ArgumentError('The supplied b0image seems to contain negative values.') + raise ArgumentError( + "The supplied b0image seems to contain negative values." + ) if 0 >= bxthr: - raise ArgumentError('The supplied bximage seems to contain negative values.') + raise ArgumentError( + "The supplied bximage seems to contain negative values." 
+ ) else: b0thr = bxthr = args.threshold - logger.debug('thresholds={}/{}, b-value={}'.format(b0thr, bxthr, args.b)) + logger.debug("thresholds={}/{}, b-value={}".format(b0thr, bxthr, args.b)) # threshold b0 + bx DW image to obtain a mask # b0 mask avoid division through 0, bx mask avoids a zero in the ln(x) computation @@ -135,11 +151,15 @@ def main(): mask = largest_connected_component(mask) mask = binary_dilation(mask, iterations=1) - logger.debug('excluding {} of {} voxels from the computation and setting them to zero'.format(numpy.count_nonzero(mask), numpy.prod(mask.shape))) + logger.debug( + "excluding {} of {} voxels from the computation and setting them to zero".format( + numpy.count_nonzero(mask), numpy.prod(mask.shape) + ) + ) # compute the ADC adc = numpy.zeros(b0img.shape, b0img.dtype) - adc[mask] = -1. * args.b * numpy.log(bximg[mask] / b0img[mask]) + adc[mask] = -1.0 * args.b * numpy.log(bximg[mask] / b0img[mask]) adc[adc < 0] = 0 # saving the resulting image @@ -150,20 +170,49 @@ def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('b0image', help='the diffusion weighted image required with b=0') - parser.add_argument('bximage', help='the diffusion weighted image required with b=x') - parser.add_argument('b', type=int, help='the b-value used to acquire the bx-image (i.e. x)') - parser.add_argument('output', help='the computed apparent diffusion coefficient image') - - parser.add_argument('-t', '--threshold', type=int, dest='threshold', help='set a fixed threshold for the input images to mask the computation') - - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite existing files') + parser = argparse.ArgumentParser( + description=__description__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "b0image", help="the diffusion weighted image required with b=0" + ) + parser.add_argument( + "bximage", help="the diffusion weighted image required with b=x" + ) + parser.add_argument( + "b", type=int, help="the b-value used to acquire the bx-image (i.e. x)" + ) + parser.add_argument( + "output", help="the computed apparent diffusion coefficient image" + ) + + parser.add_argument( + "-t", + "--threshold", + type=int, + dest="threshold", + help="set a fixed threshold for the input images to mask the computation", + ) + + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="verbose output" + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="overwrite existing files", + ) return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_binary_resampling.py b/bin/medpy_binary_resampling.py index 50d488fe..c5f5d90b 100755 --- a/bin/medpy_binary_resampling.py +++ b/bin/medpy_binary_resampling.py @@ -19,22 +19,21 @@ along with this program. If not, see . 
""" +import argparse +import logging + # build-in modules import os -import logging -import argparse # third-party modules import numpy -from scipy.ndimage import zoom -from scipy.ndimage import distance_transform_edt, binary_erosion -from scipy.ndimage import label +from scipy.ndimage import binary_erosion, distance_transform_edt, label, zoom # own modules from medpy.core import Logger -from medpy.filter import resample, bounding_box +from medpy.filter import resample +from medpy.io import header, load, save from medpy.utilities import argparseu -from medpy.io import load, save, header # information __author__ = "Oskar Maier" @@ -61,6 +60,7 @@ the LICENSE file or for details. """ + # code def main(): parser = getParser() @@ -68,8 +68,10 @@ def main(): # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # loading input images img, hdr = load(args.input) @@ -77,44 +79,53 @@ def main(): # check spacing values if not len(args.spacing) == img.ndim: - parser.error('The image has {} dimensions, but {} spacing parameters have been supplied.'.format(img.ndim, len(args.spacing))) + parser.error( + "The image has {} dimensions, but {} spacing parameters have been supplied.".format( + img.ndim, len(args.spacing) + ) + ) # check if output image exists if not args.force: if os.path.exists(args.output): - parser.error('The output image {} already exists.'.format(args.output)) + parser.error("The output image {} already exists.".format(args.output)) - logger.debug('target voxel spacing: {}'.format(args.spacing)) + logger.debug("target voxel spacing: {}".format(args.spacing)) # determine number of required complete slices for up-sampling vs = header.get_pixel_spacing(hdr) - rcss = [int(y // x - 1) for x, y in zip(args.spacing, vs)] # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway + rcss = [ + int(y // x - 1) for x, y in zip(args.spacing, vs) + ] # TODO: For option b, remove the - 1; better: no option b, since I am rounding later anyway # remove negatives and round up to next even number rcss = [x if x > 0 else 0 for x in rcss] rcss = [x if 0 == x % 2 else x + 1 for x in rcss] - logger.debug('intermediate slices to add per dimension: {}'.format(rcss)) + logger.debug("intermediate slices to add per dimension: {}".format(rcss)) # for each dimension requiring up-sampling, from the highest down, perform shape based slice interpolation - logger.info('Adding required slices using shape based interpolation.') + logger.info("Adding required slices using shape based interpolation.") for dim, rcs in enumerate(rcss): if rcs > 0: - logger.debug('adding {} intermediate slices to dimension {}'.format(rcs, dim)) + logger.debug( + "adding {} intermediate slices to dimension {}".format(rcs, dim) + ) img = shape_based_slice_interpolation(img, dim, rcs) - logger.debug('resulting new image shape: {}'.format(img.shape)) + logger.debug("resulting new image shape: {}".format(img.shape)) # compute and set new voxel spacing - nvs = [x / (y + 1.) 
for x, y in zip(vs, rcss)] + nvs = [x / (y + 1.0) for x, y in zip(vs, rcss)] header.set_pixel_spacing(hdr, nvs) - logger.debug('intermediate voxel spacing: {}'.format(nvs)) + logger.debug("intermediate voxel spacing: {}".format(nvs)) # interpolate with nearest neighbour - logger.info('Re-sampling the image with a b-spline order of {}.'.format(args.order)) - img, hdr = resample(img, hdr, args.spacing, args.order, mode='nearest') + logger.info("Re-sampling the image with a b-spline order of {}.".format(args.order)) + img, hdr = resample(img, hdr, args.spacing, args.order, mode="nearest") # saving the resulting image save(img, args.output, hdr, args.force) + def shape_based_slice_interpolation(img, dim, nslices): """ Adds `nslices` slices between all slices of the binary image `img` along dimension @@ -139,7 +150,7 @@ def shape_based_slice_interpolation(img, dim, nslices): """ # check arguments if not 0 == nslices % 2: - raise ValueError('nslices must be an even number') + raise ValueError("nslices must be an even number") out = None slicer = [slice(None)] * img.ndim @@ -168,6 +179,7 @@ def shape_based_slice_interpolation(img, dim, nslices): return out + def shape_based_slice_insertation_object_wise(sl1, sl2, dim, nslices, order=3): """ Wrapper to apply `shape_based_slice_insertation()` for each binary object @@ -185,6 +197,7 @@ def shape_based_slice_insertation_object_wise(sl1, sl2, dim, nslices, order=3): out |= _out return out + def shape_based_slice_insertation(sl1, sl2, dim, nslices, order=3): """ Insert `nslices` new slices between `sl1` and `sl2` along dimension `dim` using shape @@ -224,8 +237,8 @@ def shape_based_slice_insertation(sl1, sl2, dim, nslices, order=3): slices.append(binary_erosion(sl2, iterations=i)) slices.append(sl2) return numpy.rollaxis(numpy.asarray(slices), 0, dim + 1) - #return numpy.asarray([sl.T for sl in slices]).T - elif 0 ==numpy.count_nonzero(sl2): + # return numpy.asarray([sl.T for sl in slices]).T + elif 0 == numpy.count_nonzero(sl2): slices = [sl1] for i in range(1, nslices / 2 + 1): slices.append(binary_erosion(sl1, iterations=i)) @@ -233,7 +246,7 @@ def shape_based_slice_insertation(sl1, sl2, dim, nslices, order=3): slices.append(numpy.zeros_like(sl2)) slices.append(sl2) return numpy.rollaxis(numpy.asarray(slices), 0, dim + 1) - #return numpy.asarray([sl.T for sl in slices]).T + # return numpy.asarray([sl.T for sl in slices]).T # interpolation shape based # note: distance_transform_edt shows strange behaviour for ones-arrays @@ -244,30 +257,57 @@ def shape_based_slice_insertation(sl1, sl2, dim, nslices, order=3): slicer = slicer[:dim] + [numpy.newaxis] + slicer[dim:] out = numpy.concatenate((dt1[slicer], dt2[slicer]), axis=dim) zoom_factors = [1] * dt1.ndim - zoom_factors = zoom_factors[:dim] + [(nslices + 2)/2.] + zoom_factors[dim:] + zoom_factors = zoom_factors[:dim] + [(nslices + 2) / 2.0] + zoom_factors[dim:] out = zoom(out, zoom_factors, order=order) return out <= 0 + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() if args.order < 0 or args.order > 5: - parser.error('The order has to be a number between 0 and 5.') + parser.error("The order has to be a number between 0 and 5.") return args + def getParser(): "Creates and returns the argparse parser object." 
- parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__description__) - parser.add_argument('input', help='the input image') - parser.add_argument('output', help='the output image') - parser.add_argument('spacing', type=argparseu.sequenceOfFloatsGt, help='the desired voxel spacing in colon-separated values, e.g. 1.2,1.2,5.0') - parser.add_argument('-o', '--order', type=int, default=0, dest='order', help='the bspline order, default is 0 (= nearest neighbour)') - - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite existing files') + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__description__, + ) + parser.add_argument("input", help="the input image") + parser.add_argument("output", help="the output image") + parser.add_argument( + "spacing", + type=argparseu.sequenceOfFloatsGt, + help="the desired voxel spacing in colon-separated values, e.g. 1.2,1.2,5.0", + ) + parser.add_argument( + "-o", + "--order", + type=int, + default=0, + dest="order", + help="the bspline order, default is 0 (= nearest neighbour)", + ) + + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="verbose output" + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="overwrite existing files", + ) return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_convert.py b/bin/medpy_convert.py index 966147f4..24159214 100755 --- a/bin/medpy_convert.py +++ b/bin/medpy_convert.py @@ -23,14 +23,14 @@ import argparse import logging -# third-party modules - -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# third-party modules + +# path changes + # information __author__ = "Oskar Maier" @@ -40,47 +40,68 @@ __description__ = """ Convert an image from one format into another. The image type is determined by the file suffixes. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input image data_input, header_input = load(args.input) - + # eventually empty data - if args.empty: data_input.fill(False) + if args.empty: + data_input.fill(False) # save resulting volume save(data_input, args.output, header_input, args.force) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-e', dest='empty', action='store_true', help='Instead of copying the voxel data, create an empty copy conserving all meta-data if possible.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-e", + dest="empty", + action="store_true", + help="Instead of copying the voxel data, create an empty copy conserving all meta-data if possible.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_create_empty_volume_by_example.py b/bin/medpy_create_empty_volume_by_example.py index 1e11e339..484bdb9d 100755 --- a/bin/medpy_create_empty_volume_by_example.py +++ b/bin/medpy_create_empty_volume_by_example.py @@ -25,12 +25,12 @@ # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# path changes + # information __author__ = "Oskar Maier" @@ -39,47 +39,63 @@ __status__ = "Release" __description__ = """ Creates an empty volume with the same attributes as the passes example image. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # loading input image input_data, input_header = load(args.example) - + # create empty volume with same attributes output_data = scipy.zeros(input_data.shape, dtype=input_data.dtype) - + # save resulting image save(output_data, args.output, input_header, args.force) - + logger.info("Successfully terminated.") - + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
- parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('example', help='The example volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') + parser = argparse.ArgumentParser( + description=__description__, formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("example", help="The example volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_dicom_slices_to_volume.py b/bin/medpy_dicom_slices_to_volume.py index 4186ec02..9d41e75a 100755 --- a/bin/medpy_dicom_slices_to_volume.py +++ b/bin/medpy_dicom_slices_to_volume.py @@ -22,14 +22,14 @@ import argparse import logging -# third-party modules - -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# third-party modules + +# path changes + # information __author__ = "Oskar Maier" @@ -40,49 +40,66 @@ Converts a collection of DICOM slices (a DICOM series) into a proper image volume. Note that this operation does not preserve header information. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + img, hdr = load(args.input) - + if args.spacing: - print('{}'.format(hdr.get_voxel_spacing())) + print("{}".format(hdr.get_voxel_spacing())) return 0 - - logger.debug('Resulting shape is {}.'.format(img.shape)) + + logger.debug("Resulting shape is {}.".format(img.shape)) # save resulting volume save(img, args.output, hdr, args.force) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source folder.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-s', dest='spacing', action='store_true', help='Just print spacing and exit.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input", help="Source folder.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-s", dest="spacing", action="store_true", help="Just print spacing and exit." + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_dicom_to_4D.py b/bin/medpy_dicom_to_4D.py index b004c88a..1abb4814 100755 --- a/bin/medpy_dicom_to_4D.py +++ b/bin/medpy_dicom_to_4D.py @@ -25,12 +25,12 @@ # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save from medpy.core.exceptions import ArgumentError +from medpy.io import load, save + +# path changes # information @@ -43,7 +43,7 @@ The supplied target dimension parameter determines the dimension along which to split the original image and the consecutive slices parameter determines the offset after which to split. - + A typical use-case are DICOM images, which often come with the temporal and third spatial dimension stacked on top of each other. Let us assume a (5000, 200, 190) 3D image. In reality this file contains a number of 50 @@ -51,85 +51,121 @@ slices of the first dimension show the transformation of a 2D image in time. Then occurs a visible jump, when the view changes in space from the 50th to the 51th slice. The following 50 slices are the temporal transformation of this new spatial slice and then - occur another jump, and so on. - + occur another jump, and so on. + Calling this script with a target dimension of 0 (meaning the first dimension of the image containing the 5000 slices) and a consecutive slices parameter of 50 (which is used to tell how many consecutive slices belong together), will result in a 4D image of the shape (100, 50, 200, 190) containing the spatial volumes separated by an additional time dimension. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + data_3d, _ = load(args.input) - + # check parameters if args.dimension >= data_3d.ndim or args.dimension < 0: - raise ArgumentError('The image has only {} dimensions. 
The supplied target dimension {} exceeds this number.'.format( - data_3d.ndim, - args.dimension)) + raise ArgumentError( + "The image has only {} dimensions. The supplied target dimension {} exceeds this number.".format( + data_3d.ndim, args.dimension + ) + ) if not 0 == data_3d.shape[args.dimension] % args.offset: - raise ArgumentError('The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.'.format( - data_3d.shape[args.dimension], - args.dimension, - data_3d.shape, - args.offset)) - + raise ArgumentError( + "The number of slices {} in the target dimension {} of the image shape {} is not dividable by the supplied number of consecutive slices {}.".format( + data_3d.shape[args.dimension], + args.dimension, + data_3d.shape, + args.offset, + ) + ) + # prepare empty target volume volumes_3d = data_3d.shape[args.dimension] / args.offset shape_4d = list(data_3d.shape) shape_4d[args.dimension] = volumes_3d data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype) - - logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset)) - + + logger.debug( + "Separating {} slices into {} 3D volumes of thickness {}.".format( + data_3d.shape[args.dimension], volumes_3d, args.offset + ) + ) + # iterate over 3D image and create sub volumes which are then added to the 4d volume for idx in range(args.offset): # collect the slices for sl in range(volumes_3d): idx_from = [slice(None), slice(None), slice(None)] - idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1) + idx_from[args.dimension] = slice( + idx + sl * args.offset, idx + sl * args.offset + 1 + ) idx_to = [slice(None), slice(None), slice(None)] - idx_to[args.dimension] = slice(sl, sl+1) - #print 'Slice {} to {}.'.format(idx_from, idx_to) + idx_to[args.dimension] = slice(sl, sl + 1) + # print 'Slice {} to {}.'.format(idx_from, idx_to) data_4d[idx][idx_to] = data_3d[idx_from] - + # flip dimensions such that the newly created is the last data_4d = scipy.swapaxes(data_4d, 0, 3) - + # save resulting 4D volume save(data_4d, args.output, False, args.force) - + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('input', help='Source directory.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('dimension', type=int, help='The dimension in which to perform the cut (starting from 0).') - parser.add_argument('offset', type=int, help='How many consecutive slices belong together before a shift occurs. 
/ The offset between the volumes.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("input", help="Source directory.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "dimension", + type=int, + help="The dimension in which to perform the cut (starting from 0).", + ) + parser.add_argument( + "offset", + type=int, + help="How many consecutive slices belong together before a shift occurs. / The offset between the volumes.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_diff.py b/bin/medpy_diff.py index c3599c5b..fd40cfdd 100755 --- a/bin/medpy_diff.py +++ b/bin/medpy_diff.py @@ -18,20 +18,21 @@ You should have received a copy of the GNU General Public License along with this program. If not, see .""" -# build-in modules -import sys import argparse import logging +# build-in modules +import sys +from functools import reduce + # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger from medpy.io import load -from functools import reduce + +# path changes # information @@ -41,60 +42,83 @@ __status__ = "Release" __description__ = """ Compares the pixel values of two images and gives a measure of the difference. - + Also compares the dtype and shape. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input image1 data_input1, _ = load(args.input1) - + # load input image2 data_input2, _ = load(args.input2) - + # compare dtype and shape - if not data_input1.dtype == data_input2.dtype: print('Dtype differs: {} to {}'.format(data_input1.dtype, data_input2.dtype)) + if not data_input1.dtype == data_input2.dtype: + print("Dtype differs: {} to {}".format(data_input1.dtype, data_input2.dtype)) if not data_input1.shape == data_input2.shape: - print('Shape differs: {} to {}'.format(data_input1.shape, data_input2.shape)) - print('The voxel content of images of different shape can not be compared. Exiting.') + print("Shape differs: {} to {}".format(data_input1.shape, data_input2.shape)) + print( + "The voxel content of images of different shape can not be compared. Exiting." 
+ ) sys.exit(-1) - + # compare image data - voxel_total = reduce(lambda x, y: x*y, data_input1.shape) + voxel_total = reduce(lambda x, y: x * y, data_input1.shape) voxel_difference = len((data_input1 != data_input2).nonzero()[0]) if not 0 == voxel_difference: - print('Voxel differ: {} of {} total voxels'.format(voxel_difference, voxel_total)) - print('Max difference: {}'.format(scipy.absolute(data_input1 - data_input2).max())) - else: print('No other difference.') - - logger.info("Successfully terminated.") - + print( + "Voxel differ: {} of {} total voxels".format(voxel_difference, voxel_total) + ) + print( + "Max difference: {}".format(scipy.absolute(data_input1 - data_input2).max()) + ) + else: + print("No other difference.") + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input1', help='Source volume one.') - parser.add_argument('input2', help='Source volume two.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input1", help="Source volume one.") + parser.add_argument("input2", help="Source volume two.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_extract_contour.py b/bin/medpy_extract_contour.py index 04c4de28..171dc9a0 100755 --- a/bin/medpy_extract_contour.py +++ b/bin/medpy_extract_contour.py @@ -26,15 +26,14 @@ # third-party modules import numpy -from scipy.ndimage import binary_erosion, binary_dilation,\ - generate_binary_structure - -# path changes +from scipy.ndimage import binary_dilation, binary_erosion, generate_binary_structure # own modules from medpy.core import Logger from medpy.io import load, save +# path changes + # information __author__ = "Oskar Maier" @@ -58,14 +57,17 @@ the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # load input image data_input, header_input = load(args.input) @@ -74,56 +76,109 @@ def main(): data_input = data_input.astype(numpy.bool_) # check dimension argument - if args.dimension and (not args.dimension >= 0 or not args.dimension < data_input.ndim): - argparse.ArgumentError(args.dimension, 'Invalid dimension of {} supplied. Image has only {} dimensions.'.format(args.dimension, data_input.ndim)) + if args.dimension and ( + not args.dimension >= 0 or not args.dimension < data_input.ndim + ): + argparse.ArgumentError( + args.dimension, + "Invalid dimension of {} supplied. 
Image has only {} dimensions.".format( + args.dimension, data_input.ndim + ), + ) # compute erosion and dilation steps - erosions = int(math.ceil(args.width / 2.)) - dilations = int(math.floor(args.width / 2.)) - logger.debug("Performing {} erosions and {} dilations to achieve a contour of width {}.".format(erosions, dilations, args.width)) + erosions = int(math.ceil(args.width / 2.0)) + dilations = int(math.floor(args.width / 2.0)) + logger.debug( + "Performing {} erosions and {} dilations to achieve a contour of width {}.".format( + erosions, dilations, args.width + ) + ) # erode, dilate and compute contour if not args.dimension: - eroded = binary_erosion(data_input, iterations=erosions) if not 0 == erosions else data_input - dilated = binary_dilation(data_input, iterations=dilations) if not 0 == dilations else data_input + eroded = ( + binary_erosion(data_input, iterations=erosions) + if not 0 == erosions + else data_input + ) + dilated = ( + binary_dilation(data_input, iterations=dilations) + if not 0 == dilations + else data_input + ) data_output = dilated - eroded else: slicer = [slice(None)] * data_input.ndim bs_slicer = [slice(None)] * data_input.ndim data_output = numpy.zeros_like(data_input) for sl in range(data_input.shape[args.dimension]): - slicer[args.dimension] = slice(sl, sl+1) + slicer[args.dimension] = slice(sl, sl + 1) bs_slicer[args.dimension] = slice(1, 2) bs = generate_binary_structure(data_input.ndim, 1) - eroded = binary_erosion(data_input[slicer], structure=bs[bs_slicer], iterations=erosions) if not 0 == erosions else data_input[slicer] - dilated = binary_dilation(data_input[slicer], structure=bs[bs_slicer], iterations=dilations) if not 0 == dilations else data_input[slicer] + eroded = ( + binary_erosion( + data_input[slicer], structure=bs[bs_slicer], iterations=erosions + ) + if not 0 == erosions + else data_input[slicer] + ) + dilated = ( + binary_dilation( + data_input[slicer], structure=bs[bs_slicer], iterations=dilations + ) + if not 0 == dilations + else data_input[slicer] + ) data_output[slicer] = dilated - eroded - logger.debug("Contour image contains {} contour voxels.".format(numpy.count_nonzero(data_output))) + logger.debug( + "Contour image contains {} contour voxels.".format( + numpy.count_nonzero(data_output) + ) + ) # save resulting volume save(data_output, args.output, header_input, args.force) logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() if args.width <= 0: - raise argparse.ArgumentError(args.width, 'The contour width must be a positive number.') + raise argparse.ArgumentError( + args.width, "The contour width must be a positive number." + ) return args + def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-w', '--width', dest='width', type=int, default=1, help='Width of the contour.') - parser.add_argument('--dimension', type=int, help='Extract contours only along this dimension.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-w", "--width", dest="width", type=int, default=1, help="Width of the contour." + ) + parser.add_argument( + "--dimension", type=int, help="Extract contours only along this dimension." + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_extract_min_max.py b/bin/medpy_extract_min_max.py index 35d80b6a..43597204 100755 --- a/bin/medpy_extract_min_max.py +++ b/bin/medpy_extract_min_max.py @@ -22,17 +22,17 @@ # build-in modules import argparse import logging -import sys import os - -# third-party modules - -# path changes +import sys # own modules from medpy.core import Logger from medpy.io import load +# third-party modules + +# path changes + # information __author__ = "Oskar Maier" @@ -42,71 +42,86 @@ __description__ = """ Extracts and displays the min/max values of a number of images and prints the results to the stdout in csv format. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # build output file name - file_csv_name = args.csv + '.csv' - + file_csv_name = args.csv + ".csv" + # check if output file exists if not args.force: if os.path.exists(file_csv_name): - logger.warning('The output file {} already exists. Skipping.'.format(file_csv_name)) + logger.warning( + "The output file {} already exists. 
Skipping.".format(file_csv_name) + ) sys.exit(0) - + # write header line - print('image;min;max\n') - + print("image;min;max\n") + # iterate over input images for image in args.images: - # get and prepare image data - logger.info('Processing image {}...'.format(image)) + logger.info("Processing image {}...".format(image)) image_data, _ = load(image) - + # count number of labels and flag a warning if they reach the ushort border min_value = image_data.min() - max_value = image_data.max() - + max_value = image_data.max() + # count number of labels and write - print('{};{};{}\n'.format(image.split('/')[-1], min_value, max_value)) - + print("{};{};{}\n".format(image.split("/")[-1], min_value, max_value)) + sys.stdout.flush() - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('csv', help='The file to store the results in (\wo suffix).') - parser.add_argument('images', nargs='+', help='One or more images.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - - return parser - + parser.add_argument("csv", help="The file to store the results in (\wo suffix).") + parser.add_argument("images", nargs="+", help="One or more images.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + + return parser + + if __name__ == "__main__": - main() - \ No newline at end of file + main() diff --git a/bin/medpy_extract_sub_volume.py b/bin/medpy_extract_sub_volume.py index f27fc95e..dfbb16a6 100755 --- a/bin/medpy_extract_sub_volume.py +++ b/bin/medpy_extract_sub_volume.py @@ -19,22 +19,24 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging -import sys import os +import sys + +# build-in modules +from argparse import RawTextHelpFormatter # third-party modules import scipy -# path changes - # own modules from medpy.core import ArgumentError, Logger from medpy.io import load, save +# path changes + + # information __author__ = "Oskar Maier" __version__ = "r0.3.0, 2011-12-11" @@ -44,7 +46,7 @@ Takes a medical image of arbitrary dimensions and the dimensions of a sub-volume that lies inside the dimensions of this images. Extracts the sub-volume from the supplied image and saves it. - + The volume to be extracted is defined by its slices, the syntax is the same as for numpy array indexes (i.e. starting with zero-index, the first literal (x) of any x:y included and the second (y) excluded). @@ -56,90 +58,133 @@ Note here the trailing colon. Note to take into account the input images orientation when supplying the sub-volume. 
-
+
 Copyright (C) 2013 Oskar Maier
 This program comes with ABSOLUTELY NO WARRANTY; This is free software,
 and you are welcome to redistribute it under certain conditions; see
-the LICENSE file or for details.
+the LICENSE file or for details.
 """

+
 # code
 def main():
     # parse cmd arguments
     parser = getParser()
     parser.parse_args()
     args = getArguments(parser)
-
+
     # prepare logger
     logger = Logger.getInstance()
-    if args.debug: logger.setLevel(logging.DEBUG)
-    elif args.verbose: logger.setLevel(logging.INFO)
-
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.verbose:
+        logger.setLevel(logging.INFO)
+
     # check if output image exists
     if not args.force:
         if os.path.exists(args.output + args.image[-4:]):
-            logger.warning('The output file {} already exists. Breaking.'.format(args.output + args.image[-4:]))
+            logger.warning(
+                "The output file {} already exists. Breaking.".format(
+                    args.output + args.image[-4:]
+                )
+            )
             exit(1)
-
+
     # load images
     image_data, image_header = load(args.image)
-
+
     # check image dimensions against sub-volume dimensions
     if len(image_data.shape) != len(args.volume):
-        logger.critical('The supplied input image is of different dimension as the sub volume requested ({} to {})'.format(len(image_data.shape), len(args.volume)))
-        raise ArgumentError('The supplied input image is of different dimension as the sub volume requested ({} to {})'.format(len(image_data.shape), len(args.volume)))
-
-    # execute extraction of the sub-area
-    logger.info('Extracting sub-volume...')
+        logger.critical(
+            "The supplied input image has a different dimensionality than the requested sub-volume ({} to {})".format(
+                len(image_data.shape), len(args.volume)
+            )
+        )
+        raise ArgumentError(
+            "The supplied input image has a different dimensionality than the requested sub-volume ({} to {})".format(
+                len(image_data.shape), len(args.volume)
+            )
+        )
+
+    # execute extraction of the sub-area
+    logger.info("Extracting sub-volume...")
     index = [slice(x[0], x[1]) for x in args.volume]
     volume = image_data[index]
-
+
     # check if the output image contains data
     if 0 == len(volume):
-        logger.exception('The extracted sub-volume is of zero-size. This usual means that the supplied volume coordinates and the image coordinates do not intersect. Exiting the application.')
+        logger.exception(
+            "The extracted sub-volume is of zero-size. This usually means that the supplied volume coordinates and the image coordinates do not intersect. Exiting the application."
+        )
         sys.exit(-1)
-
+
     # squeeze extracted sub-volume for the case in which one dimensions has been eliminated
     volume = scipy.squeeze(volume)
-
-    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))
-
+
+    logger.debug("Extracted volume is of shape {}.".format(volume.shape))
+
     # save results in same format as input image
     save(volume, args.output, image_header, args.force)
-
-    logger.info('Successfully terminated.')
-
+    logger.info("Successfully terminated.")
+
+
 def getArguments(parser):
     "Provides additional validation of the arguments collected by argparse."
    args = parser.parse_args()
     # parse volume and adapt to zero-indexing
     try:
+
         def _to_int_or_none(string):
-            if 0 == len(string): return None
+            if 0 == len(string):
+                return None
             return int(string)
-        def _to_int_or_none_double (string):
-            if 0 == len(string): return [None, None]
-            return list(map(_to_int_or_none, string.split(':')))
-        args.volume = list(map(_to_int_or_none_double, args.volume.split(',')))
+
+        def _to_int_or_none_double(string):
+            if 0 == len(string):
+                return [None, None]
+            return list(map(_to_int_or_none, string.split(":")))
+
+        args.volume = list(map(_to_int_or_none_double, args.volume.split(",")))
         args.volume = [(x[0], x[1]) for x in args.volume]
     except (ValueError, IndexError) as e:
-        raise ArgumentError('Maleformed volume parameter "{}", see description with -h flag.'.format(args.volume), e)
+        raise ArgumentError(
+            'Malformed volume parameter "{}", see description with -h flag.'.format(
+                args.volume
+            ),
+            e,
+        )
     return args

+
 def getParser():
     "Creates and returns the argparse parser object."
-    parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter)
-
-    parser.add_argument('image', help='The source volume.')
-    parser.add_argument('output', help='The target volume.')
-    parser.add_argument('volume', help='The coordinated of the sub-volume of the images that should be extracted.\nExample: 30:59,40:67,45:75 for a 3D image.\nSee -h for more information.')
-    parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.')
-    parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
-    parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
-
-    return parser
-
+    parser = argparse.ArgumentParser(
+        description=__description__, formatter_class=RawTextHelpFormatter
+    )
+
+    parser.add_argument("image", help="The source volume.")
+    parser.add_argument("output", help="The target volume.")
+    parser.add_argument(
+        "volume",
+        help="The coordinates of the sub-volume of the image that should be extracted.\nExample: 30:59,40:67,45:75 for a 3D image.\nSee -h for more information.",
+    )
+    parser.add_argument(
+        "-f",
+        dest="force",
+        action="store_true",
+        help="Set this flag to silently override files that exist.",
+    )
+    parser.add_argument(
+        "-v", dest="verbose", action="store_true", help="Display more information."
+    )
+    parser.add_argument(
+        "-d", dest="debug", action="store_true", help="Display debug information."
+    )
+
+    return parser
+
+
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/bin/medpy_extract_sub_volume_auto.py b/bin/medpy_extract_sub_volume_auto.py
index 7bdee618..ec9a302f 100755
--- a/bin/medpy_extract_sub_volume_auto.py
+++ b/bin/medpy_extract_sub_volume_auto.py
@@ -19,20 +19,22 @@
 along with this program. If not, see .
 """

-# build-in modules
-from argparse import RawTextHelpFormatter
 import argparse
 import logging
 import os

-# third-party modules
-
-# path changes
+# build-in modules
+from argparse import RawTextHelpFormatter

 # own modules
 from medpy.core import ArgumentError, Logger
 from medpy.io import load, save

+# third-party modules
+
+# path changes
+
+
 # information
 __author__ = "Oskar Maier"
 __version__ = "r0.2.1, 2012-05-17"
@@ -42,95 +44,146 @@
 Takes a medical image of arbitrary dimensions and splits it into a number
 of sub-volumes along the supplied dimensions. The maximum size of each
 such created volume can be supplied.
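+
+As a rough sketch of the cut computation (the numbers are an example;
+integer division is assumed throughout):
+
+    length, maxsize = 155, 50  # extent of the cut dimension, size limit
+
+    no_sub_volumes = length // maxsize + 1  # -> 4
+    slices_per_volume = length // no_sub_volumes  # -> 38
+
+    cuts = [
+        (i * slices_per_volume, (i + 1) * slices_per_volume)
+        for i in range(no_sub_volumes)
+    ]
+    cuts[-1] = (cuts[-1][0], length)  # last cut is widened to the image border
+
+    # cuts == [(0, 38), (38, 76), (76, 114), (114, 155)]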
-
+
 Note to take into account the input images orientation when supplying the
 cut dimension.
 Note that the image offsets are not preserved.
-
+
 Copyright (C) 2013 Oskar Maier
 This program comes with ABSOLUTELY NO WARRANTY; This is free software,
 and you are welcome to redistribute it under certain conditions; see
-the LICENSE file or for details.
+the LICENSE file or for details.
 """

+
 # code
 def main():
     # parse cmd arguments
     parser = getParser()
     parser.parse_args()
     args = getArguments(parser)
-
+
     # prepare logger
     logger = Logger.getInstance()
-    if args.debug: logger.setLevel(logging.DEBUG)
-    elif args.verbose: logger.setLevel(logging.INFO)
-
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.verbose:
+        logger.setLevel(logging.INFO)
+
     # load input image
-    logger.info('Loading {}...'.format(args.image))
+    logger.info("Loading {}...".format(args.image))
     image_data, image_header = load(args.image)
-
+
     # check if supplied cut dimension is inside the input images dimensions
     if args.dimension < 0 or args.dimension >= image_data.ndim:
-        logger.critical('The supplied cut-dimensions {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
-        raise ArgumentError('The supplied cut-dimensions {} is invalid. The input image has only {} dimensions.'.format(args.dimension, image_data.ndim))
-
+        logger.critical(
+            "The supplied cut-dimension {} is invalid. The input image has only {} dimensions.".format(
+                args.dimension, image_data.ndim
+            )
+        )
+        raise ArgumentError(
+            "The supplied cut-dimension {} is invalid. The input image has only {} dimensions.".format(
+                args.dimension, image_data.ndim
+            )
+        )
+
     # prepare output filenames
-    name_output = args.output.replace('{}', '{:03d}')
-
+    name_output = args.output.replace("{}", "{:03d}")
+
     # determine cut lines
-    no_sub_volumes = image_data.shape[args.dimension] / args.maxsize + 1 # int-division is desired
-    slices_per_volume = image_data.shape[args.dimension] / no_sub_volumes # int-division is desired
-
+    no_sub_volumes = (
+        image_data.shape[args.dimension] // args.maxsize + 1
+    )  # int-division is desired, hence //
+    slices_per_volume = (
+        image_data.shape[args.dimension] // no_sub_volumes
+    )  # int-division is desired, hence //
+
     # construct processing dict for each sub-volume
     processing_array = []
     for i in range(no_sub_volumes):
         processing_array.append(
-            {'path': name_output.format(i+1),
-             'cut': (i * slices_per_volume, (i + 1) * slices_per_volume)})
-        if no_sub_volumes - 1 == i: # last volume has to have increased cut end
-            processing_array[i]['cut'] = (processing_array[i]['cut'][0], image_data.shape[args.dimension])
+            {
+                "path": name_output.format(i + 1),
+                "cut": (i * slices_per_volume, (i + 1) * slices_per_volume),
+            }
+        )
+        if no_sub_volumes - 1 == i:  # last volume has to have increased cut end
+            processing_array[i]["cut"] = (
+                processing_array[i]["cut"][0],
+                image_data.shape[args.dimension],
+            )

     # construct base indexing list
     index = [slice(None) for _ in range(image_data.ndim)]
-
+
     # execute extraction of the sub-volumes
-    logger.info('Extracting sub-volumes...')
+    logger.info("Extracting sub-volumes...")
     for dic in processing_array:
         # check if output images exists
         if not args.force:
-            if os.path.exists(dic['path']):
-                logger.warning('The output file {} already exists. Skipping this volume.'.format(dic['path']))
+            if os.path.exists(dic["path"]):
+                logger.warning(
+                    "The output file {} already exists. 
Skipping this volume.".format( + dic["path"] + ) + ) continue - + # extracting sub-volume - index[args.dimension] = slice(dic['cut'][0], dic['cut'][1]) + index[args.dimension] = slice(dic["cut"][0], dic["cut"][1]) volume = image_data[index] - - logger.debug('Extracted volume is of shape {}.'.format(volume.shape)) - + + logger.debug("Extracted volume is of shape {}.".format(volume.shape)) + # saving sub-volume in same format as input image - logger.info('Saving cut {} as {}...'.format(dic['cut'], dic['path'])) - save(volume, dic['path'], image_header, args.force) - - logger.info('Successfully terminated.') + logger.info("Saving cut {} as {}...".format(dic["cut"], dic["path"])) + save(volume, dic["path"], image_header, args.force) + + logger.info("Successfully terminated.") + - def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('image', help='An image of arbitrary dimensions that should be split.') - parser.add_argument('output', help='Output volumes. Has to include the sequence "{}" in the place where the volume number should be placed.') - parser.add_argument('dimension', type=int, help='The dimension in which direction to split (starting from 0:x).') - parser.add_argument('maxsize', type=int, help='The produced volumes will always be smaller than this size (in terms of slices in the cut-dimension).') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser - + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "image", help="An image of arbitrary dimensions that should be split." + ) + parser.add_argument( + "output", + help='Output volumes. Has to include the sequence "{}" in the place where the volume number should be placed.', + ) + parser.add_argument( + "dimension", + type=int, + help="The dimension in which direction to split (starting from 0:x).", + ) + parser.add_argument( + "maxsize", + type=int, + help="The produced volumes will always be smaller than this size (in terms of slices in the cut-dimension).", + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_extract_sub_volume_by_example.py b/bin/medpy_extract_sub_volume_by_example.py index 909e24bf..4d7faba6 100755 --- a/bin/medpy_extract_sub_volume_by_example.py +++ b/bin/medpy_extract_sub_volume_by_example.py @@ -19,22 +19,24 @@ along with this program. If not, see . 
""" -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging -import sys import os +import sys + +# build-in modules +from argparse import RawTextHelpFormatter # third-party modules import numpy -# path changes - # own modules from medpy.core import ArgumentError, Logger from medpy.io import load, save +# path changes + + # information __author__ = "Oskar Maier" __version__ = "r0.2.0, 2011-12-11" @@ -47,111 +49,151 @@ for the extraction of a sub-volume that lies inside the dimensions of the medical images. Extracts the sub-volume from the supplied image and saves it. - + Note that both images must be of the same dimensionality, otherwise an exception is thrown. Note that the input images offset is not taken into account. Note to take into account the input images orientation. - + This is a convenience script, combining the functionalities of extract_mask_position and extract_sub_volume. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load mask - logger.info('Loading mask {}...'.format(args.mask)) + logger.info("Loading mask {}...".format(args.mask)) mask_image, _ = load(args.mask) - + # store mask images shape for later check against the input image - mask_image_shape = mask_image.shape - + mask_image_shape = mask_image.shape + # extract the position of the foreground object in the mask image - logger.info('Extract the position of the foreground object...') + logger.info("Extract the position of the foreground object...") positions = mask_image.nonzero() - positions = [(max(0, positions[i].min() - args.offset), positions[i].max() + 1 + args.offset) - for i in range(len(positions))] # crop negative values - logger.debug('Extracted position is {}.'.format(positions)) + positions = [ + (max(0, positions[i].min() - args.offset), positions[i].max() + 1 + args.offset) + for i in range(len(positions)) + ] # crop negative values + logger.debug("Extracted position is {}.".format(positions)) # load image - logger.info('Loading image {}...'.format(args.image)) + logger.info("Loading image {}...".format(args.image)) image_data, image_header = load(args.image) - + # check if the mask image and the input image are of the same shape if mask_image_shape != image_data.shape: - raise ArgumentError('The two input images are of different shape (mask: {} and image: {}).'.format(mask_image_shape, image_data.shape)) - - # execute extraction of the sub-area - logger.info('Extracting sub-volume...') + raise ArgumentError( + "The two input images are of different shape (mask: {} and image: {}).".format( + mask_image_shape, image_data.shape + ) + ) + + # execute extraction of the sub-area + logger.info("Extracting sub-volume...") index = tuple([slice(x[0], x[1]) for x in positions]) volume = image_data[index] - + # check if the output image contains data if 0 == len(volume): - logger.exception('The extracted sub-volume is of zero-size. 
This usual means that the mask image contained no foreground object.')
+        logger.exception(
+            "The extracted sub-volume is of zero-size. This usually means that the mask image contained no foreground object."
+        )
         sys.exit(0)
-
-    logger.debug('Extracted volume is of shape {}.'.format(volume.shape))
-
+
+    logger.debug("Extracted volume is of shape {}.".format(volume.shape))
+
     # get base origin of the image
-    origin_base = numpy.array([0] * image_data.ndim) # for backwards compatibility
-
+    origin_base = numpy.array([0] * image_data.ndim)  # for backwards compatibility
+
     # modify the volume offset to imitate numpy behavior (e.g. wrap negative values)
     offset = numpy.array([x[0] for x in positions])
     for i in range(0, len(offset)):
-        if None == offset[i]: offset[i] = 0
-    offset[offset<0] += numpy.array(image_data.shape)[offset<0] # wrap around
-    offset[offset<0] = 0 # set negative to zero
-
+        if offset[i] is None:
+            offset[i] = 0
+    offset[offset < 0] += numpy.array(image_data.shape)[offset < 0]  # wrap around
+    offset[offset < 0] = 0  # set negative to zero
+
     # calculate final new origin
     origin = origin_base + offset
-
-    logger.debug('Final origin created as {} + {} = {}.'.format(origin_base, offset, origin))
-
+
+    logger.debug(
+        "Final origin created as {} + {} = {}.".format(origin_base, offset, origin)
+    )
+
     # save results in same format as input image
-    logger.info('Saving extracted volume...')
+    logger.info("Saving extracted volume...")
     save(volume, args.output, image_header, args.force)
-
-    logger.info('Successfully terminated.')
-
+    logger.info("Successfully terminated.")
+
+
 def getArguments(parser):
     "Provides additional validation of the arguments collected by argparse."
-    args = parser.parse_args()
+    args = parser.parse_args()
     # check output image exists if override not forced
     if not args.force:
         if os.path.exists(args.output + args.image[-4:]):
-            raise ArgumentError('The supplied output file {} already exists. Run -f/force flag to override.'.format(args.output))
+            raise ArgumentError(
+                "The supplied output file {} already exists. Use the -f (force) flag to override.".format(
+                    args.output
+                )
+            )
     return args

+
 def getParser():
     "Creates and returns the argparse parser object."
-    parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter)
-
-    parser.add_argument('image', help='The input image.')
-    parser.add_argument('output', help='The resulting sub-volume.')
-    parser.add_argument('mask', help='A mask image containing a single foreground object (non-zero).')
-    parser.add_argument('-o', '--offset', dest='offset', default=0, type=int, help='Set an offset by which the extracted sub-volume size should be increased in all directions.')
-    parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.')
-    parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
-    parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
-
-    return parser
-
+    parser = argparse.ArgumentParser(
+        description=__description__, formatter_class=RawTextHelpFormatter
+    )
+
+    parser.add_argument("image", help="The input image.")
+    parser.add_argument("output", help="The resulting sub-volume.")
+    parser.add_argument(
+        "mask", help="A mask image containing a single foreground object (non-zero)."
+ ) + parser.add_argument( + "-o", + "--offset", + dest="offset", + default=0, + type=int, + help="Set an offset by which the extracted sub-volume size should be increased in all directions.", + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_fit_into_shape.py b/bin/medpy_fit_into_shape.py index 81d89ad8..db5c0c6b 100755 --- a/bin/medpy_fit_into_shape.py +++ b/bin/medpy_fit_into_shape.py @@ -19,22 +19,19 @@ along with this program. If not, see . """ +import argparse +import logging + # build-in modules import os -import logging -import argparse # third-party modules import numpy -from scipy.ndimage import zoom -from scipy.ndimage import distance_transform_edt, binary_erosion -from scipy.ndimage import label # own modules from medpy.core import Logger -from medpy.filter import resample, bounding_box +from medpy.io import load, save from medpy.utilities import argparseu -from medpy.io import load, save, header # information __author__ = "Oskar Maier" @@ -50,9 +47,10 @@ Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see -the LICENSE file or for details. +the LICENSE file or for details. """ + # code def main(): parser = getParser() @@ -60,20 +58,26 @@ def main(): # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # loading input images img, hdr = load(args.input) - + # check shape dimensionality if not len(args.shape) == img.ndim: - parser.error('The image has {} dimensions, but {} shape parameters have been supplied.'.format(img.ndim, len(args.shape))) - + parser.error( + "The image has {} dimensions, but {} shape parameters have been supplied.".format( + img.ndim, len(args.shape) + ) + ) + # check if output image exists if not args.force and os.path.exists(args.output): - parser.error('The output image {} already exists.'.format(args.output)) - + parser.error("The output image {} already exists.".format(args.output)) + # compute required cropping and extention slicers_cut = [] slicers_extend = [] @@ -88,32 +92,52 @@ def main(): slicers_extend[-1] = slice(cutoff_left, -1 * cutoff_right) else: slicers_cut[-1] = slice(cutoff_left, -1 * cutoff_right) - + # crop original image img = img[slicers_cut] - + # create output image and place input image centered out = numpy.zeros(args.shape, img.dtype) out[slicers_extend] = img - + # saving the resulting image save(out, args.output, hdr, args.force) - + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
- parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__description__) - parser.add_argument('input', help='the input image') - parser.add_argument('output', help='the output image') - parser.add_argument('shape', type=argparseu.sequenceOfIntegersGt, help='the desired shape in colon-separated values, e.g. 255,255,32') - - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite existing files') + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__description__, + ) + parser.add_argument("input", help="the input image") + parser.add_argument("output", help="the output image") + parser.add_argument( + "shape", + type=argparseu.sequenceOfIntegersGt, + help="the desired shape in colon-separated values, e.g. 255,255,32", + ) + + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="verbose output" + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="overwrite existing files", + ) return parser - + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_gradient.py b/bin/medpy_gradient.py index 3a6cd51f..8470c57a 100755 --- a/bin/medpy_gradient.py +++ b/bin/medpy_gradient.py @@ -27,12 +27,12 @@ import scipy from scipy.ndimage import generic_gradient_magnitude, prewitt -# path changes +from medpy.core import Logger # own modules from medpy.io import load, save -from medpy.core import Logger +# path changes # information @@ -44,60 +44,77 @@ Creates a height map of the input images using the gradient magnitude filter. The pixel type of the resulting image will be float. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # laod input image data_input, header_input = load(args.input) - -# # check if output image exists -# if not args.force: -# if os.path.exists(image_gradient_name): -# logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name)) -# continue - + + # # check if output image exists + # if not args.force: + # if os.path.exists(image_gradient_name): + # logger.warning('The output image {} already exists. 
Skipping this step.'.format(image_gradient_name)) + # continue + # prepare result image data_output = scipy.zeros(data_input.shape, dtype=scipy.float32) - + # apply the gradient magnitude filter - logger.info('Computing the gradient magnitude with Prewitt operator...') - generic_gradient_magnitude(data_input, prewitt, output=data_output) # alternative to prewitt is sobel - + logger.info("Computing the gradient magnitude with Prewitt operator...") + generic_gradient_magnitude( + data_input, prewitt, output=data_output + ) # alternative to prewitt is sobel + # save resulting mask save(data_output, args.output, header_input, args.force) - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - - return parser - + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + + return parser + + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_graphcut_label.py b/bin/medpy_graphcut_label.py index 6a921945..eb5cd496 100755 --- a/bin/medpy_graphcut_label.py +++ b/bin/medpy_graphcut_label.py @@ -19,24 +19,24 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging import os +# build-in modules +from argparse import RawTextHelpFormatter + # third-party modules import scipy -# path changes +from medpy import filter, graphcut # own modules from medpy.core import ArgumentError, Logger -from medpy.io import load, save -from medpy import graphcut -from medpy import filter from medpy.graphcut.wrapper import split_marker +from medpy.io import load, save +# path changes # information @@ -46,123 +46,167 @@ __status__ = "Release" __description__ = """ Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. - + This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. - + In the case of the stawiaski boundary term, this is the gradient image. In the case of the difference of means, it is the original image. - + Furthermore the algorithm requires the region map of the original image and an integer image with foreground and background markers. - + Additionally a filename for the created binary mask marking foreground and background has to be supplied. 
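+
+A minimal Python sketch of the same pipeline, mirroring the code below
+(the file names are placeholders):
+
+    from medpy import filter, graphcut
+    from medpy.graphcut.wrapper import split_marker
+    from medpy.io import load
+
+    regions, hdr = load("regions.nii")  # region/label image
+    markers, _ = load("markers.nii")  # 1 = foreground, 2 = background
+    gradient, _ = load("gradient.nii")  # required by the stawiaski term
+
+    fgmarkers, bgmarkers = split_marker(markers)
+    regions = filter.relabel(regions)  # label ids have to start from 1
+
+    gcgraph = graphcut.graph_from_labels(
+        regions,
+        fgmarkers,
+        bgmarkers,
+        boundary_term=graphcut.energy_label.boundary_stawiaski,
+        boundary_term_args=(gradient),
+    )
+    gcgraph.maxflow()  # executes the min-cut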
- + Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. Exiting.'.format(args.output)) + logger.warning( + "The output image {} already exists. Exiting.".format(args.output) + ) exit(-1) - + # select boundary term - if args.boundary == 'stawiaski': + if args.boundary == "stawiaski": boundary_term = graphcut.energy_label.boundary_stawiaski - logger.info('Selected boundary term: stawiaski') + logger.info("Selected boundary term: stawiaski") else: boundary_term = graphcut.energy_label.boundary_difference_of_means - logger.info('Selected boundary term: difference of means') + logger.info("Selected boundary term: difference of means") # load input images region_image_data, reference_header = load(args.region) badditional_image_data, _ = load(args.badditional) markers_image_data, _ = load(args.markers) - + # split marker image into fg and bg images fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) - + # check if all images dimensions are the same - if not (badditional_image_data.shape == region_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): - logger.critical('Not all of the supplied images are of the same shape.') - raise ArgumentError('Not all of the supplied images are of the same shape.') - + if not ( + badditional_image_data.shape + == region_image_data.shape + == fgmarkers_image_data.shape + == bgmarkers_image_data.shape + ): + logger.critical("Not all of the supplied images are of the same shape.") + raise ArgumentError("Not all of the supplied images are of the same shape.") + # recompute the label ids to start from id = 1 - logger.info('Relabel input image...') + logger.info("Relabel input image...") region_image_data = filter.relabel(region_image_data) # generate graph - logger.info('Preparing graph...') - gcgraph = graphcut.graph_from_labels(region_image_data, - fgmarkers_image_data, - bgmarkers_image_data, - boundary_term = boundary_term, - boundary_term_args = (badditional_image_data)) # second is directedness of graph , 0) - - logger.info('Removing images that are not longer required from memory...') + logger.info("Preparing graph...") + gcgraph = graphcut.graph_from_labels( + region_image_data, + fgmarkers_image_data, + bgmarkers_image_data, + boundary_term=boundary_term, + boundary_term_args=(badditional_image_data), + ) # second is directedness of graph , 0) + + logger.info("Removing images that are not longer required from memory...") del fgmarkers_image_data del bgmarkers_image_data del badditional_image_data - + # execute min-cut - logger.info('Executing min-cut...') + 
logger.info("Executing min-cut...") maxflow = gcgraph.maxflow() - logger.debug('Maxflow is {}'.format(maxflow)) - + logger.debug("Maxflow is {}".format(maxflow)) + # apply results to the region image - logger.info('Applying results...') - mapping = [0] # no regions with id 1 exists in mapping, entry used as padding - mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(region_image_data)]) + logger.info("Applying results...") + mapping = [0] # no regions with id 1 exists in mapping, entry used as padding + mapping.extend( + [ + 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 + for x in scipy.unique(region_image_data) + ] + ) region_image_data = filter.relabel_map(region_image_data, mapping) - + # save resulting mask - save(region_image_data.astype(scipy.bool_), args.output, reference_header, args.force) + save( + region_image_data.astype(scipy.bool_), args.output, reference_header, args.force + ) + + logger.info("Successfully terminated.") - logger.info('Successfully terminated.') def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.') - parser.add_argument('region', help='The region image of the image to segment.') - parser.add_argument('markers', help='Binary image containing the foreground (=1) and background (=2) markers.') - parser.add_argument('output', help='The output image containing the segmentation.') - parser.add_argument('--boundary', default='stawiaski', help='The boundary term to use. Note that difference of means (means) requires the original image, while stawiaski requires the gradient image of the original image to be passed to badditional.', choices=['means', 'stawiaski']) - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "badditional", + help="The additional image required by the boundary term. See there for details.", + ) + parser.add_argument("region", help="The region image of the image to segment.") + parser.add_argument( + "markers", + help="Binary image containing the foreground (=1) and background (=2) markers.", + ) + parser.add_argument("output", help="The output image containing the segmentation.") + parser.add_argument( + "--boundary", + default="stawiaski", + help="The boundary term to use. Note that difference of means (means) requires the original image, while stawiaski requires the gradient image of the original image to be passed to badditional.", + choices=["means", "stawiaski"], + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." 
+ ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_graphcut_label_bgreduced.py b/bin/medpy_graphcut_label_bgreduced.py index f354ff60..28f2ef34 100755 --- a/bin/medpy_graphcut_label_bgreduced.py +++ b/bin/medpy_graphcut_label_bgreduced.py @@ -19,26 +19,26 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse -import logging import itertools +import logging import os +# build-in modules +from argparse import RawTextHelpFormatter + # third-party modules import scipy from scipy import ndimage -# path changes +from medpy import filter, graphcut # own modules from medpy.core import ArgumentError, Logger -from medpy.io import load, save -from medpy import graphcut -from medpy import filter from medpy.graphcut.wrapper import split_marker +from medpy.io import load, save +# path changes # information @@ -49,176 +49,223 @@ __description__ = """ !Modified version of original GC label, as reduces the volume sizes using the background markers. - + Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. - + This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. - + In the case of the stawiaski boundary term, this is the gradient image. In the case of the difference of means, it is the original image. - + Furthermore the algorithm requires the region map of the original image, a binary image with foreground markers and a binary image with background markers. - + Additionally a filename for the created binary mask marking foreground and background has to be supplied. - + Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. Exiting.'.format(args.output)) + logger.warning( + "The output image {} already exists. 
Exiting.".format(args.output) + ) exit(-1) # load input images region_image_data, reference_header = load(args.region) markers_image_data, _ = load(args.markers) gradient_image_data, _ = load(args.gradient) - + # split marker image into fg and bg images - logger.info('Extracting foreground and background markers...') + logger.info("Extracting foreground and background markers...") fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) - + # check if all images dimensions are the same shape - if not (gradient_image_data.shape == region_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): - logger.critical('Not all of the supplied images are of the same shape.') - raise ArgumentError('Not all of the supplied images are of the same shape.') - + if not ( + gradient_image_data.shape + == region_image_data.shape + == fgmarkers_image_data.shape + == bgmarkers_image_data.shape + ): + logger.critical("Not all of the supplied images are of the same shape.") + raise ArgumentError("Not all of the supplied images are of the same shape.") + # collect cut objects cut_xy = __get_bg_bounding_pipe(bgmarkers_image_data) - + # cut volumes old_size = region_image_data.shape gradient_image_data = gradient_image_data[cut_xy] region_image_data = region_image_data[cut_xy] fgmarkers_image_data = fgmarkers_image_data[cut_xy] bgmarkers_image_data = bgmarkers_image_data[cut_xy] - + # recompute the label ids to start from id = 1 - logger.info('Relabel input image...') + logger.info("Relabel input image...") region_image_data = filter.relabel(region_image_data) # generate graph - logger.info('Preparing graph...') - gcgraph = graphcut.graph_from_labels(region_image_data, - fgmarkers_image_data, - bgmarkers_image_data, - boundary_term = graphcut.energy_label.boundary_stawiaski, - boundary_term_args = (gradient_image_data)) # second is directedness of graph , 0) - - logger.info('Removing images that are not longer required from memory...') + logger.info("Preparing graph...") + gcgraph = graphcut.graph_from_labels( + region_image_data, + fgmarkers_image_data, + bgmarkers_image_data, + boundary_term=graphcut.energy_label.boundary_stawiaski, + boundary_term_args=(gradient_image_data), + ) # second is directedness of graph , 0) + + logger.info("Removing images that are not longer required from memory...") del fgmarkers_image_data del bgmarkers_image_data del gradient_image_data - + # execute min-cut - logger.info('Executing min-cut...') + logger.info("Executing min-cut...") maxflow = gcgraph.maxflow() - logger.debug('Maxflow is {}'.format(maxflow)) - + logger.debug("Maxflow is {}".format(maxflow)) + # apply results to the region image - logger.info('Applying results...') - mapping = [0] # no regions with id 1 exists in mapping, entry used as padding - mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(region_image_data)]) + logger.info("Applying results...") + mapping = [0] # no regions with id 1 exists in mapping, entry used as padding + mapping.extend( + [ + 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 + for x in scipy.unique(region_image_data) + ] + ) region_image_data = filter.relabel_map(region_image_data, mapping) - + # generating final image by increasing the size again output_image_data = scipy.zeros(old_size, dtype=scipy.bool_) output_image_data[cut_xy] = region_image_data - + # save resulting mask save(output_image_data, args.output, reference_header, args.force) - 
logger.info('Successfully terminated.') + logger.info("Successfully terminated.") + def __get_bg_bounding_pipe(bgmarkers): # constants xdim = 0 ydim = 1 - + # compute biggest bb in direction bb = __xd_iterator_pass_on(bgmarkers, (xdim, ydim), __extract_bbox) - + slicer = [slice(None)] * bgmarkers.ndim slicer[xdim] = bb[0] slicer[ydim] = bb[1] - + return slicer - - + + def __xd_iterator_pass_on(arr, view, fun): """ Like xd_iterator, but the fun return values are always passed on to the next and only the last returned. """ # create list of iterations - iterations = [[None] if dim in view else list(range(arr.shape[dim])) for dim in range(arr.ndim)] - + iterations = [ + [None] if dim in view else list(range(arr.shape[dim])) + for dim in range(arr.ndim) + ] + # iterate, create slicer, execute function and collect results passon = None for indices in itertools.product(*iterations): - slicer = [slice(None) if idx is None else slice(idx, idx + 1) for idx in indices] + slicer = [ + slice(None) if idx is None else slice(idx, idx + 1) for idx in indices + ] passon = fun(scipy.squeeze(arr[slicer]), passon) - + return passon - + + def __extract_bbox(arr, bb_old): "Extracts the bounding box of an binary objects hole (assuming only one in existence)." - hole = ndimage.binary_fill_holes(arr)- arr - bb_list = ndimage.find_objects(ndimage.binary_dilation(hole, iterations = 1)) - if 0 == len(bb_list): return bb_old - else: bb = bb_list[0] - - if not bb_old: return list(bb) - + hole = ndimage.binary_fill_holes(arr) - arr + bb_list = ndimage.find_objects(ndimage.binary_dilation(hole, iterations=1)) + if 0 == len(bb_list): + return bb_old + else: + bb = bb_list[0] + + if not bb_old: + return list(bb) + for i in range(len(bb_old)): - bb_old[i] = slice(min(bb_old[i].start, bb[i].start), - max(bb_old[i].stop, bb[i].stop)) + bb_old[i] = slice( + min(bb_old[i].start, bb[i].start), max(bb_old[i].stop, bb[i].stop) + ) return bb_old + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('gradient', help='The gradient magnitude image of the image to segment.') - parser.add_argument('region', help='The region image of the image to segment.') - parser.add_argument('markers', help='Binary image containing the foreground (=1) and background (=2) markers.') - parser.add_argument('output', help='The output image containing the segmentation.') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "gradient", help="The gradient magnitude image of the image to segment." 
+ ) + parser.add_argument("region", help="The region image of the image to segment.") + parser.add_argument( + "markers", + help="Binary image containing the foreground (=1) and background (=2) markers.", + ) + parser.add_argument("output", help="The output image containing the segmentation.") + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_graphcut_label_w_regional.py b/bin/medpy_graphcut_label_w_regional.py index 3e6ae5fc..66092f41 100755 --- a/bin/medpy_graphcut_label_w_regional.py +++ b/bin/medpy_graphcut_label_w_regional.py @@ -19,24 +19,24 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging import os +# build-in modules +from argparse import RawTextHelpFormatter + # third-party modules import scipy -# path changes +from medpy import filter, graphcut # own modules from medpy.core import ArgumentError, Logger -from medpy.io import load, save -from medpy import graphcut -from medpy import filter from medpy.graphcut.wrapper import split_marker +from medpy.io import load, save +# path changes # information @@ -46,7 +46,7 @@ __status__ = "Development" __description__ = """ Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. - + This implementation does not only compute a boundary term but also a regional term which. The only available implementation up till now is the use of an atalas (i.e. a probability image of float values). The @@ -54,56 +54,61 @@ probability that the object is situated at this position. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. - + In the case of the stawiaski boundary term, this is the gradient image. In the case of the difference of means, it is the original image. - + Furthermore the algorithm requires the region map of the original image and an integer image with foreground and background markers. - + Additionally a filename for the created binary mask marking foreground and background has to be supplied. - + Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. 
Exiting.'.format(args.output)) + logger.warning( + "The output image {} already exists. Exiting.".format(args.output) + ) exit(-1) - + # select boundary term - if args.boundary == 'stawiaski': + if args.boundary == "stawiaski": boundary_term = graphcut.energy_label.boundary_stawiaski - logger.info('Selected boundary term: stawiaski') + logger.info("Selected boundary term: stawiaski") else: boundary_term = graphcut.energy_label.boundary_difference_of_means - logger.info('Selected boundary term: difference of means') - + logger.info("Selected boundary term: difference of means") + # select regional term - if args.regional == 'atlas': + if args.regional == "atlas": regional_term = graphcut.energy_label.regional_atlas else: regional_term = None @@ -111,84 +116,134 @@ def main(): # load input images region_image_data, reference_header = load(args.region) markers_image_data, _ = load(args.markers) - + # loading and splitting the marker image fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) - + badditional_image_data, _ = load(args.badditional) - - if 'radditional' in args: + + if "radditional" in args: radditional_image_data, _ = load(args.radditional) else: radditional_image_data = False - - + # check if all images dimensions are the same - if not (badditional_image_data.shape == region_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): - logger.critical('Not all of the supplied images are of the same shape.') - raise ArgumentError('Not all of the supplied images are of the same shape.') + if not ( + badditional_image_data.shape + == region_image_data.shape + == fgmarkers_image_data.shape + == bgmarkers_image_data.shape + ): + logger.critical("Not all of the supplied images are of the same shape.") + raise ArgumentError("Not all of the supplied images are of the same shape.") if not bool == type(radditional_image_data): if not (badditional_image_data.shape == radditional_image_data.shape): - logger.critical('Not all of the supplied images are of the same shape.') - raise ArgumentError('Not all of the supplied images are of the same shape.') - + logger.critical("Not all of the supplied images are of the same shape.") + raise ArgumentError("Not all of the supplied images are of the same shape.") + # recompute the label ids to start from id = 1 - logger.info('Relabel input image...') + logger.info("Relabel input image...") region_image_data = filter.relabel(region_image_data) # generate graph - logger.info('Preparing graph...') - gcgraph = graphcut.graph_from_labels(region_image_data, - fgmarkers_image_data, - bgmarkers_image_data, - regional_term = regional_term, - boundary_term = boundary_term, - regional_term_args = (radditional_image_data, args.alpha), - boundary_term_args = (badditional_image_data)) # second (optional) parameter is directedness of graph , 0) - - logger.info('Removing images that are not longer required from memory...') + logger.info("Preparing graph...") + gcgraph = graphcut.graph_from_labels( + region_image_data, + fgmarkers_image_data, + bgmarkers_image_data, + regional_term=regional_term, + boundary_term=boundary_term, + regional_term_args=(radditional_image_data, args.alpha), + boundary_term_args=(badditional_image_data), + ) # second (optional) parameter is directedness of graph , 0) + + logger.info("Removing images that are not longer required from memory...") del fgmarkers_image_data del bgmarkers_image_data del radditional_image_data del badditional_image_data - + # execute min-cut - logger.info('Executing 
min-cut...') + logger.info("Executing min-cut...") maxflow = gcgraph.maxflow() - logger.debug('Maxflow is {}'.format(maxflow)) - + logger.debug("Maxflow is {}".format(maxflow)) + # apply results to the region image - logger.info('Applying results...') - mapping = [0] # no regions with id 1 exists in mapping, entry used as padding - mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(region_image_data)]) + logger.info("Applying results...") + mapping = [0] # no regions with id 1 exists in mapping, entry used as padding + mapping.extend( + [ + 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 + for x in scipy.unique(region_image_data) + ] + ) region_image_data = filter.relabel_map(region_image_data, mapping) - + # save resulting mask - save(region_image_data.astype(scipy.bool_), args.output, reference_header, args.force) + save( + region_image_data.astype(scipy.bool_), args.output, reference_header, args.force + ) + + logger.info("Successfully terminated.") - logger.info('Successfully terminated.') def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.') - parser.add_argument('region', help='The region image of the image to segment.') - parser.add_argument('markers', help='Binary image containing the foreground (=1) and background (=2) markers.') - parser.add_argument('output', help='The output image containing the segmentation.') - parser.add_argument('--boundary', default='stawiaski', help='The boundary term to use. Note that difference of means (means) requires the original image, while stawiaski requires the gradient image of the original image to be passed to badditional.', choices=['means', 'stawiaski']) - parser.add_argument('--regional', default='none', help='The regional term to use. Note that the atlas requires to provide an atlas image.', choices=['none', 'atlas']) - parser.add_argument('--radditional', help='The additional image required by the regional term. See there for details.') - parser.add_argument('--alpha', type=float, help='The weight of the regional term compared to the boundary term.') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "badditional", + help="The additional image required by the boundary term. See there for details.", + ) + parser.add_argument("region", help="The region image of the image to segment.") + parser.add_argument( + "markers", + help="Binary image containing the foreground (=1) and background (=2) markers.", + ) + parser.add_argument("output", help="The output image containing the segmentation.") + parser.add_argument( + "--boundary", + default="stawiaski", + help="The boundary term to use. 
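For orientation, a hypothetical invocation combining the stawiaski boundary term with the atlas regional term could look as follows (all file names are illustrative, not from the source):

    medpy_graphcut_label_w_regional.py gradient.nii regions.nii markers.nii seg.nii --boundary stawiaski --regional atlas --radditional atlas.nii --alpha 0.5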
Note that difference of means (means) requires the original image, while stawiaski requires the gradient image of the original image to be passed to badditional.", + choices=["means", "stawiaski"], + ) + parser.add_argument( + "--regional", + default="none", + help="The regional term to use. Note that the atlas requires to provide an atlas image.", + choices=["none", "atlas"], + ) + parser.add_argument( + "--radditional", + help="The additional image required by the regional term. See there for details.", + ) + parser.add_argument( + "--alpha", + type=float, + help="The weight of the regional term compared to the boundary term.", + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_graphcut_label_wsplit.py b/bin/medpy_graphcut_label_wsplit.py index 3073560c..fbf83d17 100755 --- a/bin/medpy_graphcut_label_wsplit.py +++ b/bin/medpy_graphcut_label_wsplit.py @@ -19,22 +19,21 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging import os -# third-party modules - -# path changes +# build-in modules +from argparse import RawTextHelpFormatter # own modules from medpy.core import Logger +from medpy.graphcut.wrapper import graphcut_split, graphcut_stawiaski, split_marker from medpy.io import load, save -from medpy.graphcut.wrapper import split_marker, graphcut_split,\ - graphcut_stawiaski +# third-party modules + +# path changes # information @@ -45,97 +44,123 @@ __description__ = """ !Modified version of original GC label, as splits the volumes into more handy sizes before processing them. Also uses multiple subprocesses. - + Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. - + This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. - + In the case of the stawiaski boundary term, this is the gradient image. In the case of the difference of means, it is the original image. - + Furthermore the algorithm requires the region map of the original image, a binary image with foreground markers and a binary image with background markers. - + Additionally a filename for the created binary mask marking foreground and background has to be supplied. - + Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
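A hypothetical invocation, with illustrative file names matching the positional arguments defined below:

    medpy_graphcut_label_wsplit.py gradient.nii regions.nii markers.nii seg.nii -v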
""" + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. Exiting.'.format(args.output)) + logger.warning( + "The output image {} already exists. Exiting.".format(args.output) + ) exit(-1) - + # constants # the minimal edge length of a subvolume-cube ! has to be of type int! minimal_edge_length = 200 overlap = 20 - + # load input images region_image_data, reference_header = load(args.region) markers_image_data, _ = load(args.markers) gradient_image_data, _ = load(args.gradient) - + # split marker image into fg and bg images fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) - + # execute distributed graph cut - output_volume = graphcut_split(graphcut_stawiaski, - region_image_data, - gradient_image_data, - fgmarkers_image_data, - bgmarkers_image_data, - minimal_edge_length, - overlap) - + output_volume = graphcut_split( + graphcut_stawiaski, + region_image_data, + gradient_image_data, + fgmarkers_image_data, + bgmarkers_image_data, + minimal_edge_length, + overlap, + ) + # save resulting mask save(output_volume, args.output, reference_header, args.force) - logger.info('Successfully terminated.') + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - parser.add_argument('gradient', help='The gradient magnitude image of the image to segment.') - parser.add_argument('region', help='The region image of the image to segment.') - parser.add_argument('markers', help='Binary image containing the foreground (=1) and background (=2) markers.') - parser.add_argument('output', help='The output image containing the segmentation.') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + parser.add_argument( + "gradient", help="The gradient magnitude image of the image to segment." + ) + parser.add_argument("region", help="The region image of the image to segment.") + parser.add_argument( + "markers", + help="Binary image containing the foreground (=1) and background (=2) markers.", + ) + parser.add_argument("output", help="The output image containing the segmentation.") + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." 
+ ) + + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_graphcut_voxel.py b/bin/medpy_graphcut_voxel.py index dd135a6d..bd7dd93b 100755 --- a/bin/medpy_graphcut_voxel.py +++ b/bin/medpy_graphcut_voxel.py @@ -19,23 +19,24 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging import os +# build-in modules +from argparse import RawTextHelpFormatter + # third-party modules import scipy -# path changes +from medpy import graphcut # own modules from medpy.core import ArgumentError, Logger -from medpy.io import load, save, header -from medpy import graphcut from medpy.graphcut.wrapper import split_marker +from medpy.io import header, load, save +# path changes # information @@ -45,139 +46,208 @@ __status__ = "Release" __description__ = """ Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. - + This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. - + In the case of the difference of means, it is the original image. - + Furthermore the algorithm requires a binary image with foreground markers and a binary image with background markers. - + Additionally a filename for the created binary mask marking foreground and background has to be supplied. - + Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. Exiting.'.format(args.output)) + logger.warning( + "The output image {} already exists. 
Exiting.".format(args.output) + ) exit(-1) - + # select boundary term - ['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow'] - if 'diff_linear' == args.boundary: + [ + "diff_linear", + "diff_exp", + "diff_div", + "diff_pow", + "max_linear", + "max_exp", + "max_div", + "max_pow", + ] + if "diff_linear" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_linear - logger.info('Selected boundary term: linear difference of intensities') - elif 'diff_exp' == args.boundary: + logger.info("Selected boundary term: linear difference of intensities") + elif "diff_exp" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_exponential - logger.info('Selected boundary term: exponential difference of intensities') - elif 'diff_div' == args.boundary: + logger.info("Selected boundary term: exponential difference of intensities") + elif "diff_div" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_division - logger.info('Selected boundary term: divided difference of intensities') - elif 'diff_pow' == args.boundary: + logger.info("Selected boundary term: divided difference of intensities") + elif "diff_pow" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_power - logger.info('Selected boundary term: power based / raised difference of intensities') - elif 'max_linear' == args.boundary: + logger.info( + "Selected boundary term: power based / raised difference of intensities" + ) + elif "max_linear" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_linear - logger.info('Selected boundary term: linear maximum of intensities') - elif 'max_exp' == args.boundary: + logger.info("Selected boundary term: linear maximum of intensities") + elif "max_exp" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_exponential - logger.info('Selected boundary term: exponential maximum of intensities') - elif 'max_div' == args.boundary: + logger.info("Selected boundary term: exponential maximum of intensities") + elif "max_div" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_division - logger.info('Selected boundary term: divided maximum of intensities') - elif 'max_pow' == args.boundary: + logger.info("Selected boundary term: divided maximum of intensities") + elif "max_pow" == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_power - logger.info('Selected boundary term: power based / raised maximum of intensities') + logger.info( + "Selected boundary term: power based / raised maximum of intensities" + ) # load input images badditional_image_data, reference_header = load(args.badditional) markers_image_data, _ = load(args.markers) - + # split marker image into fg and bg images fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) - + # check if all images dimensions are the same - if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): - logger.critical('Not all of the supplied images are of the same shape.') - raise ArgumentError('Not all of the supplied images are of the same shape.') + if not ( + badditional_image_data.shape + == fgmarkers_image_data.shape + == bgmarkers_image_data.shape + ): + logger.critical("Not all of the supplied images are of the same shape.") + raise ArgumentError("Not all of the supplied images are of the same shape.") # extract spacing if required if args.spacing: spacing = 
header.get_pixel_spacing(reference_header) - logger.info('Taking spacing of {} into account.'.format(spacing)) + logger.info("Taking spacing of {} into account.".format(spacing)) else: spacing = False # generate graph - logger.info('Preparing BK_MFMC C++ graph...') - gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data, - bgmarkers_image_data, - boundary_term = boundary_term, - boundary_term_args = (badditional_image_data, args.sigma, spacing)) - + logger.info("Preparing BK_MFMC C++ graph...") + gcgraph = graphcut.graph_from_voxels( + fgmarkers_image_data, + bgmarkers_image_data, + boundary_term=boundary_term, + boundary_term_args=(badditional_image_data, args.sigma, spacing), + ) + # execute min-cut - logger.info('Executing min-cut...') + logger.info("Executing min-cut...") maxflow = gcgraph.maxflow() - logger.debug('Maxflow is {}'.format(maxflow)) - + logger.debug("Maxflow is {}".format(maxflow)) + # reshape results to form a valid mask - logger.info('Applying results...') + logger.info("Applying results...") result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_) for idx in range(len(result_image_data)): - result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1 + result_image_data[idx] = ( + 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1 + ) result_image_data = result_image_data.reshape(bgmarkers_image_data.shape) - - # save resulting mask - save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force) - logger.info('Successfully terminated.') + # save resulting mask + save( + result_image_data.astype(scipy.bool_), args.output, reference_header, args.force + ) + + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('sigma', type=float, help='The sigma required for the boundary terms.') - parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.') - parser.add_argument('markers', help='Image containing the foreground (=1) and background (=2) markers.') - parser.add_argument('output', help='The output image containing the segmentation.') - parser.add_argument('--boundary', default='diff_exp', help='The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.', choices=['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow']) - parser.add_argument('-s', dest='spacing', action='store_true', help='Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the baddtional image.') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "sigma", type=float, help="The sigma required for the boundary terms." 
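As a rough sketch of the role sigma plays in the difference-based terms (the exact normalisation used by medpy.graphcut.energy_voxel may differ), a Gaussian-style weight assigns high capacities to edges between similar neighbours:

    import numpy

    def boundary_weight(intensity_p, intensity_q, sigma):
        # Illustrative: differences small relative to sigma give weights
        # near 1 (expensive to cut), large differences give weights near 0.
        return numpy.exp(-((intensity_p - intensity_q) ** 2) / (2.0 * sigma ** 2))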
+ ) + parser.add_argument( + "badditional", + help="The additional image required by the boundary term. See there for details.", + ) + parser.add_argument( + "markers", + help="Image containing the foreground (=1) and background (=2) markers.", + ) + parser.add_argument("output", help="The output image containing the segmentation.") + parser.add_argument( + "--boundary", + default="diff_exp", + help="The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.", + choices=[ + "diff_linear", + "diff_exp", + "diff_div", + "diff_pow", + "max_linear", + "max_exp", + "max_div", + "max_pow", + ], + ) + parser.add_argument( + "-s", + dest="spacing", + action="store_true", + help="Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the baddtional image.", + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_grid.py b/bin/medpy_grid.py index 4079ae87..a9d5f0b9 100755 --- a/bin/medpy_grid.py +++ b/bin/medpy_grid.py @@ -20,20 +20,22 @@ along with this program. If not, see . """ -# build-in modules -import os import argparse import logging + +# build-in modules +import os import tempfile # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save, header +from medpy.io import header, load, save + +# path changes + # information __author__ = "Oskar Maier" @@ -44,21 +46,24 @@ Create an image volume containing a regular grid that can e.g. be used to visualize deformation fields. The grid volume can be generated either by supplying an example volume (-e) or by directly defining its shape (-s). - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
""" + def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # copy the example image or generate empty image, depending on the modus if args.example: grid_image = scipy.zeros(args.example_image.shape, scipy.bool_) @@ -68,7 +73,7 @@ def main(): # !TODO: Find another solution for this # Saving and loading image once to generate a valid header tmp_dir = tempfile.mkdtemp() - tmp_image = '{}/{}'.format(tmp_dir, args.output.split('/')[-1]) + tmp_image = "{}/{}".format(tmp_dir, args.output.split("/")[-1]) save(grid_image, tmp_image) _, grid_header = load(tmp_image) try: @@ -76,48 +81,59 @@ def main(): os.rmdir(tmp_dir) except Exception: pass - + # set the image attributes if supplied if args.pixelspacing: header.set_pixel_spacing(grid_header, args.pixelspacing) if args.offset: header.set_offset(grid_header, args.offset) - + # compute the right grid spacing for each dimension if args.real: - grid_spacing = [int(round(sp / float(ps))) for sp, ps in zip(args.spacing, header.get_pixel_spacing(grid_header))] + grid_spacing = [ + int(round(sp / float(ps))) + for sp, ps in zip(args.spacing, header.get_pixel_spacing(grid_header)) + ] else: grid_spacing = args.spacing - + # paint the grid into the empty image volume for dim in range(grid_image.ndim): - if 0 == grid_spacing[dim]: continue # skip dimension of 0 grid spacing supplied + if 0 == grid_spacing[dim]: + continue # skip dimension of 0 grid spacing supplied for offset in range(0, grid_image.shape[dim], grid_spacing[dim]): slicer = [slice(None)] * grid_image.ndim slicer[dim] = slice(offset, offset + 1) grid_image[slicer] = True - + # saving resulting grid volume save(grid_image, args.output, grid_header, args.force) - -def list_of_integers_or_int(string, separator=','): + +def list_of_integers_or_int(string, separator=","): if string.isdigit(): return int(string) return list_of_integers(string, separator) -def list_of_integers(string, separator=','): + +def list_of_integers(string, separator=","): values = string.split(separator) if not scipy.all(list(map(str.isdigit, values))): - raise argparse.ArgumentTypeError('{} is not a "{}" separated list of integers'.format(string, separator)) + raise argparse.ArgumentTypeError( + '{} is not a "{}" separated list of integers'.format(string, separator) + ) return list(map(int, values)) -def list_of_floats(string, separator=','): + +def list_of_floats(string, separator=","): values = string.split(separator) try: return list(map(float, values)) except ValueError: - raise argparse.ArgumentTypeError('{} is not a "{}" separated list of floats'.format(string, separator)) + raise argparse.ArgumentTypeError( + '{} is not a "{}" separated list of floats'.format(string, separator) + ) + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." 
@@ -128,25 +144,38 @@ def getArguments(parser): dimensions = args.example_image.ndim else: dimensions = len(args.shape) - + # check and, if required, modify the spacing argument if isinstance(args.spacing, int): args.spacing = [args.spacing] * dimensions elif len(args.spacing) != dimensions: - raise argparse.ArgumentTypeError('the grid spacing ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.spacing)), dimensions)) - + raise argparse.ArgumentTypeError( + "the grid spacing ({}) must contain the same number of elements as the output image has dimensions ({})".format( + ",".join(map(str, args.spacing)), dimensions + ) + ) + # check further arguments if args.offset and len(args.offset) != dimensions: - raise argparse.ArgumentTypeError('the offset ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.offset)), dimensions)) + raise argparse.ArgumentTypeError( + "the offset ({}) must contain the same number of elements as the output image has dimensions ({})".format( + ",".join(map(str, args.offset)), dimensions + ) + ) if args.pixelspacing and len(args.pixelspacing) != dimensions: - raise argparse.ArgumentTypeError('the supplied pixel spacing ({}) must contain the same number of elements as the output image has dimensions ({})'.format(','.join(map(str, args.pixelspacing)), dimensions)) - + raise argparse.ArgumentTypeError( + "the supplied pixel spacing ({}) must contain the same number of elements as the output image has dimensions ({})".format( + ",".join(map(str, args.pixelspacing)), dimensions + ) + ) + return args + def getParser(): "Creates and returns the argparse parser object." # text - epilog =""" + epilog = """ examples: %(prog)s -e example.nii grid.nii 10 Generates an empty image with the same attributes as example.nii, overlays it @@ -154,32 +183,79 @@ def getParser(): %(prog)s -e example.nii grid.nii 10,11,12 -r Same as above, but with an irregular grid and using real world coordinates (i.e. taking the voxel spacing of the image into account). - %(prog)s -s 100,200 grid.nii 10,2 -p 0.5,3 + %(prog)s -s 100,200 grid.nii 10,2 -p 0.5,3 Generates a 10x2 spaced grid in a 100x200 image with a voxel spacing of 0.5x3. - %(prog)s -s 100,100,50 grid.nii 5,5,0 + %(prog)s -s 100,100,50 grid.nii 5,5,0 Generates a 100x100x50 3D volume but fills it only with a regular 5x5 2D grid - over the first two dimensions. + over the first two dimensions. """ - + # command line argument parser - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, - description=__description__, epilog=epilog) - parser.add_argument('output', help='Generated grid volume.') - parser.add_argument('spacing', type=list_of_integers_or_int, help='The grid spacing. Can be a single digit for regular spacing in all dimensions or a colon-separated list of N integers, where N is the number of dimension in the generated volume. To skip the grid in one dimension, simply supply a 0 for it.') - + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__description__, + epilog=epilog, + ) + parser.add_argument("output", help="Generated grid volume.") + parser.add_argument( + "spacing", + type=list_of_integers_or_int, + help="The grid spacing. Can be a single digit for regular spacing in all dimensions or a colon-separated list of N integers, where N is the number of dimension in the generated volume. 
To skip the grid in one dimension, simply supply a 0 for it.", + ) + group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('-e', '--example', dest='example', help='Option 1/2: Supply an image to create the grid volume by example (i.e. with same shape, voxel spacing and offset).') - group.add_argument('-s', '--shape', type=list_of_integers, dest='shape', help='Option 2/2: Supply a colon-separated list of integers that constitute the target volumes shape.') - - parser.add_argument('-p', '--pixel-spacing', type=list_of_floats, dest='pixelspacing', help='Set the pixel spacing of the target volume by supplying a colon-separated list of N numbers, where N is the number of dimension in the generated volume.') - parser.add_argument('-o', '--offset', type=list_of_floats, dest='offset', help='Set offset of the target volume by supplying a colon-separated list of N numbers, where N is the number of dimension in the generated volume.') - - parser.add_argument('-r', '--real', dest='real', action='store_true', help='Spacing is given in real world coordinates, rather than voxels. For this to make a difference, either the -e switch or the -p switch must be set.') - - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='Silently override existing output images.') - return parser + group.add_argument( + "-e", + "--example", + dest="example", + help="Option 1/2: Supply an image to create the grid volume by example (i.e. with same shape, voxel spacing and offset).", + ) + group.add_argument( + "-s", + "--shape", + type=list_of_integers, + dest="shape", + help="Option 2/2: Supply a colon-separated list of integers that constitute the target volumes shape.", + ) + + parser.add_argument( + "-p", + "--pixel-spacing", + type=list_of_floats, + dest="pixelspacing", + help="Set the pixel spacing of the target volume by supplying a colon-separated list of N numbers, where N is the number of dimension in the generated volume.", + ) + parser.add_argument( + "-o", + "--offset", + type=list_of_floats, + dest="offset", + help="Set offset of the target volume by supplying a colon-separated list of N numbers, where N is the number of dimension in the generated volume.", + ) + + parser.add_argument( + "-r", + "--real", + dest="real", + action="store_true", + help="Spacing is given in real world coordinates, rather than voxels. For this to make a difference, either the -e switch or the -p switch must be set.", + ) + + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." 
+ ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_info.py b/bin/medpy_info.py index 22e56795..56a197ff 100755 --- a/bin/medpy_info.py +++ b/bin/medpy_info.py @@ -23,14 +23,15 @@ import argparse import logging +from medpy.core import Logger + +# own modules +from medpy.io import get_offset, get_pixel_spacing, load + # third-party modules # path changes -# own modules -from medpy.io import load, get_pixel_spacing, get_offset -from medpy.core import Logger - # information __author__ = "Oskar Maier" @@ -39,63 +40,78 @@ __status__ = "Release" __description__ = """ Prints information about an image volume to the command line. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input image input_data, input_header = load(args.input) - + # print information about the image printInfo(input_data, input_header) - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def printInfo(data, header): # print image information - print('\nInformations obtained from image header:') - print('header type={}'.format(type(header))) + print("\nInformations obtained from image header:") + print("header type={}".format(type(header))) try: - print('voxel spacing={}'.format(get_pixel_spacing(header))) + print("voxel spacing={}".format(get_pixel_spacing(header))) except AttributeError: - print('Failed to retrieve voxel spacing.') + print("Failed to retrieve voxel spacing.") try: - print('offset={}'.format(get_offset(header))) + print("offset={}".format(get_offset(header))) except AttributeError: - print('Failed to retrieve offset.') - - print('\nInformations obtained from image array:') - print('datatype={},dimensions={},shape={}'.format(data.dtype, data.ndim, data.shape)) - print('first and last element: {} / {}'.format(data.flatten()[0], data.flatten()[-1])) - + print("Failed to retrieve offset.") + + print("\nInformations obtained from image array:") + print( + "datatype={},dimensions={},shape={}".format(data.dtype, data.ndim, data.shape) + ) + print( + "first and last element: {} / {}".format(data.flatten()[0], data.flatten()[-1]) + ) + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
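The same header and array facts can be obtained programmatically; a minimal sketch using the functions imported above (the file name is hypothetical):

    from medpy.io import get_offset, get_pixel_spacing, load

    data, hdr = load("image.nii")
    print(data.dtype, data.ndim, data.shape)
    print(get_pixel_spacing(hdr))
    print(get_offset(hdr))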
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='The image to analyse.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser - + parser.add_argument("input", help="The image to analyse.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + + return parser + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_intensity_range_standardization.py b/bin/medpy_intensity_range_standardization.py index a30951b6..02e1a299 100755 --- a/bin/medpy_intensity_range_standardization.py +++ b/bin/medpy_intensity_range_standardization.py @@ -19,23 +19,24 @@ along with this program. If not, see . """ +import argparse +import logging + # build-in modules import os import pickle -import argparse -import logging # third-party modules import numpy -# path changes - # own modules from medpy.core import Logger from medpy.core.exceptions import ArgumentError +from medpy.filter import IntensityRangeStandardization from medpy.io import load, save from medpy.utilities.argparseu import sequenceOfIntegersGeAscendingStrict -from medpy.filter import IntensityRangeStandardization + +# path changes # information @@ -78,14 +79,17 @@ the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # loading input images (as image, header pairs) images = [] @@ -103,31 +107,46 @@ def main(): # if in application mode, load the supplied model and apply it to the images if args.lmodel: - logger.info('Loading the model and transforming images...') - with open(args.lmodel, 'r') as f: + logger.info("Loading the model and transforming images...") + with open(args.lmodel, "r") as f: trained_model = pickle.load(f) if not isinstance(trained_model, IntensityRangeStandardization): - raise ArgumentError('{} does not seem to be a valid pickled instance of an IntensityRangeStandardization object'.format(args.lmodel)) - transformed_images = [trained_model.transform(i[m], surpress_mapping_check = args.ignore) for i, m in zip(images, masks)] + raise ArgumentError( + "{} does not seem to be a valid pickled instance of an IntensityRangeStandardization object".format( + args.lmodel + ) + ) + transformed_images = [ + trained_model.transform(i[m], surpress_mapping_check=args.ignore) + for i, m in zip(images, masks) + ] # in in training mode, train the model, apply it to the images and save it else: - logger.info('Training the average intensity model...') + logger.info("Training the average intensity model...") irs = IntensityRangeStandardization() - trained_model, transformed_images = irs.train_transform([i[m] for i, m in zip(images, masks)], surpress_mapping_check = args.ignore) - logger.info('Saving the trained model as {}...'.format(args.smodel)) - with open(args.smodel, 'wb') as f: - pickle.dump(trained_model, f) + trained_model, transformed_images = irs.train_transform( + [i[m] for i, m in zip(images, masks)], surpress_mapping_check=args.ignore + ) + logger.info("Saving the trained model as 
{}...".format(args.smodel)) + with open(args.smodel, "wb") as f: + pickle.dump(trained_model, f) # save the transformed images if args.simages: - logger.info('Saving intensity transformed images to {}...'.format(args.simages)) - for ti, i, m, h, image_name in zip(transformed_images, images, masks, headers, args.images): + logger.info("Saving intensity transformed images to {}...".format(args.simages)) + for ti, i, m, h, image_name in zip( + transformed_images, images, masks, headers, args.images + ): i[m] = ti - save(i, '{}/{}'.format(args.simages, image_name.split('/')[-1]), h, args.force) - - logger.info('Terminated.') + save( + i, + "{}/{}".format(args.simages, image_name.split("/")[-1]), + h, + args.force, + ) + logger.info("Terminated.") def getArguments(parser): @@ -136,59 +155,137 @@ def getArguments(parser): # check mutual exlusive and reaquired arguments if args.lmodel and args.smodel: - parser.error('only one of --load-model and --save-model can be supplied, as they decide on whether to apply the application or the training mode') + parser.error( + "only one of --load-model and --save-model can be supplied, as they decide on whether to apply the application or the training mode" + ) if not args.lmodel and not args.smodel: - parser.error('exactly one of --load-model or --save-model has to be supplied') + parser.error("exactly one of --load-model or --save-model has to be supplied") # application mode if args.lmodel: if not os.path.isfile(args.lmodel): - parser.error('the supplied model file {} does not exist'.format(args.lmodel)) + parser.error( + "the supplied model file {} does not exist".format(args.lmodel) + ) if not args.simages: - parser.error('--save-images must be supplied when running the application mode') + parser.error( + "--save-images must be supplied when running the application mode" + ) # training mode if args.smodel: - if not args.landmarkp in ('L2', 'L3', 'L4'): + if not args.landmarkp in ("L2", "L3", "L4"): args.landmarkp = sequenceOfIntegersGeAscendingStrict(args.landmarkp) - if not 'auto' == args.stdspace: + if not "auto" == args.stdspace: args.stdspace = sequenceOfIntegersGeAscendingStrict(args.stdspace) if not args.force and os.path.isfile(args.smodel): - parser.error('the target model file {} already exists'.format(args.smodel)) + parser.error("the target model file {} already exists".format(args.smodel)) # others if args.simages: if not os.path.isdir(args.simages): - parser.error('--save-images must be a valid directory') + parser.error("--save-images must be a valid directory") if args.masks and len(args.masks) != len(args.images): - parser.error('the same number of masks must be passed to --masks as images have been supplied') + parser.error( + "the same number of masks must be passed to --masks as images have been supplied" + ) return args + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('images', nargs='+', help='The images used for training (in the learning case) or to transform (in the transformation case)') - - apply_group = parser.add_argument_group('apply an existing model') - apply_group.add_argument('--load-model', dest='lmodel', default=False, help='Location of the pickled intensity range model to load. 
Activated application mode.') - - train_group = parser.add_argument_group('train a new model and save and/or apply it') - train_group.add_argument('--save-model', dest='smodel', default=False, help='Save the trained model under this name as a pickled object (should end in .pkl). Activates training mode.') - train_group.add_argument('--cutoffp', dest='cutoffp', type=sequenceOfIntegersGeAscendingStrict, default='1,99', help='Colon-separated lower and upper cut-off percentile values to exclude intensity outliers during the model training.') - train_group.add_argument('--landmarkp', dest='landmarkp', default='L4', help='The landmark percentiles, based on which to train the model. Can be L2, L3, L4 or a colon-separated, ordered list of percentiles.') - train_group.add_argument('--stdspace', dest='stdspace', default='auto', help='Two colon-separated intensity values to roughly define the average intensity space to learn. In most cases should be left set to \'auto\'') - - shared_group = parser.add_argument_group('shared arguments') - shared_group.add_argument('--save-images', dest='simages', default=False, help='Save the transformed images under this location. Required for the application mode, optional for the learning mode.') - shared_group.add_argument('--threshold', type=float, default=0, help='All voxel with an intensity > threshold are considered as foreground. Supply either this or a mask for each image.') - shared_group.add_argument('--masks', nargs='+', help='A number of binary foreground mask, one for each image. Alternative to supplying a threshold. Overrides the threshold parameter if supplied.') - shared_group.add_argument('--ignore', dest='ignore', action='store_true', help='Ignore possible loss of information during the intensity transformation. Should only be used when you know what you are doing.') - - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Verbose output') - parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='Overwrite existing files (both model and images)') + parser = argparse.ArgumentParser( + description=__description__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "images", + nargs="+", + help="The images used for training (in the learning case) or to transform (in the transformation case)", + ) + + apply_group = parser.add_argument_group("apply an existing model") + apply_group.add_argument( + "--load-model", + dest="lmodel", + default=False, + help="Location of the pickled intensity range model to load. Activated application mode.", + ) + + train_group = parser.add_argument_group( + "train a new model and save and/or apply it" + ) + train_group.add_argument( + "--save-model", + dest="smodel", + default=False, + help="Save the trained model under this name as a pickled object (should end in .pkl). Activates training mode.", + ) + train_group.add_argument( + "--cutoffp", + dest="cutoffp", + type=sequenceOfIntegersGeAscendingStrict, + default="1,99", + help="Colon-separated lower and upper cut-off percentile values to exclude intensity outliers during the model training.", + ) + train_group.add_argument( + "--landmarkp", + dest="landmarkp", + default="L4", + help="The landmark percentiles, based on which to train the model. 
Can be L2, L3, L4 or a colon-separated, ordered list of percentiles.", + ) + train_group.add_argument( + "--stdspace", + dest="stdspace", + default="auto", + help="Two colon-separated intensity values to roughly define the average intensity space to learn. In most cases should be left set to 'auto'", + ) + + shared_group = parser.add_argument_group("shared arguments") + shared_group.add_argument( + "--save-images", + dest="simages", + default=False, + help="Save the transformed images under this location. Required for the application mode, optional for the learning mode.", + ) + shared_group.add_argument( + "--threshold", + type=float, + default=0, + help="All voxel with an intensity > threshold are considered as foreground. Supply either this or a mask for each image.", + ) + shared_group.add_argument( + "--masks", + nargs="+", + help="A number of binary foreground mask, one for each image. Alternative to supplying a threshold. Overrides the threshold parameter if supplied.", + ) + shared_group.add_argument( + "--ignore", + dest="ignore", + action="store_true", + help="Ignore possible loss of information during the intensity transformation. Should only be used when you know what you are doing.", + ) + + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="Verbose output" + ) + parser.add_argument( + "-d", + "--debug", + dest="debug", + action="store_true", + help="Display debug information.", + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="Overwrite existing files (both model and images)", + ) return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_intersection.py b/bin/medpy_intersection.py index 33a1121c..dcbd1529 100755 --- a/bin/medpy_intersection.py +++ b/bin/medpy_intersection.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ -Extracts the intersecting parts of two volumes regarding offset and voxel-spacing. +Extracts the intersecting parts of two volumes regarding offset and voxel-spacing. Copyright (C) 2013 Oskar Maier @@ -24,15 +24,16 @@ import logging import os +from medpy.core import Logger +from medpy.filter.utilities import intersection + +# own modules +from medpy.io import header, load, save + # third-party modules # path changes -# own modules -from medpy.io import load, save, header -from medpy.core import Logger -from medpy.filter.utilities import intersection - # information __author__ = "Oskar Maier" @@ -42,72 +43,103 @@ __description__ = """ Extracts the intersecting parts of two volumes regarding offset and voxel-spacing. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
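A hypothetical invocation (file names illustrative):

    medpy_intersection.py first.nii second.nii first_cut.nii second_cut.nii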
""" + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # check if output image exists (will also be performed before saving, but as the smoothing might be very time intensity, a initial check can save frustration) if not args.force: if os.path.exists(args.output1): - raise parser.error('The output image {} already exists.'.format(args.output1)) + raise parser.error( + "The output image {} already exists.".format(args.output1) + ) if os.path.exists(args.output2): - raise parser.error('The output image {} already exists.'.format(args.output2)) - + raise parser.error( + "The output image {} already exists.".format(args.output2) + ) + # loading images data_input1, header_input1 = load(args.input1) data_input2, header_input2 = load(args.input2) - logger.debug('Original image sizes are {} and {}.'.format(data_input1.shape, data_input2.shape)) - + logger.debug( + "Original image sizes are {} and {}.".format( + data_input1.shape, data_input2.shape + ) + ) + # compute intersection volumes (punch) - logger.info('Computing the intersection.') - inters1, inters2, new_offset = intersection(data_input1, header_input1, data_input2, header_input2) - logger.debug('Punched images are of sizes {} and {} with new offset {}.'.format(inters1.shape, inters2.shape, new_offset)) - + logger.info("Computing the intersection.") + inters1, inters2, new_offset = intersection( + data_input1, header_input1, data_input2, header_input2 + ) + logger.debug( + "Punched images are of sizes {} and {} with new offset {}.".format( + inters1.shape, inters2.shape, new_offset + ) + ) + # check if any intersection could be found at all if 0 == inters1.size: - logger.warning('No intersection could be found between the images. Please check their meta-data e.g. with medpy_info') - + logger.warning( + "No intersection could be found between the images. Please check their meta-data e.g. with medpy_info" + ) + # update header informations header.set_offset(header_input1, new_offset) header.set_offset(header_input2, new_offset) - + # save punched images save(inters1, args.output1, header_input1, args.force) save(inters2, args.output2, header_input2, args.force) - - logger.info('Successfully terminated.') + + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input1', help='First source volume.') - parser.add_argument('input2', help='Second source volume.') - parser.add_argument('output1', help='First target volume.') - parser.add_argument('output2', help='Second target volume.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - + parser.add_argument("input1", help="First source volume.") + parser.add_argument("input2", help="Second source volume.") + parser.add_argument("output1", help="First target volume.") + parser.add_argument("output2", help="Second target volume.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser - + + if __name__ == "__main__": main() diff --git a/bin/medpy_join_masks.py b/bin/medpy_join_masks.py index 51f3a178..62da0b68 100755 --- a/bin/medpy_join_masks.py +++ b/bin/medpy_join_masks.py @@ -18,19 +18,20 @@ You should have received a copy of the GNU General Public License along with this program. If not, see .""" -# build-in modules -import sys import argparse import logging # third-party modules import numpy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save, header +from medpy.io import header, load, save + +# build-in modules + + +# path changes # information @@ -40,76 +41,107 @@ __status__ = "Release" __description__ = """ Joins a number of binary images into a single conjunction. - + The available combinatorial operations are sum, avg, max and min. In the case of max and min, the output volumes are also binary images, in the case of sum they are uint8 and in the case of avg of type float. - + All input images must be of same shape and voxel spacing. - + WARNING: Does not consider image offset. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
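A hypothetical invocation averaging three masks (file names illustrative; note that the output comes first):

    medpy_join_masks.py joined.nii mask1.nii mask2.nii mask3.nii -o avg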
""" + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input images and cast to bool images = [] for input_ in args.inputs: t = load(input_) images.append((t[0], t[1])) - + # check if their shapes and voxel spacings are all equal s0 = images[0][0].shape if not numpy.all([i[0].shape == s0 for i in images[1:]]): - raise argparse.ArgumentError(args.input, 'At least one input image is of a different shape than the others.') + raise argparse.ArgumentError( + args.input, + "At least one input image is of a different shape than the others.", + ) vs0 = header.get_pixel_spacing(images[0][1]) if not numpy.all([header.get_pixel_spacing(i[1]) == vs0 for i in images[1:]]): - raise argparse.ArgumentError(args.input, 'At least one input image has a different voxel spacing than the others.') - + raise argparse.ArgumentError( + args.input, + "At least one input image has a different voxel spacing than the others.", + ) + # execute operation - logger.debug('Executing operation {} over {} images.'.format(args.operation, len(images))) - if 'max' == args.operation: + logger.debug( + "Executing operation {} over {} images.".format(args.operation, len(images)) + ) + if "max" == args.operation: out = numpy.maximum.reduce([t[0] for t in images]) - elif 'min' == args.operation: + elif "min" == args.operation: out = numpy.minimum.reduce([t[0] for t in images]) - elif 'sum' == args.operation: + elif "sum" == args.operation: out = numpy.sum([t[0] for t in images], 0).astype(numpy.uint8) - else: # avg + else: # avg out = numpy.average([t[0] for t in images], 0).astype(numpy.float32) - + # save output save(out, args.output, images[0][1], args.force) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, - description=__description__) - parser.add_argument('output', help='Target volume.') - parser.add_argument('inputs', nargs='+', help='Source volume(s).') - parser.add_argument('-o', '--operation', dest='operation', choices=['sum', 'avg', 'max', 'min'], default='avg', help='Combinatorial operation to conduct.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__description__, + ) + parser.add_argument("output", help="Target volume.") + parser.add_argument("inputs", nargs="+", help="Source volume(s).") + parser.add_argument( + "-o", + "--operation", + dest="operation", + choices=["sum", "avg", "max", "min"], + default="avg", + help="Combinatorial operation to conduct.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." 
+ ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_join_xd_to_xplus1d.py b/bin/medpy_join_xd_to_xplus1d.py index 004d13be..dd0a7b98 100755 --- a/bin/medpy_join_xd_to_xplus1d.py +++ b/bin/medpy_join_xd_to_xplus1d.py @@ -21,19 +21,20 @@ # build-in modules import argparse -from argparse import RawTextHelpFormatter import logging +from argparse import RawTextHelpFormatter # third-party modules import scipy -# path changes - -# own modules -from medpy.io import load, save, header from medpy.core import Logger from medpy.core.exceptions import ArgumentError +# own modules +from medpy.io import header, load, save + +# path changes + # information __author__ = "Oskar Maier" @@ -42,82 +43,135 @@ __status__ = "Release" __description__ = """ Joins a number of XD volumes into a (X+1)D volume. - + One common use is when a number of 3D volumes, each representing a moment in time, are availabel. With this script they can be joined into a proper 4D volume. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - - # load first input image as example + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + + # load first input image as example example_data, example_header = load(args.inputs[0]) - + # test if the supplied position is valid if args.position > example_data.ndim or args.position < 0: - raise ArgumentError('The supplied position for the new dimension is invalid. It has to be between 0 and {}.'.format(example_data.ndim)) - + raise ArgumentError( + "The supplied position for the new dimension is invalid. 
It has to be between 0 and {}.".format( + example_data.ndim + ) + ) + # prepare empty output volume - output_data = scipy.zeros([len(args.inputs)] + list(example_data.shape), dtype=example_data.dtype) - + output_data = scipy.zeros( + [len(args.inputs)] + list(example_data.shape), dtype=example_data.dtype + ) + # add first image to output volume output_data[0] = example_data - + # load input images and add to output volume for idx, image in enumerate(args.inputs[1:]): image_data, _ = load(image) if not args.ignore and image_data.dtype != example_data.dtype: - raise ArgumentError('The dtype {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.dtype, image, args.inputs[0], example_data.dtype)) + raise ArgumentError( + "The dtype {} of image {} differs from the one of the first image {}, which is {}.".format( + image_data.dtype, image, args.inputs[0], example_data.dtype + ) + ) if image_data.shape != example_data.shape: - raise ArgumentError('The shape {} of image {} differs from the one of the first image {}, which is {}.'.format(image_data.shape, image, args.inputs[0], example_data.shape)) + raise ArgumentError( + "The shape {} of image {} differs from the one of the first image {}, which is {}.".format( + image_data.shape, image, args.inputs[0], example_data.shape + ) + ) output_data[idx + 1] = image_data - + # move new dimension to the end or to target position for dim in range(output_data.ndim - 1): - if dim >= args.position: break + if dim >= args.position: + break output_data = scipy.swapaxes(output_data, dim, dim + 1) - + # set pixel spacing spacing = list(header.get_pixel_spacing(example_header)) - spacing = tuple(spacing[:args.position] + [args.spacing] + spacing[args.position:]) + spacing = tuple( + spacing[: args.position] + [args.spacing] + spacing[args.position :] + ) example_header.set_voxel_spacing(spacing) - + # save created volume save(output_data, args.output, example_header, args.force) - + logger.info("Successfully terminated.") - + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - parser.add_argument('output', help='Target volume.') - parser.add_argument('inputs', nargs='+', help='Source volumes of same shape and dtype.') - parser.add_argument('-s', dest='spacing', type=float, default=1, help='The voxel spacing of the newly created dimension. Default is 1.') - parser.add_argument('-p', dest='position', type=int, default=0, help='The position where to put the new dimension starting from 0. Standard behaviour is to place it in the first position.') - parser.add_argument('-i', dest='ignore', action='store_true', help='Ignore if the images datatypes differ.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "inputs", nargs="+", help="Source volumes of same shape and dtype." 
+ ) + parser.add_argument( + "-s", + dest="spacing", + type=float, + default=1, + help="The voxel spacing of the newly created dimension. Default is 1.", + ) + parser.add_argument( + "-p", + dest="position", + type=int, + default=0, + help="The position where to put the new dimension starting from 0. Standard behaviour is to place it in the first position.", + ) + parser.add_argument( + "-i", + dest="ignore", + action="store_true", + help="Ignore if the images datatypes differ.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_label_count.py b/bin/medpy_label_count.py index a7439dfd..0f879903 100755 --- a/bin/medpy_label_count.py +++ b/bin/medpy_label_count.py @@ -26,11 +26,12 @@ # third-party modules import numpy -# path changes +from medpy.core import Logger # own modules from medpy.io import load -from medpy.core import Logger + +# path changes # information @@ -41,58 +42,66 @@ __description__ = """ Counts the regions in a number of label images and prints the results to the stdout in csv syntax. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # write header line - print('image;labels\n') - + print("image;labels\n") + # iterate over input images for image in args.images: - # get and prepare image data - logger.info('Processing image {}...'.format(image)) + logger.info("Processing image {}...".format(image)) image_data, _ = load(image) - + # count number of labels and flag a warning if they reach the ushort border - count = len(numpy.unique(image_data)) - + count = len(numpy.unique(image_data)) + # count number of labels and write - print('{};{}\n'.format(image.split('/')[-1], count)) - + print("{};{}\n".format(image.split("/")[-1], count)) + sys.stdout.flush() - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('images', nargs='+', help='One or more label images.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - - return parser - + parser.add_argument("images", nargs="+", help="One or more label images.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." 
+ ) + + return parser + + if __name__ == "__main__": - main() - \ No newline at end of file + main() diff --git a/bin/medpy_label_fit_to_mask.py b/bin/medpy_label_fit_to_mask.py index ef32cb35..ef81d161 100755 --- a/bin/medpy_label_fit_to_mask.py +++ b/bin/medpy_label_fit_to_mask.py @@ -27,12 +27,13 @@ # third-party modules import numpy -# path changes +from medpy.core import Logger +from medpy.filter import fit_labels_to_mask # own modules from medpy.io import load, save -from medpy.core import Logger -from medpy.filter import fit_labels_to_mask + +# path changes # information @@ -56,6 +57,7 @@ the LICENSE file or for details. """ + # code def main(): # parse cmd arguments @@ -65,49 +67,73 @@ def main(): # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # load input image - logger.info('Loading image {}...'.format(args.input)) + logger.info("Loading image {}...".format(args.input)) image_labels_data, _ = load(args.image) # load mask image - logger.info('Loading mask {}...'.format(args.mask)) + logger.info("Loading mask {}...".format(args.mask)) image_mask_data, image_mask_data_header = load(args.mask) # check if output image exists if not args.force: if os.path.exists(args.output): - logger.warning('The output image {} already exists. Skipping this image.'.format(args.output)) + logger.warning( + "The output image {} already exists. Skipping this image.".format( + args.output + ) + ) # create a mask from the label image - logger.info('Reducing the label image...') + logger.info("Reducing the label image...") image_reduced_data = fit_labels_to_mask(image_labels_data, image_mask_data) # save resulting mask - logger.info('Saving resulting mask as {} in the same format as input mask, only with data-type int8...'.format(args.output)) - image_reduced_data = image_reduced_data.astype(numpy.bool_, copy=False) # bool sadly not recognized + logger.info( + "Saving resulting mask as {} in the same format as input mask, only with data-type int8...".format( + args.output + ) + ) + image_reduced_data = image_reduced_data.astype( + numpy.bool_, copy=False + ) # bool sadly not recognized save(image_reduced_data, args.output, image_mask_data_header, args.force) - logger.info('Successfully terminated.') + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
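# A rough sketch of what medpy.filter.fit_labels_to_mask (used in main()
# above) does conceptually -- a majority vote per label region; this is an
# illustration, not the library's exact implementation:
import numpy

def fit_labels_to_mask_sketch(label_image, mask):
    "Keep each label region whose voxels lie mostly inside the binary mask."
    mask = mask.astype(bool)
    out = numpy.zeros(mask.shape, dtype=bool)
    for label_id in numpy.unique(label_image):
        region = label_image == label_id
        # a region is kept if more than half of its voxels fall inside the mask
        if numpy.count_nonzero(mask[region]) > 0.5 * numpy.count_nonzero(region):
            out[region] = True
    return out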
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('image', nargs='+', help='The input label image.') - parser.add_argument('mask', help='The mask image to which to fit the label images.') - parser.add_argument('output', help='The output image.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') + parser.add_argument("image", nargs="+", help="The input label image.") + parser.add_argument("mask", help="The mask image to which to fit the label images.") + parser.add_argument("output", help="The output image.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_label_superimposition.py b/bin/medpy_label_superimposition.py index 6b2c01e6..32d435dd 100755 --- a/bin/medpy_label_superimposition.py +++ b/bin/medpy_label_superimposition.py @@ -19,20 +19,23 @@ along with this program. If not, see . """ -# build-in modules -from argparse import ArgumentError import argparse import logging import os +# build-in modules +from argparse import ArgumentError + # third-party modules import scipy -# path changes +from medpy.core import Logger # own modules from medpy.io import load, save -from medpy.core import Logger + +# path changes + # information __author__ = "Oskar Maier" @@ -43,94 +46,155 @@ Takes two label images as input and creates their superimposition i.e. all the regions borders are preserved and the resulting image contains more or the same number of regions as the respective input images. - + The resulting image has the same name as the first input image, just with a '_superimp' suffix. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # build output image name - image_superimposition_name = args.folder + '/' + args.image1.split('/')[-1][:-4] + '_superimp' - image_superimposition_name += args.image1.split('/')[-1][-4:] - + image_superimposition_name = ( + args.folder + "/" + args.image1.split("/")[-1][:-4] + "_superimp" + ) + image_superimposition_name += args.image1.split("/")[-1][-4:] + # check if output image exists if not args.force: if os.path.exists(image_superimposition_name): - raise ArgumentError('The output image {} already exists. Please provide the -f/force flag, if you wish to override it.'.format(image_superimposition_name)) - + raise ArgumentError( + "The output image {} already exists. 
Please provide the -f/force flag, if you wish to override it.".format( + image_superimposition_name + ) + ) + # load image1 using - logger.info('Loading image {}...'.format(args.image1)) + logger.info("Loading image {}...".format(args.image1)) image1_data, image1_header = load(args.image1) - + # load image2 using - logger.info('Loading image {}...'.format(args.image2)) + logger.info("Loading image {}...".format(args.image2)) image2_data, _ = load(args.image2) - + # check input images to be valid - logger.info('Checking input images for correctness...') + logger.info("Checking input images for correctness...") if image1_data.shape != image2_data.shape: - raise ArgumentError('The two input images shape do not match with 1:{} and 2:{}'.format(image1_data.shape, image2_data.shape)) - int_types = (scipy.uint, scipy.uint0, scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64, scipy.uintc, scipy.uintp, - scipy.int_, scipy.int0, scipy.int8, scipy.int16, scipy.int32, scipy.int64, scipy.intc, scipy.intp) + raise ArgumentError( + "The two input images shape do not match with 1:{} and 2:{}".format( + image1_data.shape, image2_data.shape + ) + ) + int_types = ( + scipy.uint, + scipy.uint0, + scipy.uint8, + scipy.uint16, + scipy.uint32, + scipy.uint64, + scipy.uintc, + scipy.uintp, + scipy.int_, + scipy.int0, + scipy.int8, + scipy.int16, + scipy.int32, + scipy.int64, + scipy.intc, + scipy.intp, + ) if image1_data.dtype not in int_types: - raise ArgumentError('Input image 1 is of type {}, an int type is required.'.format(image1_data.dtype)) + raise ArgumentError( + "Input image 1 is of type {}, an int type is required.".format( + image1_data.dtype + ) + ) if image2_data.dtype not in int_types: - raise ArgumentError('Input image 2 is of type {}, an int type is required.'.format(image2_data.dtype)) - if 4294967295 < abs(image1_data.min()) + image1_data.max() + abs(image2_data.min()) + image2_data.max(): - raise ArgumentError('The input images contain so many (or not consecutive) labels, that they will not fit in a uint32 range.') - + raise ArgumentError( + "Input image 2 is of type {}, an int type is required.".format( + image2_data.dtype + ) + ) + if ( + 4294967295 + < abs(image1_data.min()) + + image1_data.max() + + abs(image2_data.min()) + + image2_data.max() + ): + raise ArgumentError( + "The input images contain so many (or not consecutive) labels, that they will not fit in a uint32 range." 
+ ) + # create superimposition of the two label images - logger.info('Creating superimposition image...') + logger.info("Creating superimposition image...") image_superimposition_data = scipy.zeros(image1_data.shape, dtype=scipy.uint32) translation = {} label_id_counter = 0 for x in range(image1_data.shape[0]): for y in range(image1_data.shape[1]): for z in range(image1_data.shape[2]): - label1 = image1_data[x,y,z] - label2 = image2_data[x,y,z] + label1 = image1_data[x, y, z] + label2 = image2_data[x, y, z] if not (label1, label2) in translation: translation[(label1, label2)] = label_id_counter label_id_counter += 1 - image_superimposition_data[x,y,z] = translation[(label1, label2)] - + image_superimposition_data[x, y, z] = translation[(label1, label2)] + # save resulting superimposition image - logger.info('Saving superimposition image as {} in the same format as input image...'.format(image_superimposition_name)) + logger.info( + "Saving superimposition image as {} in the same format as input image...".format( + image_superimposition_name + ) + ) save(image_superimposition_data, args.output, image1_header, args.force) - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('image1', help='The first input label image.') - parser.add_argument('image2', help='The second input label image.') - parser.add_argument('output', help='The output image.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - - return parser - + parser.add_argument("image1", help="The first input label image.") + parser.add_argument("image2", help="The second input label image.") + parser.add_argument("output", help="The output image.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + + return parser + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_merge.py b/bin/medpy_merge.py index e3c33dda..73ea129d 100755 --- a/bin/medpy_merge.py +++ b/bin/medpy_merge.py @@ -23,14 +23,14 @@ import argparse import logging -# third-party modules - -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# third-party modules + +# path changes + # information __author__ = "Oskar Maier" @@ -39,56 +39,76 @@ __status__ = "Release" __description__ = """ Merges two images into one. - + All voxels of the first supplied image that equal False (e.g. zeros), are replaced by the corresponding voxels of the second image. - + A common use case is the merging of two marker images. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details.
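    As a minimal sketch of the operation (assuming two integer marker volumes
    m1 and m2 of equal shape), the merge below amounts to:

        m1[m1 == 0] += m2[m1 == 0]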
""" + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load first input image data_input1, header_input1 = load(args.input1) - + # load second input image data_input2, _ = load(args.input2) - + # merge data_input1[data_input1 == False] += data_input2[data_input1 == False] # save resulting volume save(data_input1, args.output, header_input1, args.force) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input1', help='Source volume one.') - parser.add_argument('input2', help='Source volume two.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-e', dest='empty', action='store_true', help='Instead of copying the voxel data, create an empty copy conserving all meta-data if possible.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input1", help="Source volume one.") + parser.add_argument("input2", help="Source volume two.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-e", + dest="empty", + action="store_true", + help="Instead of copying the voxel data, create an empty copy conserving all meta-data if possible.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_morphology.py b/bin/medpy_morphology.py index 6d414b1b..6903f801 100755 --- a/bin/medpy_morphology.py +++ b/bin/medpy_morphology.py @@ -26,12 +26,12 @@ # third-party modules import scipy.ndimage -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# path changes + # information __author__ = "Oskar Maier" @@ -40,71 +40,116 @@ __status__ = "Release" __description__ = """ Executes opening and closing morphological operations over the input image(s). - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
""" + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) # load input image image_smoothed_data, image_header = load(args.input) - + # perform opening resp. closing # in 3D case: size 1 = 6-connectedness, 2 = 12-connectedness, 3 = 18-connectedness, etc. - footprint = scipy.ndimage.generate_binary_structure(image_smoothed_data.ndim, args.size) - if 'erosion' == args.type: - logger.info('Applying erosion...') - image_smoothed_data = scipy.ndimage.binary_erosion(image_smoothed_data, footprint, iterations=args.iterations) - elif 'dilation' == args.type: - logger.info('Applying dilation...') - image_smoothed_data = scipy.ndimage.binary_dilation(image_smoothed_data, footprint, iterations=args.iterations) - elif 'opening' == args.type: - logger.info('Applying opening...') - image_smoothed_data = scipy.ndimage.binary_opening(image_smoothed_data, footprint, iterations=args.iterations) - else: # closing - logger.info('Applying closing...') - image_smoothed_data = scipy.ndimage.binary_closing(image_smoothed_data, footprint, iterations=args.iterations) + footprint = scipy.ndimage.generate_binary_structure( + image_smoothed_data.ndim, args.size + ) + if "erosion" == args.type: + logger.info("Applying erosion...") + image_smoothed_data = scipy.ndimage.binary_erosion( + image_smoothed_data, footprint, iterations=args.iterations + ) + elif "dilation" == args.type: + logger.info("Applying dilation...") + image_smoothed_data = scipy.ndimage.binary_dilation( + image_smoothed_data, footprint, iterations=args.iterations + ) + elif "opening" == args.type: + logger.info("Applying opening...") + image_smoothed_data = scipy.ndimage.binary_opening( + image_smoothed_data, footprint, iterations=args.iterations + ) + else: # closing + logger.info("Applying closing...") + image_smoothed_data = scipy.ndimage.binary_closing( + image_smoothed_data, footprint, iterations=args.iterations + ) # apply additional hole closing step - logger.info('Closing holes...') + logger.info("Closing holes...") image_smoothed_data = scipy.ndimage.binary_fill_holes(image_smoothed_data) # save resulting mas save(image_smoothed_data, args.output, image_header, args.force) - - logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('-t', '--type', dest='type', choices=['erosion', 'dilation', 'opening', 'closing'], default='erosion', help='The type of the morphological operation.') - parser.add_argument('-i', '--iterations', dest='iterations', default=0, type=int, help='The number of iteration to execute. Supply a value of 1 or higher to restrict the effect of the morphological operation. Otherwise it is applied until saturation.') - parser.add_argument('-s', '--size', dest='size', default=3, type=int, help='Size of the closing element (>=1). The higher this value, the bigger the wholes that get closed (closing) resp. 
unconnected elements that are removed (opening). In the 3D case, 1 equals a 6-connectedness, 2 a 12-connectedness, 3 a 18-connectedness, etc.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - - return parser - + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "-t", + "--type", + dest="type", + choices=["erosion", "dilation", "opening", "closing"], + default="erosion", + help="The type of the morphological operation.", + ) + parser.add_argument( + "-i", + "--iterations", + dest="iterations", + default=0, + type=int, + help="The number of iterations to execute. Supply a value of 1 or higher to restrict the effect of the morphological operation. Otherwise it is applied until saturation.", + ) + parser.add_argument( + "-s", + "--size", + dest="size", + default=3, + type=int, + help="Size of the closing element (>=1). The higher this value, the bigger the holes that get closed (closing) resp. unconnected elements that are removed (opening). In the 3D case, 1 equals a 6-connectedness, 2 a 12-connectedness, 3 a 18-connectedness, etc.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + + return parser + + if __name__ == "__main__": - main() - \ No newline at end of file + main() diff --git a/bin/medpy_resample.py b/bin/medpy_resample.py index 49824c9c..d7ded76b 100755 --- a/bin/medpy_resample.py +++ b/bin/medpy_resample.py @@ -19,21 +19,22 @@ along with this program. If not, see . """ -# build-in modules -import os import argparse import logging +# build-in modules +import os + # third-party modules import scipy.ndimage -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save, header +from medpy.io import header, load, save from medpy.utilities import argparseu +# path changes + # information __author__ = "Oskar Maier" @@ -44,16 +45,17 @@ Resamples an image according to a supplied voxel spacing. BSpline is used for interpolation. An order between 1 and 5 can be selected. - + Note that the pixel data type of the input image is respected, i.e. an integer input image leads to an integer output image etc. Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details.
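    For instance, resampling from a voxel spacing of (1.0, 1.0, 3.0) to
    (1.0, 1.0, 1.5) doubles the number of slices along the third axis; the
    core step below then reduces to (a sketch):

        zoom_factors = [o / n for n, o in zip(new_spacing, old_spacing)]  # [1.0, 1.0, 2.0]
        img = scipy.ndimage.zoom(img, zoom_factors, order=2)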
""" + # code def main(): parser = getParser() @@ -61,30 +63,39 @@ def main(): # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # loading input images img, hdr = load(args.input) # check spacing values if not len(args.spacing) == img.ndim: - parser.error('The image has {} dimensions, but {} spacing parameters have been supplied.'.format(img.ndim, len(args.spacing))) - + parser.error( + "The image has {} dimensions, but {} spacing parameters have been supplied.".format( + img.ndim, len(args.spacing) + ) + ) + # check if output image exists if not args.force: if os.path.exists(args.output): - parser.error('The output image {} already exists.'.format(args.output)) - - logger.debug('target voxel spacing: {}'.format(args.spacing)) + parser.error("The output image {} already exists.".format(args.output)) + + logger.debug("target voxel spacing: {}".format(args.spacing)) # compute zoom values - zoom_factors = [old / float(new) for new, old in zip(args.spacing, header.get_pixel_spacing(hdr))] - logger.debug('zoom-factors: {}'.format(zoom_factors)) + zoom_factors = [ + old / float(new) + for new, old in zip(args.spacing, header.get_pixel_spacing(hdr)) + ] + logger.debug("zoom-factors: {}".format(zoom_factors)) # zoom image img = scipy.ndimage.zoom(img, zoom_factors, order=args.order) - logger.debug('new image shape: {}'.format(img.shape)) + logger.debug("new image shape: {}".format(img.shape)) # set new voxel spacing header.set_pixel_spacing(hdr, args.spacing) @@ -92,30 +103,53 @@ def main(): # saving the resulting image save(img, args.output, hdr, args.force) - + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() if args.order < 0 or args.order > 5: - parser.error('The order has to be a number between 0 and 5.') + parser.error("The order has to be a number between 0 and 5.") return args + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='the input image') - parser.add_argument('output', help='the output image') - parser.add_argument('spacing', type=argparseu.sequenceOfFloatsGt, help='the desired voxel spacing in colon-separated values, e.g. 1.2,1.2,5.0') - parser.add_argument('-o', '--order', type=int, default=2, dest='order', help='the bspline order, default is 2; means nearest neighbours; see also medpy_binary_resampling.py') - - #group = parser.add_mutually_exclusive_group(required=False) - #group.add_argument('--binary', action='store_true', dest='binary', help='enforce binary output image') - #group.add_argument('--float', action='store_true', dest='float', help='enforce floating point output image') - - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite existing files') + parser.add_argument("input", help="the input image") + parser.add_argument("output", help="the output image") + parser.add_argument( + "spacing", + type=argparseu.sequenceOfFloatsGt, + help="the desired voxel spacing in colon-separated values, e.g. 
1.2,1.2,5.0", + ) + parser.add_argument( + "-o", + "--order", + type=int, + default=2, + dest="order", + help="the bspline order, default is 2; means nearest neighbours; see also medpy_binary_resampling.py", + ) + + # group = parser.add_mutually_exclusive_group(required=False) + # group.add_argument('--binary', action='store_true', dest='binary', help='enforce binary output image') + # group.add_argument('--float', action='store_true', dest='float', help='enforce floating point output image') + + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="verbose output" + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="overwrite existing files", + ) return parser - + + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_reslice_3d_to_4d.py b/bin/medpy_reslice_3d_to_4d.py index 65cd4fa0..00eb8049 100755 --- a/bin/medpy_reslice_3d_to_4d.py +++ b/bin/medpy_reslice_3d_to_4d.py @@ -26,12 +26,12 @@ # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save from medpy.core.exceptions import ArgumentError +from medpy.io import load, save + +# path changes # information @@ -46,81 +46,114 @@ of the input 4D volume and then by combining them into a 3D volume. Then repeats the process starting from the second slice, etc. The new dimension will be appended to the already existing once. - + A typical use case are dicom images. These often come with the time dimension represented by stacking various 3D volumes on top of each other in one of the spatial dimensions. These can be converted in proper 4D volumes with this script. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. 
""" + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load 3d image data_3d, header_3d = load(args.input) - + # check if supplied dimension parameter is inside the images dimensions if args.dimension >= data_3d.ndim or args.dimension < 0: - raise ArgumentError('The supplied cut-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension, data_3d.ndim)) - + raise ArgumentError( + "The supplied cut-dimension {} exceeds the number of input volume dimensions {}.".format( + args.dimension, data_3d.ndim + ) + ) + # check if the supplied offset parameter is a divider of the cut-dimensions slice number if not 0 == data_3d.shape[args.dimension] % args.offset: - raise ArgumentError('The offset is not a divider of the number of slices in cut dimension ({} / {}).'.format(data_3d.shape[args.dimension], args.offset)) - + raise ArgumentError( + "The offset is not a divider of the number of slices in cut dimension ({} / {}).".format( + data_3d.shape[args.dimension], args.offset + ) + ) + # prepare empty target volume volumes_3d = data_3d.shape[args.dimension] / args.offset shape_4d = list(data_3d.shape) shape_4d[args.dimension] = volumes_3d data_4d = scipy.zeros([args.offset] + shape_4d, dtype=data_3d.dtype) - - logger.debug('Separating {} slices into {} 3D volumes of thickness {}.'.format(data_3d.shape[args.dimension], volumes_3d, args.offset)) - + + logger.debug( + "Separating {} slices into {} 3D volumes of thickness {}.".format( + data_3d.shape[args.dimension], volumes_3d, args.offset + ) + ) + # iterate over 3D image and create sub volumes which are then added to the 4d volume for idx in range(args.offset): # collect the slices for sl in range(volumes_3d): idx_from = [slice(None), slice(None), slice(None)] - idx_from[args.dimension] = slice(idx + sl * args.offset, idx + sl * args.offset + 1) + idx_from[args.dimension] = slice( + idx + sl * args.offset, idx + sl * args.offset + 1 + ) idx_to = [slice(None), slice(None), slice(None)] - idx_to[args.dimension] = slice(sl, sl+1) - #print 'Slice {} to {}.'.format(idx_from, idx_to) + idx_to[args.dimension] = slice(sl, sl + 1) + # print 'Slice {} to {}.'.format(idx_from, idx_to) data_4d[idx][idx_to] = data_3d[idx_from] - + # flip dimensions such that the newly created is the last data_4d = scipy.swapaxes(data_4d, 0, args.dimension + 1) data_4d = scipy.rollaxis(data_4d, 0, 4) - + # save resulting 4D volume save(data_4d, args.output, header_3d, args.force) - + logger.info("Successfully terminated.") + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('dimension', type=int, help='The dimension in which to perform the cut (starting from 0).') - parser.add_argument('offset', type=int, help='The offset between the slices.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "dimension", + type=int, + help="The dimension in which to perform the cut (starting from 0).", + ) + parser.add_argument("offset", type=int, help="The offset between the slices.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_set_pixel_spacing.py b/bin/medpy_set_pixel_spacing.py index ae6505a3..0748831c 100755 --- a/bin/medpy_set_pixel_spacing.py +++ b/bin/medpy_set_pixel_spacing.py @@ -23,14 +23,14 @@ import argparse import logging +# own modules +from medpy.core import Logger +from medpy.io import header, load, save + # third-party modules # path changes -# own modules -from medpy.core import Logger -from medpy.io import load, header, save - # information __author__ = "Oskar Maier" @@ -39,46 +39,58 @@ __status__ = "Release" __description__ = """ Change an image's pixel spacing in-place. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input data_input, header_input = load(args.image) - + # change pixel spacing - logger.info('Setting pixel spacing along {} to {}...'.format(data_input.shape, args.spacing)) + logger.info( + "Setting pixel spacing along {} to {}...".format(data_input.shape, args.spacing) + ) header.set_pixel_spacing(header_input, args.spacing) - + # save file save(data_input.copy(), args.image, header_input, True) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
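# A minimal usage sketch of the header accessors employed above
# ("volume.nii" is a hypothetical file name):
from medpy.io import header, load, save

data, hdr = load("volume.nii")                  # read image data and header
header.set_pixel_spacing(hdr, (1.0, 1.0, 2.5))  # overwrite the voxel spacing
save(data, "volume.nii", hdr, True)             # write back in-place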
parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('image', help='Image volume.') - parser.add_argument('spacing', type=float, nargs='+', help='The spacing values.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - return parser + parser.add_argument("image", help="Image volume.") + parser.add_argument("spacing", type=float, nargs="+", help="The spacing values.") + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_shrink_image.py b/bin/medpy_shrink_image.py index 81177e68..73f55f73 100755 --- a/bin/medpy_shrink_image.py +++ b/bin/medpy_shrink_image.py @@ -26,11 +26,11 @@ # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save, header +from medpy.io import header, load, save + +# path changes # information @@ -42,83 +42,108 @@ Shrinks an image by discarding slices. Reverse operation of zoom_image.py. Reduces the image by keeping one slice, then discarding "discard" slices, then keeping the next and so on. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see the LICENSE file or for details. """ + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input data input_data, input_header = load(args.input) - - logger.debug('Old shape = {}.'.format(input_data.shape)) - + + logger.debug("Old shape = {}.".format(input_data.shape)) + # compute new shape new_shape = list(input_data.shape) new_shape[args.dimension] = 1 + (new_shape[args.dimension] - 1) / (args.discard + 1) - + # prepare output image output_data = scipy.zeros(new_shape, dtype=input_data.dtype) - + # prepare slicers slicer_in = [slice(None)] * input_data.ndim slicer_out = [slice(None)] * input_data.ndim - + # prepare skip-counter and output image slice counter skipc = 0 slicec = 0 - - logger.debug('Shrinking from {} to {}...'.format(input_data.shape, new_shape)) + + logger.debug("Shrinking from {} to {}...".format(input_data.shape, new_shape)) for idx in range(input_data.shape[args.dimension]): - if 0 == skipc: # transfer slice slicer_in[args.dimension] = slice(idx, idx + 1) - slicer_out[args.dimension] = slice(slicec, slicec + 1) + slicer_out[args.dimension] = slice(slicec, slicec + 1) output_data[slicer_out] = input_data[slicer_in] - + # reset resp.
increase counter skipc = args.discard slicec += 1 - - else: # skip slice + + else: # skip slice # decrease skip counter skipc -= 1 - # set new pixel spacing new_spacing = list(header.get_pixel_spacing(input_header)) new_spacing[args.dimension] = new_spacing[args.dimension] * float(args.discard + 1) - logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(input_header), new_spacing)) + logger.debug( + "Setting pixel spacing from {} to {}....".format( + header.get_pixel_spacing(input_header), new_spacing + ) + ) header.set_pixel_spacing(input_header, tuple(new_spacing)) - + save(output_data, args.output, input_header, args.force) - + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('dimension', type=int, help='The dimension along which to discard the slices.') - parser.add_argument('discard', type=int, help='How many slices to discard between each two slices which are kept.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') + parser = argparse.ArgumentParser( + description=__description__, formatter_class=argparse.RawTextHelpFormatter + ) + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "dimension", type=int, help="The dimension along which to discard the slices." + ) + parser.add_argument( + "discard", + type=int, + help="How many slices to discard between each two slices which are kept.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) return parser + if __name__ == "__main__": - main() + main() diff --git a/bin/medpy_split_xd_to_xminus1d.py b/bin/medpy_split_xd_to_xminus1d.py index e2315c8d..054b383d 100755 --- a/bin/medpy_split_xd_to_xminus1d.py +++ b/bin/medpy_split_xd_to_xminus1d.py @@ -26,13 +26,14 @@ # third-party modules import scipy -# path changes - -# own modules -from medpy.io import load, save, header from medpy.core import Logger from medpy.core.exceptions import ArgumentError +# own modules +from medpy.io import header, load, save + +# path changes + # information __author__ = "Oskar Maier" @@ -41,74 +42,102 @@ __status__ = "Release" __description__ = """ Splits a XD into a number of (X-1)D volumes. - + One common use case is the creation of manual markers for 4D images. This script allows to split a 4D into a number of either spatial or temporal 3D volumes, for which one then can create the markers. These can be rejoined using the join_xd_to_xplus1d.py script. 
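    For example, splitting a (10, 256, 256, 20) volume along dimension 0
    yields ten volumes of shape (256, 256, 20); per index, the cut below
    reduces to (a sketch):

        data_output = scipy.squeeze(data_input[idx : idx + 1])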
- + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input image data_input, header_input = load(args.input) - + # check if the supplied dimension is valid if args.dimension >= data_input.ndim or args.dimension < 0: - raise ArgumentError('The supplied cut-dimension {} exceeds the image dimensionality of 0 to {}.'.format(args.dimension, data_input.ndim - 1)) - + raise ArgumentError( + "The supplied cut-dimension {} exceeds the image dimensionality of 0 to {}.".format( + args.dimension, data_input.ndim - 1 + ) + ) + # prepare output file string - name_output = args.output.replace('{}', '{:03d}') - + name_output = args.output.replace("{}", "{:03d}") + # compute the new the voxel spacing spacing = list(header.get_pixel_spacing(header_input)) del spacing[args.dimension] - + # iterate over the cut dimension slices = data_input.ndim * [slice(None)] for idx in range(data_input.shape[args.dimension]): - # cut the current slice from the original image + # cut the current slice from the original image slices[args.dimension] = slice(idx, idx + 1) data_output = scipy.squeeze(data_input[slices]) # update the header and set the voxel spacing header_input.set_voxel_spacing(spacing) # save current slice save(data_output, name_output.format(idx), header_input, args.force) - + logger.info("Successfully terminated.") - + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() - if not '{}' in args.output: - raise argparse.ArgumentError(args.output, 'The output argument string must contain the sequence "{}".') + if not "{}" in args.output: + raise argparse.ArgumentError( + args.output, 'The output argument string must contain the sequence "{}".' + ) return args + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volumes. Has to include the sequence "{}" in the place where the volume number should be placed.') - parser.add_argument('dimension', type=int, help='The dimension along which to split (starting from 0).') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input", help="Source volume.") + parser.add_argument( + "output", + help='Target volumes. Has to include the sequence "{}" in the place where the volume number should be placed.', + ) + parser.add_argument( + "dimension", + type=int, + help="The dimension along which to split (starting from 0).", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." 
+ ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Silently override existing output images.", + ) + return parser + if __name__ == "__main__": main() diff --git a/bin/medpy_stack_sub_volumes.py b/bin/medpy_stack_sub_volumes.py index 80c192e4..2fccbabf 100755 --- a/bin/medpy_stack_sub_volumes.py +++ b/bin/medpy_stack_sub_volumes.py @@ -19,20 +19,22 @@ along with this program. If not, see . """ -# build-in modules -from argparse import RawTextHelpFormatter import argparse import logging +# build-in modules +from argparse import RawTextHelpFormatter + # third-party modules import numpy -# path changes - # own modules from medpy.core import Logger from medpy.io import load, save +# path changes + + # information __author__ = "Oskar Maier" __version__ = "r0.3.1, 2011-03-29" @@ -43,82 +45,117 @@ all but one dimension. The images are then stacked on top of each other to produce a single result image. The dimension in which to stack is supplied by the dimension parameter. - + Note that the supplied images must be of the same data type. Note to take into account the input images orientations. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details. """ + # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) - + # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load first image as result image - logger.info('Loading {}...'.format(args.images[0])) + logger.info("Loading {}...".format(args.images[0])) result_data, result_header = load(args.images[0]) - + # check dimension argument if args.dimension >= result_data.ndim: - raise argparse.ArgumentError('The supplied stack-dimension {} exceeds the image dimensionality of 0 to {}.'.format(args.dimension, result_data.ndim - 1)) - + raise argparse.ArgumentError( + "The supplied stack-dimension {} exceeds the image dimensionality of 0 to {}.".format( + args.dimension, result_data.ndim - 1 + ) + ) + # reduce the image dimensions if args.zero and result_data.all(): result_data = numpy.zeros(result_data.shape, result_data.dtype) - + # iterate over remaining images and concatenate for image_name in args.images[1:]: - logger.info('Loading {}...'.format(image_name)) + logger.info("Loading {}...".format(image_name)) image_data, _ = load(image_name) - + # change to zero matrix if requested if args.zero and image_data.all(): image_data = numpy.zeros(image_data.shape, image_data.dtype) - - #concatenate + + # concatenate if args.reversed: result_data = numpy.concatenate((image_data, result_data), args.dimension) - else: + else: result_data = numpy.concatenate((result_data, image_data), args.dimension) - logger.debug('Final image is of shape {}.'.format(result_data.shape)) + logger.debug("Final image is of shape {}.".format(result_data.shape)) # save results in same format as input image - logger.info('Saving concatenated image as {}...'.format(args.output)) - + logger.info("Saving concatenated image as {}...".format(args.output)) + save(result_data, args.output, result_header, args.force) - - 
logger.info('Successfully terminated.') - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." - parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) - - parser.add_argument('dimension', type=int, help='The dimension in which direction to stack (starting from 0:x).') - parser.add_argument('output', help='The output image.') - parser.add_argument('images', nargs='+', help='The images to concatenate/stack.') - parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-z', dest='zero', action='store_true', help='If supplied, all images containing only 1s are treated as empty image.') - parser.add_argument('-r', dest='reversed', action='store_true', help='Stack in resversed order as how the files are supplied.') - - return parser - + parser = argparse.ArgumentParser( + description=__description__, formatter_class=RawTextHelpFormatter + ) + + parser.add_argument( + "dimension", + type=int, + help="The dimension in which direction to stack (starting from 0:x).", + ) + parser.add_argument("output", help="The output image.") + parser.add_argument("images", nargs="+", help="The images to concatenate/stack.") + parser.add_argument( + "-f", + dest="force", + action="store_true", + help="Set this flag to silently override files that exist.", + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." + ) + parser.add_argument( + "-z", + dest="zero", + action="store_true", + help="If supplied, all images containing only 1s are treated as empty images.", + ) + parser.add_argument( + "-r", + dest="reversed", + action="store_true", + help="Stack in reversed order to how the files are supplied.", + ) + + return parser + + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/bin/medpy_swap_dimensions.py b/bin/medpy_swap_dimensions.py index d2bd04e3..9e15c09a 100755 --- a/bin/medpy_swap_dimensions.py +++ b/bin/medpy_swap_dimensions.py @@ -26,12 +26,12 @@ # third-party modules import scipy -# path changes - # own modules from medpy.core import Logger -from medpy.io import load, save, header from medpy.core.exceptions import ArgumentError +from medpy.io import header, load, save + +# path changes # information @@ -42,33 +42,44 @@ __description__ = """ Two of the input image's dimensions are swapped. A (200,100,10) image can thus be turned into a (200,10,100) one. - + Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see - the LICENSE file or for details. + the LICENSE file or for details.
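    As a sketch of the core call below, swapping dimensions 1 and 2:

        data_output = scipy.swapaxes(data_input, 1, 2)  # (200, 100, 10) -> (200, 10, 100)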
""" + # code def main(): args = getArguments(getParser()) # prepare logger logger = Logger.getInstance() - if args.debug: logger.setLevel(logging.DEBUG) - elif args.verbose: logger.setLevel(logging.INFO) - + if args.debug: + logger.setLevel(logging.DEBUG) + elif args.verbose: + logger.setLevel(logging.INFO) + # load input image data_input, header_input = load(args.input) - - logger.debug('Original shape = {}.'.format(data_input.shape)) - + + logger.debug("Original shape = {}.".format(data_input.shape)) + # check if supplied dimension parameters is inside the images dimensions if args.dimension1 >= data_input.ndim or args.dimension1 < 0: - raise ArgumentError('The first swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension1, data_input.ndim)) + raise ArgumentError( + "The first swap-dimension {} exceeds the number of input volume dimensions {}.".format( + args.dimension1, data_input.ndim + ) + ) elif args.dimension2 >= data_input.ndim or args.dimension2 < 0: - raise ArgumentError('The second swap-dimension {} exceeds the number of input volume dimensions {}.'.format(args.dimension2, data_input.ndim)) - + raise ArgumentError( + "The second swap-dimension {} exceeds the number of input volume dimensions {}.".format( + args.dimension2, data_input.ndim + ) + ) + # swap axes data_output = scipy.swapaxes(data_input, args.dimension1, args.dimension2) # swap pixel spacing and offset @@ -78,29 +89,45 @@ def main(): os = list(header.get_offset(header_input)) os[args.dimension1], os[args.dimension2] = os[args.dimension2], os[args.dimension1] header.set_offset(header_input, os) - - logger.debug('Resulting shape = {}.'.format(data_output.shape)) - + + logger.debug("Resulting shape = {}.".format(data_output.shape)) + # save resulting volume save(data_output, args.output, header_input, args.force) - - logger.info("Successfully terminated.") - + + logger.info("Successfully terminated.") + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." parser = argparse.ArgumentParser(description=__description__) - parser.add_argument('input', help='Source volume.') - parser.add_argument('output', help='Target volume.') - parser.add_argument('dimension1', type=int, help='First dimension to swap (starting from 0).') - parser.add_argument('dimension2', type=int, help='Second dimension to swap (starting from 0).') - parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') - parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') - parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.') - return parser + parser.add_argument("input", help="Source volume.") + parser.add_argument("output", help="Target volume.") + parser.add_argument( + "dimension1", type=int, help="First dimension to swap (starting from 0)." + ) + parser.add_argument( + "dimension2", type=int, help="Second dimension to swap (starting from 0)." + ) + parser.add_argument( + "-v", dest="verbose", action="store_true", help="Display more information." + ) + parser.add_argument( + "-d", dest="debug", action="store_true", help="Display debug information." 
+    )
+    parser.add_argument(
+        "-f",
+        dest="force",
+        action="store_true",
+        help="Silently override existing output images.",
+    )
+    return parser
+
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/bin/medpy_watershed.py b/bin/medpy_watershed.py
index 0b0bff45..0d9d5f14 100755
--- a/bin/medpy_watershed.py
+++ b/bin/medpy_watershed.py
@@ -30,12 +30,13 @@
 from scipy.ndimage import label
 from skimage.morphology import watershed
 
-# path changes
+from medpy.core import ArgumentError, Logger
+from medpy.filter import local_minima
 
 # own modules
 from medpy.io import load, save
-from medpy.core import Logger, ArgumentError
-from medpy.filter import local_minima
+
+# path changes
 
 
 # information
@@ -54,6 +55,7 @@
 the LICENSE file or <http://www.gnu.org/licenses/> for details.
 """
 
+
 # code
 def main():
     # parse cmd arguments
@@ -63,13 +65,17 @@ def main():
 
     # prepare logger
     logger = Logger.getInstance()
-    if args.debug: logger.setLevel(logging.DEBUG)
-    elif args.verbose: logger.setLevel(logging.INFO)
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.verbose:
+        logger.setLevel(logging.INFO)
 
     # check if output image exists (will also be performed before saving, but as the watershed might be very time intensity, a initial check can save frustration)
     if not args.force:
         if os.path.exists(args.output):
-            raise ArgumentError('The output image {} already exists.'.format(args.output))
+            raise ArgumentError(
+                "The output image {} already exists.".format(args.output)
+            )
 
     # loading images
     data_input, header_input = load(args.input)
@@ -79,7 +85,9 @@ def main():
         mask = None
 
     # extract local minima and convert to markers
-    logger.info('Extract local minima with minimum distance of {}...'.format(args.mindist))
+    logger.info(
+        "Extract local minima with minimum distance of {}...".format(args.mindist)
+    )
     lm, _ = local_minima(data_input, args.mindist)
     lm_indices = tuple([numpy.asarray(x) for x in lm.T])
     minima_labels = numpy.zeros(data_input.shape, dtype=numpy.uint64)
@@ -89,30 +97,50 @@ def main():
     minima_labels, _ = label(minima_labels)
 
     # apply the watershed
-    logger.info('Watershedding...')
+    logger.info("Watershedding...")
     data_output = watershed(data_input, minima_labels, mask=mask)
 
     # save file
     save(data_output, args.output, header_input, args.force)
 
-    logger.info('Successfully terminated.')
+    logger.info("Successfully terminated.")
+
 
 def getArguments(parser):
     "Provides additional validation of the arguments collected by argparse."
     return parser.parse_args()
 
+
 def getParser():
     "Creates and returns the argparse parser object."
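For orientation, the marker construction performed in main() above reduces to the
following sketch (the random array stands in for a loaded gradient volume; the
minimum distance of 2 mirrors the --mindist default):

    import numpy
    from scipy.ndimage import label
    from skimage.morphology import watershed

    from medpy.filter import local_minima

    gradient_image = numpy.random.rand(64, 64)  # stand-in for a gradient image
    lm, _ = local_minima(gradient_image, 2)  # coordinates of the local minima
    markers = numpy.zeros(gradient_image.shape, dtype=numpy.uint64)
    markers[tuple(numpy.asarray(x) for x in lm.T)] = 1  # mark the seed voxels
    markers, _ = label(markers)  # one label per connected seed region
    segmentation = watershed(gradient_image, markers)  # flood from the seeds
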
     parser = argparse.ArgumentParser(description=__description__)
-    parser.add_argument('input', help='Source volume (usually a gradient image).')
-    parser.add_argument('output', help='Target volume.')
-    parser.add_argument('--mindist', type=int, default=2, help='The minimum distance between local minima in voxel units.')
-    parser.add_argument('--mask', help='Optional binary mask image denoting the area over which to compute the watershed.')
-    parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
-    parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
-    parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
+    parser.add_argument("input", help="Source volume (usually a gradient image).")
+    parser.add_argument("output", help="Target volume.")
+    parser.add_argument(
+        "--mindist",
+        type=int,
+        default=2,
+        help="The minimum distance between local minima in voxel units.",
+    )
+    parser.add_argument(
+        "--mask",
+        help="Optional binary mask image denoting the area over which to compute the watershed.",
+    )
+    parser.add_argument(
+        "-v", dest="verbose", action="store_true", help="Display more information."
+    )
+    parser.add_argument(
+        "-d", dest="debug", action="store_true", help="Display debug information."
+    )
+    parser.add_argument(
+        "-f",
+        dest="force",
+        action="store_true",
+        help="Silently override existing output images.",
+    )
     return parser
 
+
 if __name__ == "__main__":
     main()
diff --git a/bin/medpy_zoom_image.py b/bin/medpy_zoom_image.py
index e851fbee..fa392dbd 100755
--- a/bin/medpy_zoom_image.py
+++ b/bin/medpy_zoom_image.py
@@ -27,11 +27,11 @@
 # third-party modules
 from scipy.ndimage import interpolation
 
-# path changes
-
 # own modules
 from medpy.core import Logger
-from medpy.io import load, save, header
+from medpy.io import header, load, save
+
+# path changes
 
 
 # information
@@ -43,41 +43,49 @@
 Zoom into an image by adding new slices in the z-direction and filling them
 with interpolated data. Overall "enhancement" new slices will be created
 between every two original slices.
-
+
 If you want to zoom multiple binary objects in an image without interpolating
 between their values, use the -o switch.
-
+
 Copyright (C) 2013 Oskar Maier
 This program comes with ABSOLUTELY NO WARRANTY; This is free software,
 and you are welcome to redistribute it under certain conditions; see
-the LICENSE file or <http://www.gnu.org/licenses/> for details.
+the LICENSE file or <http://www.gnu.org/licenses/> for details.
 """
 
+
 # code
 def main():
     args = getArguments(getParser())
 
     # prepare logger
     logger = Logger.getInstance()
-    if args.debug: logger.setLevel(logging.DEBUG)
-    elif args.verbose: logger.setLevel(logging.INFO)
-    
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.verbose:
+        logger.setLevel(logging.INFO)
+
     # check if output image exists
     if not args.force and os.path.exists(args.output):
-        logger.warning('The output image {} already exists. Exiting.'.format(args.output))
+        logger.warning(
+            "The output image {} already exists.
Exiting.".format(args.output) + ) exit(-1) - + # load input data input_data, input_header = load(args.input) - + # if normal mode, perform the zoom - logger.info('Performing normal zoom...') - output_data, output_header = zoom(input_data, args.enhancement, args.dimension, hdr=input_header) + logger.info("Performing normal zoom...") + output_data, output_header = zoom( + input_data, args.enhancement, args.dimension, hdr=input_header + ) # saving results save(output_data, args.output, output_header, args.force) - -def zoom(image, factor, dimension, hdr = False, order = 3): + + +def zoom(image, factor, dimension, hdr=False, order=3): """ Zooms the provided image by the supplied factor in the supplied dimension. The factor is an integer determining how many slices should be put between each @@ -87,45 +95,75 @@ def zoom(image, factor, dimension, hdr = False, order = 3): """ # check if supplied dimension is valid if dimension >= image.ndim: - raise argparse.ArgumentError('The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'.format(dimension, image.ndim - 1)) - + raise argparse.ArgumentError( + "The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.".format( + dimension, image.ndim - 1 + ) + ) + # get logger logger = Logger.getInstance() - logger.debug('Old shape = {}.'.format(image.shape)) + logger.debug("Old shape = {}.".format(image.shape)) # perform the zoom zoom = [1] * image.ndim - zoom[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension]) - logger.debug('Reshaping with = {}.'.format(zoom)) + zoom[dimension] = ( + image.shape[dimension] + (image.shape[dimension] - 1) * factor + ) / float(image.shape[dimension]) + logger.debug("Reshaping with = {}.".format(zoom)) image = interpolation.zoom(image, zoom, order=order) - - logger.debug('New shape = {}.'.format(image.shape)) - + + logger.debug("New shape = {}.".format(image.shape)) + if hdr: new_spacing = list(header.get_pixel_spacing(hdr)) new_spacing[dimension] = new_spacing[dimension] / float(factor + 1) - logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing)) + logger.debug( + "Setting pixel spacing from {} to {}....".format( + header.get_pixel_spacing(hdr), new_spacing + ) + ) header.set_pixel_spacing(hdr, tuple(new_spacing)) - + return image, hdr - + + def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() + def getParser(): "Creates and returns the argparse parser object." 
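
To make the slice arithmetic in zoom() above concrete (numbers illustrative): with
10 original slices and enhancement=2, the zoom ratio along the chosen dimension is
(10 + 9 * 2) / 10 = 2.8, giving 28 slices after interpolation, while the pixel
spacing is divided by factor + 1, e.g. from 3.0 mm to 1.0 mm, so the physical
extent of 27 mm between the first and the last slice is preserved.
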
-    parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument('input', help='Source volume.')
-    parser.add_argument('output', help='Target volume.')
-    parser.add_argument('dimension', type=int, help='The dimension along which to zoom.')
-    parser.add_argument('enhancement', type=int, help='How many slices to put between each original slice.')
-    #parser.add_argument('-o', dest='objects', action='store_true', help='Activate this flag to perform the zoom for any binary object in the image separatly.')
-    parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
-    parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
-    parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
+    parser = argparse.ArgumentParser(
+        description=__description__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument("input", help="Source volume.")
+    parser.add_argument("output", help="Target volume.")
+    parser.add_argument(
+        "dimension", type=int, help="The dimension along which to zoom."
+    )
+    parser.add_argument(
+        "enhancement",
+        type=int,
+        help="How many slices to put between each original slice.",
+    )
+    # parser.add_argument('-o', dest='objects', action='store_true', help='Activate this flag to perform the zoom for any binary object in the image separately.')
+    parser.add_argument(
+        "-v", dest="verbose", action="store_true", help="Display more information."
+    )
+    parser.add_argument(
+        "-d", dest="debug", action="store_true", help="Display debug information."
+    )
+    parser.add_argument(
+        "-f",
+        dest="force",
+        action="store_true",
+        help="Silently override existing output images.",
+    )
     return parser
 
+
 if __name__ == "__main__":
-    main()
+    main()
diff --git a/doc/README b/doc/README
index bd5727d1..730aec57 100644
--- a/doc/README
+++ b/doc/README
@@ -10,7 +10,7 @@ and test if the right binary is called. Higher versions break with the used nump
 Then run
 
 sphinx-build -aE -b html source/ build/
-    
+
 , then edit .rst files belong to Python classes
 
 source/generated/medpy.graphcut.graph.Graph.rst
@@ -23,15 +23,15 @@ source/generated/medpy.iterators.*
 by removing the line
 
 .. automethod:: __init__
-    
+
 and adding the line
-    
+
 :toctree: generated/
-    
+
 beneath each ".. autosummary::" command.
 
 Finally rerun the build
-    
+
 sphinx-build -aE -b html source/ build/
 
 
@@ -41,7 +41,7 @@ Enabling the search box
 
 Remove
 
 scipy-sphinx-theme/_theme/scipy/searchbox.html
-    
+
 from the scipy template, as it somehow overrides the search box with a custom
 link to edit the .rst files in-place online.
 
@@ -51,4 +51,3 @@ Generate the API documentation files
 Run
 
 sphinx-apidoc -efF -H MedPy -A "Oskar Maier" -V 0.2 -R 1 -o generated/ ../../medpy/medpy/
-
diff --git a/doc/numpydoc/LICENSE.txt b/doc/numpydoc/LICENSE.txt
index b15c699d..fe707adb 100644
--- a/doc/numpydoc/LICENSE.txt
+++ b/doc/numpydoc/LICENSE.txt
@@ -91,4 +91,3 @@ Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
 
 8.
By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. - diff --git a/doc/numpydoc/numpydoc/__init__.py b/doc/numpydoc/numpydoc/__init__.py index 0779af9b..19ffd871 100644 --- a/doc/numpydoc/numpydoc/__init__.py +++ b/doc/numpydoc/numpydoc/__init__.py @@ -1,3 +1 @@ - - -from .numpydoc import setup +from .numpydoc import setup # nopycln: import diff --git a/doc/numpydoc/numpydoc/comment_eater.py b/doc/numpydoc/numpydoc/comment_eater.py index 769b39a2..d6de51a9 100644 --- a/doc/numpydoc/numpydoc/comment_eater.py +++ b/doc/numpydoc/numpydoc/comment_eater.py @@ -1,23 +1,24 @@ - - import sys + if sys.version_info[0] >= 3: from io import StringIO else: from io import StringIO -import compiler import inspect import textwrap import tokenize +import compiler + from .compiler_unparse import unparse class Comment(object): - """ A comment block. - """ + """A comment block.""" + is_comment = True + def __init__(self, start_lineno, end_lineno, text): # int : The first line number in the block. 1-indexed. self.start_lineno = start_lineno @@ -27,41 +28,47 @@ def __init__(self, start_lineno, end_lineno, text): self.text = text def add(self, string, start, end, line): - """ Add a new comment line. - """ + """Add a new comment line.""" self.start_lineno = min(self.start_lineno, start[0]) self.end_lineno = max(self.end_lineno, end[0]) self.text += string def __repr__(self): - return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno, self.text) + return "%s(%r, %r, %r)" % ( + self.__class__.__name__, + self.start_lineno, + self.end_lineno, + self.text, + ) class NonComment(object): - """ A non-comment block of code. - """ + """A non-comment block of code.""" + is_comment = False + def __init__(self, start_lineno, end_lineno): self.start_lineno = start_lineno self.end_lineno = end_lineno def add(self, string, start, end, line): - """ Add lines to the block. - """ + """Add lines to the block.""" if string.strip(): # Only add if not entirely whitespace. self.start_lineno = min(self.start_lineno, start[0]) self.end_lineno = max(self.end_lineno, end[0]) def __repr__(self): - return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno) + return "%s(%r, %r)" % ( + self.__class__.__name__, + self.start_lineno, + self.end_lineno, + ) class CommentBlocker(object): - """ Pull out contiguous comment blocks. - """ + """Pull out contiguous comment blocks.""" + def __init__(self): # Start with a dummy. self.current_block = NonComment(0, 0) @@ -73,8 +80,7 @@ def __init__(self): self.index = {} def process_file(self, file): - """ Process a file object. - """ + """Process a file object.""" if sys.version_info[0] >= 3: nxt = file.__next__ else: @@ -84,8 +90,7 @@ def process_file(self, file): self.make_index() def process_token(self, kind, string, start, end, line): - """ Process a single token. - """ + """Process a single token.""" if self.current_block.is_comment: if kind == tokenize.COMMENT: self.current_block.add(string, start, end, line) @@ -98,19 +103,18 @@ def process_token(self, kind, string, start, end, line): self.current_block.add(string, start, end, line) def new_noncomment(self, start_lineno, end_lineno): - """ We are transitioning from a noncomment to a comment. 
- """ + """We are transitioning from a noncomment to a comment.""" block = NonComment(start_lineno, end_lineno) self.blocks.append(block) self.current_block = block def new_comment(self, string, start, end, line): - """ Possibly add a new comment. + """Possibly add a new comment. Only adds a new comment if this comment is the only thing on the line. Otherwise, it extends the noncomment block. """ - prefix = line[:start[1]] + prefix = line[: start[1]] if prefix.strip(): # Oops! Trailing comment, not a comment block. self.current_block.add(string, start, end, line) @@ -121,7 +125,7 @@ def new_comment(self, string, start, end, line): self.current_block = block def make_index(self): - """ Make the index mapping lines of actual code to their associated + """Make the index mapping lines of actual code to their associated prefix comments. """ for prev, block in zip(self.blocks[:-1], self.blocks[1:]): @@ -129,30 +133,28 @@ def make_index(self): self.index[block.start_lineno] = prev def search_for_comment(self, lineno, default=None): - """ Find the comment block just before the given line number. + """Find the comment block just before the given line number. Returns None (or the specified default) if there is no such block. """ if not self.index: self.make_index() block = self.index.get(lineno, None) - text = getattr(block, 'text', default) + text = getattr(block, "text", default) return text def strip_comment_marker(text): - """ Strip # markers at the front of a block of comment text. - """ + """Strip # markers at the front of a block of comment text.""" lines = [] for line in text.splitlines(): - lines.append(line.lstrip('#')) - text = textwrap.dedent('\n'.join(lines)) + lines.append(line.lstrip("#")) + text = textwrap.dedent("\n".join(lines)) return text def get_class_traits(klass): - """ Yield all of the documentation for trait definitions on a class object. - """ + """Yield all of the documentation for trait definitions on a class object.""" # FIXME: gracefully handle errors here or in the caller? source = inspect.getsource(klass) cb = CommentBlocker() @@ -164,6 +166,5 @@ def get_class_traits(klass): if isinstance(node, compiler.ast.Assign): name = node.nodes[0].name rhs = unparse(node.expr).strip() - doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default="")) yield name, rhs, doc - diff --git a/doc/numpydoc/numpydoc/compiler_unparse.py b/doc/numpydoc/numpydoc/compiler_unparse.py index fcda8758..a1c28638 100644 --- a/doc/numpydoc/numpydoc/compiler_unparse.py +++ b/doc/numpydoc/numpydoc/compiler_unparse.py @@ -13,35 +13,44 @@ import sys -from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +from compiler.ast import Add, Const, Div, Mul, Sub, Tuple if sys.version_info[0] >= 3: from io import StringIO else: from io import StringIO + def unparse(ast, single_line_functions=False): s = StringIO() UnparseCompilerAst(ast, s, single_line_functions) return s.getvalue().lstrip() -op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, - 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +op_precedence = { + "compiler.ast.Power": 3, + "compiler.ast.Mul": 2, + "compiler.ast.Div": 2, + "compiler.ast.Add": 1, + "compiler.ast.Sub": 1, +} + class UnparseCompilerAst: - """ Methods in this class recursively traverse an AST and - output source code for the abstract syntax; original formatting - is disregarged. 
+    """Methods in this class recursively traverse an AST and
+    output source code for the abstract syntax;  original formatting
+    is disregarded.
     """
 
     #########################################################################
     # object interface.
     #########################################################################
 
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
+    def __init__(self, tree, file=sys.stdout, single_line_functions=False):
+        """Unparser(tree, file=sys.stdout) -> None.
 
-            Print the source for tree to file.
+        Print the source for tree to file.
         """
         self.f = file
         self._single_func = single_line_functions
@@ -57,10 +66,10 @@ def __init__(self, tree, file = sys.stdout, single_line_functions=False):
 
     ### format, output, and dispatch methods ################################
 
-    def _fill(self, text = ""):
+    def _fill(self, text=""):
         "Indent a piece of text, according to the current indentation level"
         if self._do_indent:
-            self._write("\n"+" "*self._indent + text)
+            self._write("\n" + " " * self._indent + text)
         else:
             self._write(text)
@@ -83,12 +92,11 @@ def _dispatch(self, tree):
         for t in tree:
             self._dispatch(t)
             return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+        meth = getattr(self, "_" + tree.__class__.__name__)
+        if tree.__class__.__name__ == "NoneType" and not self._do_indent:
             return
         meth(tree)
 
-
     #########################################################################
     # compiler.ast unparsing methods.
     #
@@ -97,27 +105,26 @@ def _dispatch(self, tree):
     #########################################################################
 
     def _Add(self, t):
-        self.__binary_op(t, '+')
+        self.__binary_op(t, "+")
 
     def _And(self, t):
         self._write(" (")
         for i, node in enumerate(t.nodes):
             self._dispatch(node)
-            if i != len(t.nodes)-1:
+            if i != len(t.nodes) - 1:
                 self._write(") and (")
         self._write(")")
 
     def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
+        """Handle assigning an attribute of an object"""
         self._dispatch(t.expr)
-        self._write('.'+t.attrname)
+        self._write("." + t.attrname)
 
     def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
+        """Expression Assignment such as "a = 1".
 
-            This only handles assignment in expressions. Keyword assignment
-            is handled separately.
+        This only handles assignment in expressions. Keyword assignment
+        is handled separately.
         """
         self._fill()
         for target in t.nodes:
@@ -125,18 +132,17 @@ def _Assign(self, t):
             self._write(" = ")
             self._dispatch(t.expr)
         if not self._do_indent:
-            self._write('; ')
+            self._write("; ")
 
     def _AssName(self, t):
-        """ Name on left hand side of expression.
+        """Name on left hand side of expression.
 
-            Treat just like a name on the right side of an expression.
+        Treat just like a name on the right side of an expression.
         """
         self._Name(t)
 
     def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
+        """Tuple on left hand side of an expression."""
 
         # _write each elements, separated by a comma.
         for element in t.nodes[:-1]:
@@ -148,56 +154,58 @@ def _AssTuple(self, t):
             self._dispatch(last_element)
 
     def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
+        """+=,-=,*=,/=,**=, etc. operations"""
 
         self._fill()
         self._dispatch(t.node)
-        self._write(' '+t.op+' ')
+        self._write(" " + t.op + " ")
         self._dispatch(t.expr)
         if not self._do_indent:
-            self._write(';')
+            self._write(";")
 
     def _Bitand(self, t):
-        """ Bit and operation.
- """ + """Bit and operation.""" for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") - if i != len(t.nodes)-1: + if i != len(t.nodes) - 1: self._write(" & ") def _Bitor(self, t): - """ Bit or operation - """ + """Bit or operation""" for i, node in enumerate(t.nodes): self._write("(") self._dispatch(node) self._write(")") - if i != len(t.nodes)-1: + if i != len(t.nodes) - 1: self._write(" | ") def _CallFunc(self, t): - """ Function call. - """ + """Function call.""" self._dispatch(t.node) self._write("(") comma = False for e in t.args: - if comma: self._write(", ") - else: comma = True + if comma: + self._write(", ") + else: + comma = True self._dispatch(e) if t.star_args: - if comma: self._write(", ") - else: comma = True + if comma: + self._write(", ") + else: + comma = True self._write("*") self._dispatch(t.star_args) if t.dstar_args: - if comma: self._write(", ") - else: comma = True + if comma: + self._write(", ") + else: + comma = True self._write("**") self._dispatch(t.dstar_args) self._write(")") @@ -209,67 +217,62 @@ def _Compare(self, t): self._dispatch(expr) def _Const(self, t): - """ A constant value such as an integer value, 3, or a string, "hello". - """ + """A constant value such as an integer value, 3, or a string, "hello".""" self._dispatch(t.value) def _Decorators(self, t): - """ Handle function decorators (eg. @has_units) - """ + """Handle function decorators (eg. @has_units)""" for node in t.nodes: self._dispatch(node) def _Dict(self, t): self._write("{") - for i, (k, v) in enumerate(t.items): + for i, (k, v) in enumerate(t.items): self._dispatch(k) self._write(": ") self._dispatch(v) - if i < len(t.items)-1: + if i < len(t.items) - 1: self._write(", ") self._write("}") def _Discard(self, t): - """ Node for when return value is ignored such as in "foo(a)". - """ + """Node for when return value is ignored such as in "foo(a)".""" self._fill() self._dispatch(t.expr) def _Div(self, t): - self.__binary_op(t, '/') + self.__binary_op(t, "/") def _Ellipsis(self, t): self._write("...") def _From(self, t): - """ Handle "from xyz import foo, bar as baz". - """ + """Handle "from xyz import foo, bar as baz".""" # fixme: Are From and ImportFrom handled differently? 
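
As a usage sketch for this unparser (Python 2 only, since the compiler package it
builds on was removed in Python 3; the expression and import path are assumptions
based on the vendored package layout):

    from compiler import parse

    from numpydoc.compiler_unparse import unparse

    tree = parse("result = a / (b + c)")
    print(unparse(tree))  # result = a/(b+c), parentheses kept by operator precedence
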
self._fill("from ") self._write(t.modname) self._write(" import ") - for i, (name,asname) in enumerate(t.names): + for i, (name, asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: - self._write(" as "+asname) + self._write(" as " + asname) def _Function(self, t): - """ Handle function definitions - """ + """Handle function definitions""" if t.decorators is not None: self._fill("@") self._dispatch(t.decorators) - self._fill("def "+t.name + "(") + self._fill("def " + t.name + "(") defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) for i, arg in enumerate(zip(t.argnames, defaults)): self._write(arg[0]) if arg[1] is not None: - self._write('=') + self._write("=") self._dispatch(arg[1]) - if i < len(t.argnames)-1: - self._write(', ') + if i < len(t.argnames) - 1: + self._write(", ") self._write(")") if self._single_func: self._do_indent = False @@ -279,21 +282,20 @@ def _Function(self, t): self._do_indent = True def _Getattr(self, t): - """ Handle getting an attribute of an object - """ + """Handle getting an attribute of an object""" if isinstance(t.expr, (Div, Mul, Sub, Add)): - self._write('(') + self._write("(") self._dispatch(t.expr) - self._write(')') + self._write(")") else: self._dispatch(t.expr) - - self._write('.'+t.attrname) - + + self._write("." + t.attrname) + def _If(self, t): self._fill() - - for i, (compare,code) in enumerate(t.tests): + + for i, (compare, code) in enumerate(t.tests): if i == 0: self._write("if ") else: @@ -312,7 +314,7 @@ def _If(self, t): self._dispatch(t.else_) self._leave() self._write("\n") - + def _IfExp(self, t): self._dispatch(t.then) self._write(" if ") @@ -324,29 +326,27 @@ def _IfExp(self, t): self._write(")") def _Import(self, t): - """ Handle "import xyz.foo". - """ + """Handle "import xyz.foo".""" self._fill("import ") - - for i, (name,asname) in enumerate(t.names): + + for i, (name, asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: - self._write(" as "+asname) + self._write(" as " + asname) def _Keyword(self, t): - """ Keyword value assignment within function calls and definitions. 
- """ + """Keyword value assignment within function calls and definitions.""" self._write(t.name) self._write("=") self._dispatch(t.expr) - + def _List(self, t): self._write("[") - for i,node in enumerate(t.nodes): + for i, node in enumerate(t.nodes): self._dispatch(node) - if i < len(t.nodes)-1: + if i < len(t.nodes) - 1: self._write(", ") self._write("]") @@ -356,27 +356,27 @@ def _Module(self, t): self._dispatch(t.node) def _Mul(self, t): - self.__binary_op(t, '*') + self.__binary_op(t, "*") def _Name(self, t): self._write(t.name) def _NoneType(self, t): self._write("None") - + def _Not(self, t): - self._write('not (') + self._write("not (") self._dispatch(t.expr) - self._write(')') - + self._write(")") + def _Or(self, t): self._write(" (") for i, node in enumerate(t.nodes): self._dispatch(node) - if i != len(t.nodes)-1: + if i != len(t.nodes) - 1: self._write(") or (") self._write(")") - + def _Pass(self, t): self._write("pass\n") @@ -388,23 +388,25 @@ def _Printnl(self, t): self._write(", ") comma = False for node in t.nodes: - if comma: self._write(', ') - else: comma = True + if comma: + self._write(", ") + else: + comma = True self._dispatch(node) def _Power(self, t): - self.__binary_op(t, '**') + self.__binary_op(t, "**") def _Return(self, t): self._fill("return ") if t.value: if isinstance(t.value, Tuple): - text = ', '.join([ name.name for name in t.value.asList() ]) + text = ", ".join([name.name for name in t.value.asList()]) self._write(text) else: self._dispatch(t.value) if not self._do_indent: - self._write('; ') + self._write("; ") def _Slice(self, t): self._dispatch(t.expr) @@ -414,7 +416,7 @@ def _Slice(self, t): self._write(":") if t.upper: self._dispatch(t.upper) - #if t.step: + # if t.step: # self._write(":") # self._dispatch(t.step) self._write("]") @@ -431,7 +433,7 @@ def _Stmt(self, tree): self._dispatch(node) def _Sub(self, t): - self.__binary_op(t, '-') + self.__binary_op(t, "-") def _Subscript(self, t): self._dispatch(t.expr) @@ -449,15 +451,15 @@ def _TryExcept(self, t): self._leave() for handler in t.handlers: - self._fill('except ') + self._fill("except ") self._dispatch(handler[0]) if handler[1] is not None: - self._write(', ') + self._write(", ") self._dispatch(handler[1]) self._enter() self._dispatch(handler[2]) self._leave() - + if t.else_: self._fill("else") self._enter() @@ -465,7 +467,6 @@ def _TryExcept(self, t): self._leave() def _Tuple(self, t): - if not t.nodes: # Empty tuple. 
self._write("()") @@ -482,26 +483,26 @@ def _Tuple(self, t): self._dispatch(last_element) self._write(")") - + def _UnaryAdd(self, t): self._write("+") self._dispatch(t.expr) - + def _UnarySub(self, t): self._write("-") - self._dispatch(t.expr) + self._dispatch(t.expr) def _With(self, t): - self._fill('with ') + self._fill("with ") self._dispatch(t.expr) if t.vars: - self._write(' as ') + self._write(" as ") self._dispatch(t.vars.name) self._enter() self._dispatch(t.body) self._leave() - self._write('\n') - + self._write("\n") + def _int(self, t): self._write(repr(t)) @@ -509,27 +510,31 @@ def __binary_op(self, t, symbol): # Check if parenthesis are needed on left side and then dispatch has_paren = False left_class = str(t.left.__class__) - if (left_class in list(op_precedence.keys()) and - op_precedence[left_class] < op_precedence[str(t.__class__)]): + if ( + left_class in list(op_precedence.keys()) + and op_precedence[left_class] < op_precedence[str(t.__class__)] + ): has_paren = True if has_paren: - self._write('(') + self._write("(") self._dispatch(t.left) if has_paren: - self._write(')') + self._write(")") # Write the appropriate symbol for operator self._write(symbol) # Check if parenthesis are needed on the right side and then dispatch has_paren = False right_class = str(t.right.__class__) - if (right_class in list(op_precedence.keys()) and - op_precedence[right_class] < op_precedence[str(t.__class__)]): + if ( + right_class in list(op_precedence.keys()) + and op_precedence[right_class] < op_precedence[str(t.__class__)] + ): has_paren = True if has_paren: - self._write('(') + self._write("(") self._dispatch(t.right) if has_paren: - self._write(')') + self._write(")") def _float(self, t): # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' @@ -538,7 +543,7 @@ def _float(self, t): def _str(self, t): self._write(repr(t)) - + def _tuple(self, t): self._write(str(t)) @@ -549,6 +554,7 @@ def _tuple(self, t): # modify some of the methods below so that they work for compiler.ast. ######################################################################### + # # stmt # def _Expr(self, tree): # self._fill() @@ -860,6 +866,3 @@ def _tuple(self, t): # self._dispatch(t.args) # self._write(": ") # self._dispatch(t.body) - - - diff --git a/doc/numpydoc/numpydoc/docscrape.py b/doc/numpydoc/numpydoc/docscrape.py index 723335da..bef19a1a 100644 --- a/doc/numpydoc/numpydoc/docscrape.py +++ b/doc/numpydoc/numpydoc/docscrape.py @@ -3,19 +3,18 @@ """ +import collections import inspect -import textwrap -import re import pydoc -from warnings import warn -import collections +import re import sys +import textwrap +from warnings import warn class Reader(object): - """A line-based string reader. + """A line-based string reader.""" - """ def __init__(self, data): """ Parameters @@ -24,10 +23,10 @@ def __init__(self, data): String with lines separated by '\n'. 
""" - if isinstance(data,list): + if isinstance(data, list): self._str = data else: - self._str = data.split('\n') # store string as list of lines + self._str = data.split("\n") # store string as list of lines self.reset() @@ -35,7 +34,7 @@ def __getitem__(self, n): return self._str[n] def reset(self): - self._l = 0 # current line nr + self._l = 0 # current line nr def read(self): if not self.eof(): @@ -43,10 +42,10 @@ def read(self): self._l += 1 return out else: - return '' + return "" def seek_next_non_empty_line(self): - for l in self[self._l:]: + for l in self[self._l :]: if l.strip(): break else: @@ -59,63 +58,66 @@ def read_to_condition(self, condition_func): start = self._l for line in self[start:]: if condition_func(line): - return self[start:self._l] + return self[start : self._l] self._l += 1 if self.eof(): - return self[start:self._l+1] + return self[start : self._l + 1] return [] def read_to_next_empty_line(self): self.seek_next_non_empty_line() + def is_empty(line): return not line.strip() + return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) + return line.strip() and (len(line.lstrip()) == len(line)) + return self.read_to_condition(is_unindented) - def peek(self,n=0): + def peek(self, n=0): if self._l + n < len(self._str): return self[self._l + n] else: - return '' + return "" def is_empty(self): - return not ''.join(self._str).strip() + return not "".join(self._str).strip() class NumpyDocString(object): def __init__(self, docstring, config={}): - docstring = textwrap.dedent(docstring).split('\n') + docstring = textwrap.dedent(docstring).split("\n") self._doc = Reader(docstring) self._parsed_data = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } + "Signature": "", + "Summary": [""], + "Extended Summary": [], + "Parameters": [], + "Returns": [], + "Raises": [], + "Warns": [], + "Other Parameters": [], + "Attributes": [], + "Methods": [], + "See Also": [], + "Notes": [], + "Warnings": [], + "References": "", + "Examples": "", + "index": {}, + } self._parse() - def __getitem__(self,key): + def __getitem__(self, key): return self._parsed_data[key] - def __setitem__(self,key,val): + def __setitem__(self, key, val): if key not in self._parsed_data: warn("Unknown section %s" % key) else: @@ -129,29 +131,31 @@ def _is_at_section(self): l1 = self._doc.peek().strip() # e.g. Parameters - if l1.startswith('.. index::'): + if l1.startswith(".. 
index::"):
             return True
 
-        l2 = self._doc.peek(1).strip() # ---------- or ==========
-        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+        l2 = self._doc.peek(1).strip()  # ---------- or ==========
+        return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1))
 
-    def _strip(self,doc):
+    def _strip(self, doc):
         i = 0
         j = 0
-        for i,line in enumerate(doc):
-            if line.strip(): break
+        for i, line in enumerate(doc):
+            if line.strip():
+                break
 
-        for j,line in enumerate(doc[::-1]):
-            if line.strip(): break
+        for j, line in enumerate(doc[::-1]):
+            if line.strip():
+                break
 
-        return doc[i:len(doc)-j]
+        return doc[i : len(doc) - j]
 
     def _read_to_next_section(self):
         section = self._doc.read_to_next_empty_line()
 
         while not self._is_at_section() and not self._doc.eof():
-            if not self._doc.peek(-1).strip(): # previous line was empty
-                section += ['']
+            if not self._doc.peek(-1).strip():  # previous line was empty
+                section += [""]
 
             section += self._doc.read_to_next_empty_line()
 
@@ -162,33 +166,36 @@ def _read_sections(self):
             data = self._read_to_next_section()
             name = data[0].strip()
 
-            if name.startswith('..'): # index section
+            if name.startswith(".."):  # index section
                 yield name, data[1:]
             elif len(data) < 2:
                 yield StopIteration
             else:
                 yield name, self._strip(data[2:])
 
-    def _parse_param_list(self,content):
+    def _parse_param_list(self, content):
         r = Reader(content)
         params = []
         while not r.eof():
             header = r.read().strip()
-            if ' : ' in header:
-                arg_name, arg_type = header.split(' : ')[:2]
+            if " : " in header:
+                arg_name, arg_type = header.split(" : ")[:2]
             else:
-                arg_name, arg_type = header, ''
+                arg_name, arg_type = header, ""
 
             desc = r.read_to_next_unindented_line()
             desc = dedent_lines(desc)
 
-            params.append((arg_name,arg_type,desc))
+            params.append((arg_name, arg_type, desc))
 
         return params
 
+    _name_rgx = re.compile(
+        r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+        r" (?P<name2>[a-zA-Z0-9_.-]+))\s*",
+        re.X,
+    )
 
-    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
-                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
     def _parse_see_also(self, content):
         """
         func_name : Descriptive text
@@ -221,20 +228,21 @@ def push_item(name, rest):
             rest = []
 
         for line in content:
-            if not line.strip(): continue
+            if not line.strip():
+                continue
 
             m = self._name_rgx.match(line)
-            if m and line[m.end():].strip().startswith(':'):
+            if m and line[m.end() :].strip().startswith(":"):
                 push_item(current_func, rest)
-                current_func, line = line[:m.end()], line[m.end():]
-                rest = [line.split(':', 1)[1].strip()]
+                current_func, line = line[: m.end()], line[m.end() :]
+                rest = [line.split(":", 1)[1].strip()]
                 if not rest[0]:
                     rest = []
-            elif not line.startswith(' '):
+            elif not line.startswith(" "):
                 push_item(current_func, rest)
                 current_func = None
-                if ',' in line:
-                    for func in line.split(','):
+                if "," in line:
+                    for func in line.split(","):
                         if func.strip():
                             push_item(func, [])
                 elif line.strip():
@@ -250,17 +258,18 @@ def _parse_index(self, section, content):
            :refguide: something, else, and more
 
         """
+
         def strip_each_in(lst):
             return [s.strip() for s in lst]
 
         out = {}
-        section = section.split('::')
+        section = section.split("::")
         if len(section) > 1:
-            out['default'] = strip_each_in(section[1].split(','))[0]
+            out["default"] = strip_each_in(section[1].split(","))[0]
         for line in content:
-            line = line.split(':')
+            line = line.split(":")
             if len(line) > 2:
-                out[line[1]] = strip_each_in(line[2].split(','))
+                out[line[1]] = strip_each_in(line[2].split(","))
         return out
 
     def _parse_summary(self):
@@ -272,61 +281,68 @@
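A minimal sketch of how this parser is typically driven (the import path assumes
the vendored package layout; the docstring content is illustrative):

    from numpydoc.docscrape import NumpyDocString

    docstring = """
    Add two numbers.

    Parameters
    ----------
    a : int
        The first operand.
    """
    doc = NumpyDocString(docstring)
    print(doc["Parameters"][0][:2])  # ('a', 'int')
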
while True: summary = self._doc.read_to_next_empty_line() summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str + if re.compile("^([\w., ]+=)?\s*[\w\.]+\(.*\)$").match(summary_str): + self["Signature"] = summary_str if not self._is_at_section(): continue break if summary is not None: - self['Summary'] = summary + self["Summary"] = summary if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() + self["Extended Summary"] = self._read_to_next_section() def _parse(self): self._doc.reset() self._parse_summary() - for (section,content) in self._read_sections(): - if not section.startswith('..'): - section = ' '.join([s.capitalize() for s in section.split(' ')]) - if section in ('Parameters', 'Returns', 'Raises', 'Warns', - 'Other Parameters', 'Attributes', 'Methods'): + for section, content in self._read_sections(): + if not section.startswith(".."): + section = " ".join([s.capitalize() for s in section.split(" ")]) + if section in ( + "Parameters", + "Returns", + "Raises", + "Warns", + "Other Parameters", + "Attributes", + "Methods", + ): self[section] = self._parse_param_list(content) - elif section.startswith('.. index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) + elif section.startswith(".. index::"): + self["index"] = self._parse_index(section, content) + elif section == "See Also": + self["See Also"] = self._parse_see_also(content) else: self[section] = content # string conversion routines - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] + def _str_header(self, name, symbol="-"): + return [name, len(name) * symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' '*indent + line] + out += [" " * indent + line] return out def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*','\*')] + [''] + if self["Signature"]: + return [self["Signature"].replace("*", "\*")] + [""] else: - return [''] + return [""] def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] + if self["Summary"]: + return self["Summary"] + [""] else: return [] def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] + if self["Extended Summary"]: + return self["Extended Summary"] + [""] else: return [] @@ -334,13 +350,13 @@ def _str_param_list(self, name): out = [] if self[name]: out += self._str_header(name) - for param,param_type,desc in self[name]: + for param, param_type, desc in self[name]: if param_type: - out += ['%s : %s' % (param, param_type)] + out += ["%s : %s" % (param, param_type)] else: out += [param] out += self._str_indent(desc) - out += [''] + out += [""] return out def _str_section(self, name): @@ -348,89 +364,97 @@ def _str_section(self, name): if self[name]: out += self._str_header(name) out += self[name] - out += [''] + out += [""] return out def _str_see_also(self, func_role): - if not self['See Also']: return [] + if not self["See Also"]: + return [] out = [] out += self._str_header("See Also") last_had_desc = True - for func, desc, role in self['See Also']: + for func, desc, role in self["See Also"]: if role: - link = ':%s:`%s`' % (role, func) + link = ":%s:`%s`" % (role, func) elif func_role: - link = ':%s:`%s`' % (func_role, func) + link = ":%s:`%s`" % (func_role, func) else: link = "`%s`_" % func if desc or 
last_had_desc: - out += [''] + out += [""] out += [link] else: out[-1] += ", %s" % link if desc: - out += self._str_indent([' '.join(desc)]) + out += self._str_indent([" ".join(desc)]) last_had_desc = True else: last_had_desc = False - out += [''] + out += [""] return out def _str_index(self): - idx = self['index'] + idx = self["index"] out = [] - out += ['.. index:: %s' % idx.get('default','')] + out += [".. index:: %s" % idx.get("default", "")] for section, references in list(idx.items()): - if section == 'default': + if section == "default": continue - out += [' :%s: %s' % (section, ', '.join(references))] + out += [" :%s: %s" % (section, ", ".join(references))] return out - def __str__(self, func_role=''): + def __str__(self, func_role=""): out = [] out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): + for param_list in ( + "Parameters", + "Returns", + "Other Parameters", + "Raises", + "Warns", + ): out += self._str_param_list(param_list) - out += self._str_section('Warnings') + out += self._str_section("Warnings") out += self._str_see_also(func_role) - for s in ('Notes','References','Examples'): + for s in ("Notes", "References", "Examples"): out += self._str_section(s) - for param_list in ('Attributes', 'Methods'): + for param_list in ("Attributes", "Methods"): out += self._str_param_list(param_list) out += self._str_index() - return '\n'.join(out) + return "\n".join(out) -def indent(str,indent=4): - indent_str = ' '*indent +def indent(str, indent=4): + indent_str = " " * indent if str is None: return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) + lines = str.split("\n") + return "\n".join(indent_str + l for l in lines) + def dedent_lines(lines): """Deindent a list of lines maximally""" return textwrap.dedent("\n".join(lines)).split("\n") -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' + +def header(text, style="-"): + return text + "\n" + style * len(text) + "\n" class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func', doc=None, config={}): + def __init__(self, func, role="func", doc=None, config={}): self._f = func - self._role = role # e.g. "func" or "meth" + self._role = role # e.g. 
"func" or "meth" if doc is None: if func is None: raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or '' + doc = inspect.getdoc(func) or "" NumpyDocString.__init__(self, doc) - if not self['Signature'] and func is not None: + if not self["Signature"] and func is not None: func, func_name = self.get_func() try: # try to read signature @@ -439,54 +463,49 @@ def __init__(self, func, role='func', doc=None, config={}): else: argspec = inspect.getargspec(func) argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*','\*') - signature = '%s%s' % (func_name, argspec) + argspec = argspec.replace("*", "\*") + signature = "%s%s" % (func_name, argspec) except TypeError as e: - signature = '%s()' % func_name - self['Signature'] = signature + signature = "%s()" % func_name + self["Signature"] = signature def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) + func_name = getattr(self._f, "__name__", self.__class__.__name__) if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) + func = getattr(self._f, "__call__", self._f.__init__) else: func = self._f return func, func_name def __str__(self): - out = '' + out = "" func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') + signature = self["Signature"].replace("*", "\*") - roles = {'func': 'function', - 'meth': 'method'} + roles = {"func": "function", "meth": "method"} if self._role: if self._role not in roles: print("Warning: invalid role %s" % self._role) - out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), - func_name) + out += ".. %s:: %s\n \n\n" % (roles.get(self._role, ""), func_name) out += super(FunctionDoc, self).__str__(func_role=self._role) return out class ClassDoc(NumpyDocString): + extra_public_methods = ["__call__"] - extra_public_methods = ['__call__'] - - def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config={}): + def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls - self.show_inherited_members = config.get('show_inherited_class_members', - True) + self.show_inherited_members = config.get("show_inherited_class_members", True) - if modulename and not modulename.endswith('.'): - modulename += '.' + if modulename and not modulename.endswith("."): + modulename += "." 
self._mod = modulename if doc is None: @@ -496,21 +515,24 @@ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, NumpyDocString.__init__(self, doc) - if config.get('show_class_members', True): + if config.get("show_class_members", True): + def splitlines_x(s): if not s: return [] else: return s.splitlines() - for field, items in [('Methods', self.methods), - ('Attributes', self.properties)]: + for field, items in [ + ("Methods", self.methods), + ("Attributes", self.properties), + ]: if not self[field]: doc_list = [] for name in sorted(items): try: doc_item = pydoc.getdoc(getattr(self._cls, name)) - doc_list.append((name, '', splitlines_x(doc_item))) + doc_list.append((name, "", splitlines_x(doc_item))) except AttributeError: pass # method doesn't exist self[field] = doc_list @@ -519,21 +541,33 @@ def splitlines_x(s): def methods(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if ((not name.startswith('_') - or name in self.extra_public_methods) - and isinstance(func, collections.Callable) - and self._is_show_member(name))] + return [ + name + for name, func in inspect.getmembers(self._cls) + if ( + (not name.startswith("_") or name in self.extra_public_methods) + and isinstance(func, collections.Callable) + and self._is_show_member(name) + ) + ] @property def properties(self): if self._cls is None: return [] - return [name for name, func in inspect.getmembers(self._cls) - if (not name.startswith('_') and - (func is None or isinstance(func, property) or - inspect.isgetsetdescriptor(func)) - and self._is_show_member(name))] + return [ + name + for name, func in inspect.getmembers(self._cls) + if ( + not name.startswith("_") + and ( + func is None + or isinstance(func, property) + or inspect.isgetsetdescriptor(func) + ) + and self._is_show_member(name) + ) + ] def _is_show_member(self, name): if self.show_inherited_members: diff --git a/doc/numpydoc/numpydoc/docscrape_sphinx.py b/doc/numpydoc/numpydoc/docscrape_sphinx.py index bb357df0..5e42397b 100644 --- a/doc/numpydoc/numpydoc/docscrape_sphinx.py +++ b/doc/numpydoc/numpydoc/docscrape_sphinx.py @@ -1,14 +1,18 @@ +import collections +import inspect +import pydoc +import re +import sys +import textwrap - -import sys, re, inspect, textwrap, pydoc import sphinx -import collections -from .docscrape import NumpyDocString, FunctionDoc, ClassDoc + +from .docscrape import ClassDoc, FunctionDoc, NumpyDocString if sys.version_info[0] >= 3: sixu = lambda s: s else: - sixu = lambda s: str(s, 'unicode_escape') + sixu = lambda s: str(s, "unicode_escape") class SphinxDocString(NumpyDocString): @@ -17,74 +21,76 @@ def __init__(self, docstring, config={}): self.load_config(config) def load_config(self, config): - self.use_plots = config.get('use_plots', False) - self.class_members_toctree = config.get('class_members_toctree', True) + self.use_plots = config.get("use_plots", False) + self.class_members_toctree = config.get("class_members_toctree", True) # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. rubric:: ' + name, ''] + def _str_header(self, name, symbol="`"): + return [".. 
rubric:: " + name, ""] def _str_field_list(self, name): - return [':' + name + ':'] + return [":" + name + ":"] def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' '*indent + line] + out += [" " * indent + line] return out def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] + return [""] + if self["Signature"]: + return ["``%s``" % self["Signature"]] + [""] else: - return [''] + return [""] def _str_summary(self): - return self['Summary'] + [''] + return self["Summary"] + [""] def _str_extended_summary(self): - return self['Extended Summary'] + [''] + return self["Extended Summary"] + [""] def _str_returns(self): out = [] - if self['Returns']: - out += self._str_field_list('Returns') - out += [''] - for param, param_type, desc in self['Returns']: + if self["Returns"]: + out += self._str_field_list("Returns") + out += [""] + for param, param_type, desc in self["Returns"]: if param_type: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) + out += self._str_indent( + ["**%s** : %s" % (param.strip(), param_type)] + ) else: out += self._str_indent([param.strip()]) if desc: - out += [''] + out += [""] out += self._str_indent(desc, 8) - out += [''] + out += [""] return out def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) - out += [''] + out += [""] for param, param_type, desc in self[name]: if param_type: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) + out += self._str_indent( + ["**%s** : %s" % (param.strip(), param_type)] + ) else: - out += self._str_indent(['**%s**' % param.strip()]) + out += self._str_indent(["**%s**" % param.strip()]) if desc: - out += [''] + out += [""] out += self._str_indent(desc, 8) - out += [''] + out += [""] return out @property def _obj(self): - if hasattr(self, '_cls'): + if hasattr(self, "_cls"): return self._cls - elif hasattr(self, '_f'): + elif hasattr(self, "_f"): return self._f return None @@ -96,11 +102,11 @@ def _str_member_list(self, name): """ out = [] if self[name]: - out += ['.. rubric:: %s' % name, ''] - prefix = getattr(self, '_name', '') + out += [".. rubric:: %s" % name, ""] + prefix = getattr(self, "_name", "") if prefix: - prefix = '~%s.' % prefix + prefix = "~%s." % prefix autosum = [] others = [] @@ -109,9 +115,11 @@ def _str_member_list(self, name): # Check if the referenced member can have a docstring or not param_obj = getattr(self._obj, param, None) - if not (isinstance(param_obj, collections.Callable) - or isinstance(param_obj, property) - or inspect.isgetsetdescriptor(param_obj)): + if not ( + isinstance(param_obj, collections.Callable) + or isinstance(param_obj, property) + or inspect.isgetsetdescriptor(param_obj) + ): param_obj = None if param_obj and (pydoc.getdoc(param_obj) or not desc): @@ -121,152 +129,158 @@ def _str_member_list(self, name): others.append((param, param_type, desc)) if autosum: - out += ['.. autosummary::'] + out += [".. 
autosummary::"] if self.class_members_toctree: - out += [' :toctree:'] - out += [''] + autosum + out += [" :toctree:"] + out += [""] + autosum if others: maxlen_0 = max(3, max([len(x[0]) for x in others])) - hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 - fmt = sixu('%%%ds %%s ') % (maxlen_0,) - out += ['', hdr] + hdr = sixu("=") * maxlen_0 + sixu(" ") + sixu("=") * 10 + fmt = sixu("%%%ds %%s ") % (maxlen_0,) + out += ["", hdr] for param, param_type, desc in others: desc = sixu(" ").join(x.strip() for x in desc).strip() if param_type: desc = "(%s) %s" % (param_type, desc) out += [fmt % (param.strip(), desc)] out += [hdr] - out += [''] + out += [""] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) - out += [''] + out += [""] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content - out += [''] + out += [""] return out def _str_see_also(self, func_role): out = [] - if self['See Also']: + if self["See Also"]: see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] + out = [".. seealso::", ""] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) + if self["Warnings"]: + out = [".. warning::", ""] + out += self._str_indent(self["Warnings"]) return out def _str_index(self): - idx = self['index'] + idx = self["index"] out = [] if len(idx) == 0: return out - out += ['.. index:: %s' % idx.get('default','')] + out += [".. index:: %s" % idx.get("default", "")] for section, references in list(idx.items()): - if section == 'default': + if section == "default": continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] + elif section == "refguide": + out += [" single: %s" % (", ".join(references))] else: - out += [' %s: %s' % (section, ','.join(references))] + out += [" %s: %s" % (section, ",".join(references))] return out def _str_references(self): out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] + if self["References"]: + out += self._str_header("References") + if isinstance(self["References"], str): + self["References"] = [self["References"]] + out.extend(self["References"]) + out += [""] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": - out += ['.. only:: latex',''] + out += [".. only:: latex", ""] else: - out += ['.. latexonly::',''] + out += [".. latexonly::", ""] items = [] - for line in self['References']: - m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) + for line in self["References"]: + m = re.match(r".. \[([a-z0-9._-]+)\]", line, re.I) if m: items.append(m.group(1)) - out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] + out += [" " + ", ".join(["[%s]_" % item for item in items]), ""] return out def _str_examples(self): - examples_str = "\n".join(self['Examples']) + examples_str = "\n".join(self["Examples"]) - if (self.use_plots and 'import matplotlib' in examples_str - and 'plot::' not in examples_str): + if ( + self.use_plots + and "import matplotlib" in examples_str + and "plot::" not in examples_str + ): out = [] - out += self._str_header('Examples') - out += ['.. 
plot::', ''] - out += self._str_indent(self['Examples']) - out += [''] + out += self._str_header("Examples") + out += [".. plot::", ""] + out += self._str_indent(self["Examples"]) + out += [""] return out else: - return self._str_section('Examples') + return self._str_section("Examples") def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() - out += self._str_index() + [''] + out += self._str_index() + [""] out += self._str_summary() out += self._str_extended_summary() - out += self._str_param_list('Parameters') + out += self._str_param_list("Parameters") out += self._str_returns() - for param_list in ('Other Parameters', 'Raises', 'Warns'): + for param_list in ("Other Parameters", "Raises", "Warns"): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) - out += self._str_section('Notes') + out += self._str_section("Notes") out += self._str_references() out += self._str_examples() - for param_list in ('Attributes', 'Methods'): + for param_list in ("Attributes", "Methods"): out += self._str_member_list(param_list) - out = self._str_indent(out,indent) - return '\n'.join(out) + out = self._str_indent(out, indent) + return "\n".join(out) + class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) + class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) + class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj self.load_config(config) SphinxDocString.__init__(self, doc, config=config) + def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): - what = 'class' + what = "class" elif inspect.ismodule(obj): - what = 'module' + what = "module" elif isinstance(obj, collections.Callable): - what = 'function' + what = "function" else: - what = 'object' - if what == 'class': - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, - config=config) - elif what in ('function', 'method'): + what = "object" + if what == "class": + return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) + elif what in ("function", "method"): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: diff --git a/doc/numpydoc/numpydoc/linkcode.py b/doc/numpydoc/numpydoc/linkcode.py index cba43463..5704a7f3 100644 --- a/doc/numpydoc/numpydoc/linkcode.py +++ b/doc/numpydoc/numpydoc/linkcode.py @@ -11,41 +11,44 @@ """ -import warnings import collections +import warnings -warnings.warn("This extension has been accepted to Sphinx upstream. " - "Use the version from there (Sphinx >= 1.2) " - "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", - FutureWarning, stacklevel=1) +warnings.warn( + "This extension has been accepted to Sphinx upstream. 
" + "Use the version from there (Sphinx >= 1.2) " + "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", + FutureWarning, + stacklevel=1, +) from docutils import nodes - from sphinx import addnodes -from sphinx.locale import _ from sphinx.errors import SphinxError +from sphinx.locale import _ + class LinkcodeError(SphinxError): category = "linkcode error" + def doctree_read(app, doctree): env = app.builder.env - resolve_target = getattr(env.config, 'linkcode_resolve', None) + resolve_target = getattr(env.config, "linkcode_resolve", None) if not isinstance(env.config.linkcode_resolve, collections.Callable): - raise LinkcodeError( - "Function `linkcode_resolve` is not given in conf.py") + raise LinkcodeError("Function `linkcode_resolve` is not given in conf.py") domain_keys = dict( - py=['module', 'fullname'], - c=['names'], - cpp=['names'], - js=['object', 'fullname'], + py=["module", "fullname"], + c=["names"], + cpp=["names"], + js=["object", "fullname"], ) for objnode in doctree.traverse(addnodes.desc): - domain = objnode.get('domain') + domain = objnode.get("domain") uris = set() for signode in objnode: if not isinstance(signode, addnodes.desc_signature): @@ -56,7 +59,7 @@ def doctree_read(app, doctree): for key in domain_keys.get(domain, []): value = signode.get(key) if not value: - value = '' + value = "" info[key] = value if not info: continue @@ -72,12 +75,12 @@ def doctree_read(app, doctree): continue uris.add(uri) - onlynode = addnodes.only(expr='html') - onlynode += nodes.reference('', '', internal=False, refuri=uri) - onlynode[0] += nodes.inline('', _('[source]'), - classes=['viewcode-link']) + onlynode = addnodes.only(expr="html") + onlynode += nodes.reference("", "", internal=False, refuri=uri) + onlynode[0] += nodes.inline("", _("[source]"), classes=["viewcode-link"]) signode += onlynode + def setup(app): - app.connect('doctree-read', doctree_read) - app.add_config_value('linkcode_resolve', None, '') + app.connect("doctree-read", doctree_read) + app.add_config_value("linkcode_resolve", None, "") diff --git a/doc/numpydoc/numpydoc/numpydoc.py b/doc/numpydoc/numpydoc/numpydoc.py index 2846ed29..3bab844c 100644 --- a/doc/numpydoc/numpydoc/numpydoc.py +++ b/doc/numpydoc/numpydoc/numpydoc.py @@ -17,28 +17,27 @@ """ -import sys -import re +import collections +import inspect import pydoc +import re +import sys + import sphinx -import inspect -import collections -if sphinx.__version__ < '1.0.1': +if sphinx.__version__ < "1.0.1": raise RuntimeError("Sphinx 1.0.1 or newer is required") -from .docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive + +from .docscrape_sphinx import SphinxDocString, get_doc_object if sys.version_info[0] >= 3: sixu = lambda s: s else: - sixu = lambda s: str(s, 'unicode_escape') + sixu = lambda s: str(s, "unicode_escape") -def mangle_docstrings(app, what, name, obj, options, lines, - reference_offset=[0]): - +def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict( use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members, @@ -46,11 +45,12 @@ def mangle_docstrings(app, what, name, obj, options, lines, class_members_toctree=app.config.numpydoc_class_members_toctree, ) - if what == 'module': + if what == "module": # Strip top title - title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), - re.I|re.S) - lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n")) + title_re = 
re.compile( + sixu("^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*"), re.I | re.S + ) + lines[:] = title_re.sub(sixu(""), sixu("\n").join(lines)).split(sixu("\n")) else: doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg) if sys.version_info[0] >= 3: @@ -59,21 +59,21 @@ def mangle_docstrings(app, what, name, obj, options, lines, doc = str(doc) lines[:] = doc.split(sixu("\n")) - if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ - obj.__name__: - if hasattr(obj, '__module__'): + if app.config.numpydoc_edit_link and hasattr(obj, "__name__") and obj.__name__: + if hasattr(obj, "__module__"): v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) - lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] - lines += [sixu(' %s') % x for x in - (app.config.numpydoc_edit_link % v).split("\n")] + lines += [sixu(""), sixu(".. htmlonly::"), sixu("")] + lines += [ + sixu(" %s") % x for x in (app.config.numpydoc_edit_link % v).split("\n") + ] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() - m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) + m = re.match(sixu("^.. \\[([a-z0-9_.-])\\]"), line, re.I) if m: references.append(m.group(1)) @@ -82,59 +82,68 @@ def mangle_docstrings(app, what, name, obj, options, lines, if references: for i, line in enumerate(lines): for r in references: - if re.match(sixu('^\\d+$'), r): + if re.match(sixu("^\\d+$"), r): new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: new_r = sixu("%s%d") % (r, reference_offset[0]) - lines[i] = lines[i].replace(sixu('[%s]_') % r, - sixu('[%s]_') % new_r) - lines[i] = lines[i].replace(sixu('.. [%s]') % r, - sixu('.. [%s]') % new_r) + lines[i] = lines[i].replace(sixu("[%s]_") % r, sixu("[%s]_") % new_r) + lines[i] = lines[i].replace( + sixu(".. [%s]") % r, sixu(".. 
[%s]") % new_r + ) reference_offset[0] += len(references) + def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` - if (inspect.isclass(obj) and - (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): - return '', '' - - if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return - if not hasattr(obj, '__doc__'): return + if inspect.isclass(obj) and ( + not hasattr(obj, "__init__") + or "initializes x; see " in pydoc.getdoc(obj.__init__) + ): + return "", "" + + if not ( + isinstance(obj, collections.Callable) or hasattr(obj, "__argspec_is_invalid_") + ): + return + if not hasattr(obj, "__doc__"): + return doc = SphinxDocString(pydoc.getdoc(obj)) - if doc['Signature']: - sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) - return sig, sixu('') + if doc["Signature"]: + sig = re.sub(sixu("^[^(]*"), sixu(""), doc["Signature"]) + return sig, sixu("") + def setup(app, get_doc_object_=get_doc_object): - if not hasattr(app, 'add_config_value'): - return # probably called by nose, better bail out + if not hasattr(app, "add_config_value"): + return # probably called by nose, better bail out global get_doc_object get_doc_object = get_doc_object_ - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) - app.add_config_value('numpydoc_edit_link', None, False) - app.add_config_value('numpydoc_use_plots', None, False) - app.add_config_value('numpydoc_show_class_members', True, True) - app.add_config_value('numpydoc_show_inherited_class_members', True, True) - app.add_config_value('numpydoc_class_members_toctree', True, True) + app.connect("autodoc-process-docstring", mangle_docstrings) + app.connect("autodoc-process-signature", mangle_signature) + app.add_config_value("numpydoc_edit_link", None, False) + app.add_config_value("numpydoc_use_plots", None, False) + app.add_config_value("numpydoc_show_class_members", True, True) + app.add_config_value("numpydoc_show_inherited_class_members", True, True) + app.add_config_value("numpydoc_class_members_toctree", True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) -#------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ # Docstring-mangling domains -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain + class ManglingDomainBase(object): directive_mangling_map = {} @@ -145,31 +154,35 @@ def __init__(self, *a, **kw): def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype) + self.directives[name], objtype + ) + class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = 'np' + name = "np" directive_mangling_map = { - 'function': 'function', - 'class': 'class', - 'exception': 'class', - 'method': 'function', - 'classmethod': 'function', - 'staticmethod': 'function', - 'attribute': 'attribute', + "function": "function", + "class": "class", + "exception": "class", + "method": "function", + "classmethod": "function", + "staticmethod": 
"function", + "attribute": "attribute", } indices = [] + class NumpyCDomain(ManglingDomainBase, CDomain): - name = 'np-c' + name = "np-c" directive_mangling_map = { - 'function': 'function', - 'member': 'attribute', - 'macro': 'function', - 'type': 'class', - 'var': 'object', + "function": "function", + "member": "attribute", + "macro": "function", + "type": "class", + "var": "object", } + def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): @@ -177,7 +190,7 @@ def run(self): name = None if self.arguments: - m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) + m = re.match(r"^(.*\s+)?(.*?)(\(.*)?", self.arguments[0]) name = m.group(2).strip() if not name: diff --git a/doc/numpydoc/numpydoc/phantom_import.py b/doc/numpydoc/numpydoc/phantom_import.py index aea18fdc..d1060f06 100644 --- a/doc/numpydoc/numpydoc/phantom_import.py +++ b/doc/numpydoc/numpydoc/phantom_import.py @@ -16,21 +16,28 @@ """ -import imp, sys, compiler, types, os, inspect, re +import imp +import inspect +import os +import re +import sys + def setup(app): - app.connect('builder-inited', initialize) - app.add_config_value('phantom_import_file', None, True) + app.connect("builder-inited", initialize) + app.add_config_value("phantom_import_file", None, True) + def initialize(app): fn = app.config.phantom_import_file - if (fn and os.path.isfile(fn)): + if fn and os.path.isfile(fn): print("[numpydoc] Phantom importing modules from", fn, "...") import_phantom_module(fn) -#------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ # Creating 'phantom' modules from an XML description -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ def import_phantom_module(xml_file): """ Insert a fake Python module to sys.modules, based on a XML file. 
@@ -48,7 +55,7 @@ def import_phantom_module(xml_file): ---------- xml_file : str Name of an XML file to read - + """ import lxml.etree as etree @@ -60,74 +67,81 @@ def import_phantom_module(xml_file): # Sort items so that # - Base classes come before classes inherited from them # - Modules come before their contents - all_nodes = dict([(n.attrib['id'], n) for n in root]) - + all_nodes = dict([(n.attrib["id"], n) for n in root]) + def _get_bases(node, recurse=False): - bases = [x.attrib['ref'] for x in node.findall('base')] + bases = [x.attrib["ref"] for x in node.findall("base")] if recurse: j = 0 while True: try: b = bases[j] - except IndexError: break + except IndexError: + break if b in all_nodes: bases.extend(_get_bases(all_nodes[b])) j += 1 return bases - type_index = ['module', 'class', 'callable', 'object'] - + type_index = ["module", "class", "callable", "object"] + def base_cmp(a, b): x = cmp(type_index.index(a.tag), type_index.index(b.tag)) - if x != 0: return x + if x != 0: + return x - if a.tag == 'class' and b.tag == 'class': + if a.tag == "class" and b.tag == "class": a_bases = _get_bases(a, recurse=True) b_bases = _get_bases(b, recurse=True) x = cmp(len(a_bases), len(b_bases)) - if x != 0: return x - if a.attrib['id'] in b_bases: return -1 - if b.attrib['id'] in a_bases: return 1 - - return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + if x != 0: + return x + if a.attrib["id"] in b_bases: + return -1 + if b.attrib["id"] in a_bases: + return 1 + + return cmp(a.attrib["id"].count("."), b.attrib["id"].count(".")) nodes = root.getchildren() nodes.sort(base_cmp) # Create phantom items for node in nodes: - name = node.attrib['id'] - doc = (node.text or '').decode('string-escape') + "\n" - if doc == "\n": doc = "" + name = node.attrib["id"] + doc = (node.text or "").decode("string-escape") + "\n" + if doc == "\n": + doc = "" # create parent, if missing parent = name while True: - parent = '.'.join(parent.split('.')[:-1]) - if not parent: break - if parent in object_cache: break + parent = ".".join(parent.split(".")[:-1]) + if not parent: + break + if parent in object_cache: + break obj = imp.new_module(parent) object_cache[parent] = obj sys.modules[parent] = obj # create object - if node.tag == 'module': + if node.tag == "module": obj = imp.new_module(name) obj.__doc__ = doc sys.modules[name] = obj - elif node.tag == 'class': - bases = [object_cache[b] for b in _get_bases(node) - if b in object_cache] + elif node.tag == "class": + bases = [object_cache[b] for b in _get_bases(node) if b in object_cache] bases.append(object) init = lambda self: None init.__doc__ = doc - obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) - obj.__name__ = name.split('.')[-1] - elif node.tag == 'callable': - funcname = node.attrib['id'].split('.')[-1] - argspec = node.attrib.get('argspec') + obj = type(name, tuple(bases), {"__doc__": doc, "__init__": init}) + obj.__name__ = name.split(".")[-1] + elif node.tag == "callable": + funcname = node.attrib["id"].split(".")[-1] + argspec = node.attrib.get("argspec") if argspec: - argspec = re.sub('^[^(]*', '', argspec) + argspec = re.sub("^[^(]*", "", argspec) doc = "%s%s\n\n%s" % (funcname, argspec, doc) obj = lambda: 0 obj.__argspec_is_invalid_ = True @@ -140,7 +154,10 @@ def base_cmp(a, b): if inspect.isclass(object_cache[parent]): obj.__objclass__ = object_cache[parent] else: - class Dummy(object): pass + + class Dummy(object): + pass + obj = Dummy() obj.__name__ = name obj.__doc__ = doc @@ -151,17 +168,18 @@ class 
Dummy(object): pass if parent: if inspect.ismodule(object_cache[parent]): obj.__module__ = parent - setattr(object_cache[parent], name.split('.')[-1], obj) + setattr(object_cache[parent], name.split(".")[-1], obj) # Populate items for node in root: - obj = object_cache.get(node.attrib['id']) - if obj is None: continue - for ref in node.findall('ref'): - if node.tag == 'class': - if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) + obj = object_cache.get(node.attrib["id"]) + if obj is None: + continue + for ref in node.findall("ref"): + if node.tag == "class": + if ref.attrib["ref"].startswith(node.attrib["id"] + "."): + setattr( + obj, ref.attrib["name"], object_cache.get(ref.attrib["ref"]) + ) else: - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) + setattr(obj, ref.attrib["name"], object_cache.get(ref.attrib["ref"])) diff --git a/doc/numpydoc/numpydoc/plot_directive.py b/doc/numpydoc/numpydoc/plot_directive.py index 77589a64..675f1de2 100644 --- a/doc/numpydoc/numpydoc/plot_directive.py +++ b/doc/numpydoc/numpydoc/plot_directive.py @@ -76,8 +76,13 @@ """ -import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback -import sphinx +import os +import re +import shutil +import sys +import textwrap +import traceback +import warnings if sys.version_info[0] >= 3: from io import StringIO @@ -85,86 +90,113 @@ from io import StringIO import warnings -warnings.warn("A plot_directive module is also available under " - "matplotlib.sphinxext; expect this numpydoc.plot_directive " - "module to be deprecated after relevant features have been " - "integrated there.", - FutureWarning, stacklevel=2) + +warnings.warn( + "A plot_directive module is also available under " + "matplotlib.sphinxext; expect this numpydoc.plot_directive " + "module to be deprecated after relevant features have been " + "integrated there.", + FutureWarning, + stacklevel=2, +) -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ # Registration hook -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ + def setup(app): setup.app = app setup.config = app.config setup.confdir = app.confdir - app.add_config_value('plot_pre_code', '', True) - app.add_config_value('plot_include_source', False, True) - app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) - app.add_config_value('plot_basedir', None, True) - app.add_config_value('plot_html_show_formats', True, True) + app.add_config_value("plot_pre_code", "", True) + app.add_config_value("plot_include_source", False, True) + app.add_config_value("plot_formats", ["png", "hires.png", "pdf"], True) + app.add_config_value("plot_basedir", None, True) + app.add_config_value("plot_html_show_formats", True, True) + + app.add_directive( + "plot", plot_directive, True, (0, 1, False), **plot_directive_options + ) - app.add_directive('plot', plot_directive, True, (0, 1, False), - **plot_directive_options) -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ # plot:: directive -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ from 
docutils.parsers.rst import directives -from docutils import nodes -def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): + +def plot_directive( + name, + arguments, + options, + content, + lineno, + content_offset, + block_text, + state, + state_machine, +): return run(arguments, content, options, state_machine, state, lineno) + + plot_directive.__doc__ = __doc__ + def _option_boolean(arg): if not arg or not arg.strip(): # no argument given, assume used as a flag return True - elif arg.strip().lower() in ('no', '0', 'false'): + elif arg.strip().lower() in ("no", "0", "false"): return False - elif arg.strip().lower() in ('yes', '1', 'true'): + elif arg.strip().lower() in ("yes", "1", "true"): return True else: raise ValueError('"%s" unknown boolean' % arg) + def _option_format(arg): - return directives.choice(arg, ('python', 'lisp')) + return directives.choice(arg, ("python", "lisp")) + def _option_align(arg): - return directives.choice(arg, ("top", "middle", "bottom", "left", "center", - "right")) - -plot_directive_options = {'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - } - -#------------------------------------------------------------------------------ + return directives.choice( + arg, ("top", "middle", "bottom", "left", "center", "right") + ) + + +plot_directive_options = { + "alt": directives.unchanged, + "height": directives.length_or_unitless, + "width": directives.length_or_percentage_or_unitless, + "scale": directives.nonnegative_int, + "align": _option_align, + "class": directives.class_option, + "include-source": _option_boolean, + "format": _option_format, +} + +# ------------------------------------------------------------------------------ # Generating output -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ -from docutils import nodes, utils try: # Sphinx depends on either Jinja or Jinja2 import jinja2 + def format_template(template, **kw): return jinja2.Template(template).render(**kw) + except ImportError: import jinja + def format_template(template, **kw): return jinja.from_string(template, **kw) + TEMPLATE = """ {{ source_code }} @@ -210,6 +242,7 @@ def format_template(template, **kw): """ + class ImageFile(object): def __init__(self, basename, dirname): self.basename = basename @@ -222,6 +255,7 @@ def filename(self, format): def filenames(self): return [self.filename(fmt) for fmt in self.formats] + def run(arguments, content, options, state_machine, state, lineno): if arguments and content: raise RuntimeError("plot:: directive can't have both args and content") @@ -229,42 +263,42 @@ def run(arguments, content, options, state_machine, state, lineno): document = state_machine.document config = document.settings.env.config - options.setdefault('include-source', config.plot_include_source) + options.setdefault("include-source", config.plot_include_source) # determine input - rst_file = document.attributes['source'] + rst_file = document.attributes["source"] rst_dir = os.path.dirname(rst_file) if arguments: if not config.plot_basedir: - source_file_name = os.path.join(rst_dir, - directives.uri(arguments[0])) + source_file_name = os.path.join(rst_dir, 
directives.uri(arguments[0])) else: - source_file_name = os.path.join(setup.confdir, config.plot_basedir, - directives.uri(arguments[0])) - code = open(source_file_name, 'r').read() + source_file_name = os.path.join( + setup.confdir, config.plot_basedir, directives.uri(arguments[0]) + ) + code = open(source_file_name, "r").read() output_base = os.path.basename(source_file_name) else: source_file_name = rst_file code = textwrap.dedent("\n".join(map(str, content))) - counter = document.attributes.get('_plot_counter', 0) + 1 - document.attributes['_plot_counter'] = counter + counter = document.attributes.get("_plot_counter", 0) + 1 + document.attributes["_plot_counter"] = counter base, ext = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) + output_base = "%s-%d.py" % (base, counter) base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): + if source_ext in (".py", ".rst", ".txt"): output_base = base else: - source_ext = '' + source_ext = "" # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') + output_base = output_base.replace(".", "-") # is it in doctest format? is_doctest = contains_doctest(code) - if 'format' in options: - if options['format'] == 'python': + if "format" in options: + if options["format"] == "python": is_doctest = False else: is_doctest = True @@ -276,52 +310,53 @@ def run(arguments, content, options, state_machine, state, lineno): source_rel_dir = source_rel_dir[1:] # build_dir: where to place output files (temporarily) - build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), - 'plot_directive', - source_rel_dir) + build_dir = os.path.join( + os.path.dirname(setup.app.doctreedir), "plot_directive", source_rel_dir + ) if not os.path.exists(build_dir): os.makedirs(build_dir) # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, - source_rel_dir)) + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) # how to link to files from the RST file - dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), - source_rel_dir).replace(os.path.sep, '/') - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') - source_link = dest_dir_link + '/' + output_base + source_ext + dest_dir_link = os.path.join( + relpath(setup.confdir, rst_dir), source_rel_dir + ).replace(os.path.sep, "/") + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, "/") + source_link = dest_dir_link + "/" + output_base + source_ext # make figures try: - results = makefig(code, source_file_name, build_dir, output_base, - config) + results = makefig(code, source_file_name, build_dir, output_base, config) errors = [] except PlotError as err: reporter = state.memo.reporter sm = reporter.system_message( - 2, "Exception occurred in plotting %s: %s" % (output_base, err), - line=lineno) + 2, "Exception occurred in plotting %s: %s" % (output_base, err), line=lineno + ) results = [(code, [])] errors = [sm] # generate output restructuredtext total_lines = [] for j, (code_piece, images) in enumerate(results): - if options['include-source']: + if options["include-source"]: if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] + lines = [""] + lines += [row.rstrip() for row in code_piece.split("\n")] else: - lines = ['.. 
code-block:: python', ''] - lines += [' %s' % row.rstrip() - for row in code_piece.split('\n')] + lines = [".. code-block:: python", ""] + lines += [" %s" % row.rstrip() for row in code_piece.split("\n")] source_code = "\n".join(lines) else: source_code = "" - opts = [':%s: %s' % (key, val) for key, val in list(options.items()) - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + opts = [ + ":%s: %s" % (key, val) + for key, val in list(options.items()) + if key in ("alt", "height", "width", "scale", "align", "class") + ] only_html = ".. only:: html" only_latex = ".. only:: latex" @@ -342,7 +377,8 @@ def run(arguments, content, options, state_machine, state, lineno): options=opts, images=images, source_code=source_code, - html_show_formats=config.plot_html_show_formats) + html_show_formats=config.plot_html_show_formats, + ) total_lines.extend(result.split("\n")) total_lines.extend("\n") @@ -357,42 +393,42 @@ def run(arguments, content, options, state_machine, state, lineno): for code_piece, images in results: for img in images: for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, - os.path.basename(fn))) + shutil.copyfile(fn, os.path.join(dest_dir, os.path.basename(fn))) # copy script (if necessary) if source_file_name == rst_file: target_name = os.path.join(dest_dir, output_base + source_ext) - f = open(target_name, 'w') + f = open(target_name, "w") f.write(unescape_doctest(code)) f.close() return errors -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ # Run code and capture figures -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") +import exceptions import matplotlib.pyplot as plt -import matplotlib.image as image from matplotlib import _pylab_helpers -import exceptions def contains_doctest(text): try: # check if it's valid Python as-is - compile(text, '', 'exec') + compile(text, "", "exec") return False except SyntaxError: pass - r = re.compile(r'^\s*>>>', re.M) + r = re.compile(r"^\s*>>>", re.M) m = r.search(text) return bool(m) + def unescape_doctest(text): """ Extract code from a piece of text, which contains either Python code @@ -404,7 +440,7 @@ def unescape_doctest(text): code = "" for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + m = re.match(r"^\s*(>>>|\.\.\.) (.*)$", line) if m: code += m.group(2) + "\n" elif line.strip(): @@ -413,6 +449,7 @@ def unescape_doctest(text): code += "\n" return code + def split_code_at_show(text): """ Split code at plt.show() @@ -424,8 +461,9 @@ def split_code_at_show(text): part = [] for line in text.split("\n"): - if (not is_doctest and line.strip() == 'plt.show()') or \ - (is_doctest and line.strip() == '>>> plt.show()'): + if (not is_doctest and line.strip() == "plt.show()") or ( + is_doctest and line.strip() == ">>> plt.show()" + ): part.append(line) parts.append("\n".join(part)) part = [] @@ -435,9 +473,11 @@ def split_code_at_show(text): parts.append("\n".join(part)) return parts + class PlotError(RuntimeError): pass + def run_code(code, code_path, ns=None): # Change the working directory to the directory of the example, so # it can get at its data files, if any. 
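The hunks above only reflow contains_doctest() and unescape_doctest() without changing their behaviour, so a short sketch of what unescape_doctest does may help when verifying the diff. The input string is hypothetical; the regex is the same one used in the function (prompt lines keep their code, expected-output lines are commented out):

import re

text = ">>> x = 1 + 1\n>>> print(x)\n2\n"

code = ""
for line in text.split("\n"):
    m = re.match(r"^\s*(>>>|\.\.\.) (.*)$", line)
    if m:
        code += m.group(2) + "\n"  # keep the code behind the prompt
    elif line.strip():
        code += "# " + line.strip() + "\n"  # expected output -> comment
    else:
        code += "\n"

assert code == "x = 1 + 1\nprint(x)\n# 2\n\n"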
@@ -455,7 +495,7 @@ def run_code(code, code_path, ns=None): # Reset sys.argv old_sys_argv = sys.argv sys.argv = [code_path] - + try: try: code = unescape_doctest(code) @@ -474,17 +514,20 @@ def run_code(code, code_path, ns=None): return ns -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ # Generating figures -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ + def out_of_date(original, derived): """ Returns True if derivative is out-of-date wrt original, both of which are full file paths. """ - return (not os.path.exists(derived) - or os.stat(derived).st_mtime < os.stat(original).st_mtime) + return ( + not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime + ) def makefig(code, code_path, output_dir, output_base, config): @@ -495,12 +538,12 @@ def makefig(code, code_path, output_dir, output_base, config): """ # -- Parse format list - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + default_dpi = {"png": 80, "hires.png": 200, "pdf": 50} formats = [] for fmt in config.plot_formats: if isinstance(fmt, str): formats.append((fmt, default_dpi.get(fmt, 80))) - elif type(fmt) in (tuple, list) and len(fmt)==2: + elif type(fmt) in (tuple, list) and len(fmt) == 2: formats.append((str(fmt[0]), int(fmt[1]))) else: raise PlotError('invalid image format "%r" in plot_formats' % fmt) @@ -527,7 +570,7 @@ def makefig(code, code_path, output_dir, output_base, config): for i, code_piece in enumerate(code_pieces): images = [] for j in range(1000): - img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) for format, dpi in formats: if out_of_date(code_path, img.filename(format)): all_exists = False @@ -536,7 +579,7 @@ def makefig(code, code_path, output_dir, output_base, config): # assume that if we have one, we have them all if not all_exists: - all_exists = (j > 0) + all_exists = j > 0 break images.append(img) if not all_exists: @@ -553,7 +596,7 @@ def makefig(code, code_path, output_dir, output_base, config): for i, code_piece in enumerate(code_pieces): # Clear between runs - plt.close('all') + plt.close("all") # Run code run_code(code_piece, code_path, ns) @@ -565,8 +608,7 @@ def makefig(code, code_path, output_dir, output_base, config): if len(fig_managers) == 1 and len(code_pieces) == 1: img = ImageFile(output_base, output_dir) else: - img = ImageFile("%s_%02d_%02d" % (output_base, i, j), - output_dir) + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) images.append(img) for format, dpi in formats: try: @@ -581,19 +623,19 @@ def makefig(code, code_path, output_dir, output_base, config): return results -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ # Relative pathnames -#------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ try: from os.path import relpath except ImportError: # Copied from Python 2.7 - if 'posix' in sys.builtin_module_names: + if "posix" in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - 
pardir + from os.path import abspath, commonprefix, curdir, join, pardir, sep if not path: raise ValueError("no path specified") @@ -604,15 +646,24 @@ def relpath(path, start=os.path.curdir): # Work out how much of the filepath is shared by start and path. i = len(commonprefix([start_list, path_list])) - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + rel_list = [pardir] * (len(start_list) - i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) - elif 'nt' in sys.builtin_module_names: + + elif "nt" in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir, splitunc + from os.path import ( + abspath, + commonprefix, + curdir, + join, + pardir, + sep, + splitunc, + ) if not path: raise ValueError("no path specified") @@ -622,11 +673,14 @@ def relpath(path, start=os.path.curdir): unc_path, rest = splitunc(path) unc_start, rest = splitunc(start) if bool(unc_path) ^ bool(unc_start): - raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" - % (path, start)) + raise ValueError( + "Cannot mix UNC and non-UNC paths (%s and %s)" % (path, start) + ) else: - raise ValueError("path is on drive %s, start on drive %s" - % (path_list[0], start_list[0])) + raise ValueError( + "path is on drive %s, start on drive %s" + % (path_list[0], start_list[0]) + ) # Work out how much of the filepath is shared by start and path. for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): @@ -634,9 +688,10 @@ def relpath(path, start=os.path.curdir): else: i += 1 - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + rel_list = [pardir] * (len(start_list) - i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) + else: raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/doc/numpydoc/numpydoc/tests/test_docscrape.py b/doc/numpydoc/numpydoc/tests/test_docscrape.py deleted file mode 100644 index 6af15b7f..00000000 --- a/doc/numpydoc/numpydoc/tests/test_docscrape.py +++ /dev/null @@ -1,807 +0,0 @@ -# -*- encoding:utf-8 -*- - - -import sys, textwrap - -from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc -from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc -from nose.tools import * - -if sys.version_info[0] >= 3: - sixu = lambda s: s -else: - sixu = lambda s: str(s, 'unicode_escape') - - -doc_txt = '''\ - numpy.multivariate_normal(mean, cov, shape=None, spam=None) - - Draw values from a multivariate normal distribution with specified - mean and covariance. - - The multivariate normal or Gaussian distribution is a generalisation - of the one-dimensional normal distribution to higher dimensions. - - Parameters - ---------- - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N, N) ndarray - Covariance matrix of the distribution. - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - - Returns - ------- - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - list of str - This is not a real return value. 
It exists to test - anonymous return values. - - Other Parameters - ---------------- - spam : parrot - A parrot off its mortal coil. - - Raises - ------ - RuntimeError - Some error - - Warns - ----- - RuntimeWarning - Some warning - - Warnings - -------- - Certain warnings apply. - - Notes - ----- - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> x,y = multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be symmetric and non-negative - definite. - - References - ---------- - .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 - .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - - See Also - -------- - some, other, funcs - otherfunc : relationship - - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = multivariate_normal(mean,cov,(3,3)) - >>> print x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - .. index:: random - :refguide: random;distributions, random;gauss - - ''' -doc = NumpyDocString(doc_txt) - - -def test_signature(): - assert doc['Signature'].startswith('numpy.multivariate_normal(') - assert doc['Signature'].endswith('spam=None)') - -def test_summary(): - assert doc['Summary'][0].startswith('Draw values') - assert doc['Summary'][-1].endswith('covariance.') - -def test_extended_summary(): - assert doc['Extended Summary'][0].startswith('The multivariate normal') - -def test_parameters(): - assert_equal(len(doc['Parameters']), 3) - assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) - - arg, arg_type, desc = doc['Parameters'][1] - assert_equal(arg_type, '(N, N) ndarray') - assert desc[0].startswith('Covariance matrix') - assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' - -def test_other_parameters(): - assert_equal(len(doc['Other Parameters']), 1) - assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) - arg, arg_type, desc = doc['Other Parameters'][0] - assert_equal(arg_type, 'parrot') - assert desc[0].startswith('A parrot off its mortal coil') - -def test_returns(): - assert_equal(len(doc['Returns']), 2) - arg, arg_type, desc = doc['Returns'][0] - assert_equal(arg, 'out') - assert_equal(arg_type, 'ndarray') - assert desc[0].startswith('The drawn samples') - assert desc[-1].endswith('distribution.') - - arg, arg_type, desc = doc['Returns'][1] - assert_equal(arg, 'list of str') - assert_equal(arg_type, '') - assert desc[0].startswith('This is not a real') - assert desc[-1].endswith('anonymous return values.') - -def test_notes(): - assert doc['Notes'][0].startswith('Instead') - assert doc['Notes'][-1].endswith('definite.') - assert_equal(len(doc['Notes']), 17) - -def test_references(): - assert doc['References'][0].startswith('..') - assert doc['References'][-1].endswith('2001.') - -def test_examples(): - assert doc['Examples'][0].startswith('>>>') - assert doc['Examples'][-1].endswith('True]') - -def test_index(): - 
assert_equal(doc['index']['default'], 'random') - assert_equal(len(doc['index']), 2) - assert_equal(len(doc['index']['refguide']), 2) - -def non_blank_line_by_line_compare(a,b): - a = textwrap.dedent(a) - b = textwrap.dedent(b) - a = [l.rstrip() for l in a.split('\n') if l.strip()] - b = [l.rstrip() for l in b.split('\n') if l.strip()] - for n,line in enumerate(a): - if not line == b[n]: - raise AssertionError("Lines %s of a and b differ: " - "\n>>> %s\n<<< %s\n" % - (n,line,b[n])) -def test_str(): - non_blank_line_by_line_compare(str(doc), -"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - -cov : (N, N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. -list of str - This is not a real return value. It exists to test - anonymous return values. - -Other Parameters ----------------- -spam : parrot - A parrot off its mortal coil. - -Raises ------- -RuntimeError - Some error - -Warns ------ -RuntimeWarning - Some warning - -Warnings --------- -Certain warnings apply. - -See Also --------- -`some`_, `other`_, `funcs`_ - -`otherfunc`_ - relationship - -Notes ------ -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index:: random - :refguide: random;distributions, random;gauss""") - - -def test_sphinx_str(): - sphinx_doc = SphinxDocString(doc_txt) - non_blank_line_by_line_compare(str(sphinx_doc), -""" -.. index:: random - single: random;distributions, random;gauss - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. 
- -:Parameters: - - **mean** : (N,) ndarray - - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - **cov** : (N, N) ndarray - - Covariance matrix of the distribution. - - **shape** : tuple of ints - - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -:Returns: - - **out** : ndarray - - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - list of str - - This is not a real return value. It exists to test - anonymous return values. - -:Other Parameters: - - **spam** : parrot - - A parrot off its mortal coil. - -:Raises: - - **RuntimeError** - - Some error - -:Warns: - - **RuntimeWarning** - - Some warning - -.. warning:: - - Certain warnings apply. - -.. seealso:: - - :obj:`some`, :obj:`other`, :obj:`funcs` - - :obj:`otherfunc` - relationship - -.. rubric:: Notes - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -.. rubric:: References - -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -.. only:: latex - - [1]_, [2]_ - -.. rubric:: Examples - ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] -""") - - -doc2 = NumpyDocString(""" - Returns array of indices of the maximum values of along the given axis. - - Parameters - ---------- - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis""") - -def test_parameters_without_extended_description(): - assert_equal(len(doc2['Parameters']), 2) - -doc3 = NumpyDocString(""" - my_signature(*params, **kwds) - - Return this and that. - """) - -def test_escape_stars(): - signature = str(doc3).split('\n')[0] - assert_equal(signature, 'my_signature(\*params, \*\*kwds)') - -doc4 = NumpyDocString( - """a.conj() - - Return an array with all complex-valued elements conjugated.""") - -def test_empty_extended_summary(): - assert_equal(doc4['Extended Summary'], []) - -doc5 = NumpyDocString( - """ - a.something() - - Raises - ------ - LinAlgException - If array is singular. 
- - Warns - ----- - SomeWarning - If needed - """) - -def test_raises(): - assert_equal(len(doc5['Raises']), 1) - name,_,desc = doc5['Raises'][0] - assert_equal(name,'LinAlgException') - assert_equal(desc,['If array is singular.']) - -def test_warns(): - assert_equal(len(doc5['Warns']), 1) - name,_,desc = doc5['Warns'][0] - assert_equal(name,'SomeWarning') - assert_equal(desc,['If needed']) - -def test_see_also(): - doc6 = NumpyDocString( - """ - z(x,theta) - - See Also - -------- - func_a, func_b, func_c - func_d : some equivalent func - foo.func_e : some other func over - multiple lines - func_f, func_g, :meth:`func_h`, func_j, - func_k - :obj:`baz.obj_q` - :class:`class_j`: fubar - foobar - """) - - assert len(doc6['See Also']) == 12 - for func, desc, role in doc6['See Also']: - if func in ('func_a', 'func_b', 'func_c', 'func_f', - 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): - assert(not desc) - else: - assert(desc) - - if func == 'func_h': - assert role == 'meth' - elif func == 'baz.obj_q': - assert role == 'obj' - elif func == 'class_j': - assert role == 'class' - else: - assert role is None - - if func == 'func_d': - assert desc == ['some equivalent func'] - elif func == 'foo.func_e': - assert desc == ['some other func over', 'multiple lines'] - elif func == 'class_j': - assert desc == ['fubar', 'foobar'] - -def test_see_also_print(): - class Dummy(object): - """ - See Also - -------- - func_a, func_b - func_c : some relationship - goes here - func_d - """ - pass - - obj = Dummy() - s = str(FunctionDoc(obj, role='func')) - assert(':func:`func_a`, :func:`func_b`' in s) - assert(' some relationship' in s) - assert(':func:`func_d`' in s) - -doc7 = NumpyDocString(""" - - Doc starts on second line. - - """) - -def test_empty_first_line(): - assert doc7['Summary'][0].startswith('Doc starts') - - -def test_no_summary(): - str(SphinxDocString(""" - Parameters - ----------""")) - - -def test_unicode(): - doc = SphinxDocString(""" - öäöäöäöäöåååå - - öäöäöäööäååå - - Parameters - ---------- - ååå : äää - ööö - - Returns - ------- - ååå : ööö - äää - - """) - assert isinstance(doc['Summary'][0], str) - assert doc['Summary'][0] == 'öäöäöäöäöåååå' - -def test_plot_examples(): - cfg = dict(use_plots=True) - - doc = SphinxDocString(""" - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3],[4,5,6]) - plt.show() - """, config=cfg) - assert str(doc).count('plot::') == 1, str(doc) - -def test_class_members(): - - class Dummy(object): - """ - Dummy class. 
- - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - @property - def spammity(self): - """Spammity index""" - return 0.95 - - class Ignorable(object): - """local class, to be ignored""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(Dummy, config=dict(show_class_members=False)) - assert 'Methods' not in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' not in str(doc), (cls, str(doc)) - assert 'spammity' not in str(doc), (cls, str(doc)) - assert 'Spammity index' not in str(doc), (cls, str(doc)) - - doc = cls(Dummy, config=dict(show_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'spammity' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' in str(doc), str(doc) - - class SubDummy(Dummy): - """ - Subclass of Dummy class. - - """ - def ham(self, c, d): - """Cheese\n\nNo cheese.\nOverloaded Dummy.ham""" - pass - - def bar(self, a, b): - """Bar\n\nNo bar""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(SubDummy, config=dict(show_class_members=True, - show_inherited_class_members=False)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'bar' in str(doc), (cls, str(doc)) - assert 'spammity' not in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' not in str(doc), str(doc) - - doc = cls(SubDummy, config=dict(show_class_members=True, - show_inherited_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'bar' in str(doc), (cls, str(doc)) - assert 'spammity' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' in str(doc), str(doc) - -def test_duplicate_signature(): - # Duplicate function signatures occur e.g. in ufuncs, when the - # automatic mechanism adds one, and a more detailed comes from the - # docstring itself. - - doc = NumpyDocString( - """ - z(x1, x2) - - z(a, theta) - """) - - assert doc['Signature'].strip() == 'z(a, theta)' - - -class_doc_txt = """ - Foo - - Parameters - ---------- - f : callable ``f(t, y, *f_args)`` - Aaa. - jac : callable ``jac(t, y, *jac_args)`` - Bbb. - - Attributes - ---------- - t : float - Current time. - y : ndarray - Current variable values. - - Methods - ------- - a - b - c - - Examples - -------- - For usage examples, see `ode`. -""" - -def test_class_members_doc(): - doc = ClassDoc(None, class_doc_txt) - non_blank_line_by_line_compare(str(doc), - """ - Foo - - Parameters - ---------- - f : callable ``f(t, y, *f_args)`` - Aaa. - jac : callable ``jac(t, y, *jac_args)`` - Bbb. - - Examples - -------- - For usage examples, see `ode`. - - Attributes - ---------- - t : float - Current time. - y : ndarray - Current variable values. - - Methods - ------- - a - - b - - c - - .. index:: - - """) - -def test_class_members_doc_sphinx(): - doc = SphinxClassDoc(None, class_doc_txt) - non_blank_line_by_line_compare(str(doc), - """ - Foo - - :Parameters: - - **f** : callable ``f(t, y, *f_args)`` - - Aaa. 
- - **jac** : callable ``jac(t, y, *jac_args)`` - - Bbb. - - .. rubric:: Examples - - For usage examples, see `ode`. - - .. rubric:: Attributes - - === ========== - t (float) Current time. - y (ndarray) Current variable values. - === ========== - - .. rubric:: Methods - - === ========== - a - b - c - === ========== - - """) - -if __name__ == "__main__": - import nose - nose.run() diff --git a/doc/numpydoc/numpydoc/tests/test_linkcode.py b/doc/numpydoc/numpydoc/tests/test_linkcode.py deleted file mode 100644 index 26fec6dd..00000000 --- a/doc/numpydoc/numpydoc/tests/test_linkcode.py +++ /dev/null @@ -1,5 +0,0 @@ - - -import numpydoc.linkcode - -# No tests at the moment... diff --git a/doc/numpydoc/numpydoc/tests/test_phantom_import.py b/doc/numpydoc/numpydoc/tests/test_phantom_import.py deleted file mode 100644 index 51f98db0..00000000 --- a/doc/numpydoc/numpydoc/tests/test_phantom_import.py +++ /dev/null @@ -1,12 +0,0 @@ - - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("phantom_import not ported to Py3") - - import numpydoc.phantom_import - -# No tests at the moment... diff --git a/doc/numpydoc/numpydoc/tests/test_plot_directive.py b/doc/numpydoc/numpydoc/tests/test_plot_directive.py deleted file mode 100644 index 0daffe21..00000000 --- a/doc/numpydoc/numpydoc/tests/test_plot_directive.py +++ /dev/null @@ -1,11 +0,0 @@ - - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)") - import numpydoc.plot_directive - -# No tests at the moment... diff --git a/doc/numpydoc/numpydoc/tests/test_traitsdoc.py b/doc/numpydoc/numpydoc/tests/test_traitsdoc.py deleted file mode 100644 index 89c059e9..00000000 --- a/doc/numpydoc/numpydoc/tests/test_traitsdoc.py +++ /dev/null @@ -1,11 +0,0 @@ - - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("traitsdoc not ported to Python3") - import numpydoc.traitsdoc - -# No tests at the moment... diff --git a/doc/numpydoc/numpydoc/traitsdoc.py b/doc/numpydoc/numpydoc/traitsdoc.py index bf102ea8..d5a45c21 100644 --- a/doc/numpydoc/numpydoc/traitsdoc.py +++ b/doc/numpydoc/numpydoc/traitsdoc.py @@ -15,92 +15,86 @@ """ +import collections import inspect -import os import pydoc -import collections -from . import docscrape -from . import docscrape_sphinx -from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString +from . import comment_eater, docscrape, numpydoc +from .docscrape_sphinx import SphinxClassDoc, SphinxDocString, SphinxFunctionDoc -from . import numpydoc - -from . import comment_eater class SphinxTraitsDoc(SphinxClassDoc): - def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + def __init__(self, cls, modulename="", func_doc=SphinxFunctionDoc): if not inspect.isclass(cls): raise ValueError("Initialise using a class. Got %r" % cls) self._cls = cls - if modulename and not modulename.endswith('.'): - modulename += '.' + if modulename and not modulename.endswith("."): + modulename += "." 
self._mod = modulename self._name = cls.__name__ self._func_doc = func_doc docstring = pydoc.getdoc(cls) - docstring = docstring.split('\n') + docstring = docstring.split("\n") # De-indent paragraph try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) + indent = min(len(s) - len(s.lstrip()) for s in docstring if s.strip()) except ValueError: indent = 0 - for n,line in enumerate(docstring): + for n, line in enumerate(docstring): docstring[n] = docstring[n][indent:] self._doc = docscrape.Reader(docstring) self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Description': [], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Traits': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'References': '', - 'Example': '', - 'Examples': '', - 'index': {} - } + "Signature": "", + "Summary": "", + "Description": [], + "Extended Summary": [], + "Parameters": [], + "Returns": [], + "Raises": [], + "Warns": [], + "Other Parameters": [], + "Traits": [], + "Methods": [], + "See Also": [], + "Notes": [], + "References": "", + "Example": "", + "Examples": "", + "index": {}, + } self._parse() def _str_summary(self): - return self['Summary'] + [''] + return self["Summary"] + [""] def _str_extended_summary(self): - return self['Description'] + self['Extended Summary'] + [''] + return self["Description"] + self["Extended Summary"] + [""] def __str__(self, indent=0, func_role="func"): out = [] out += self._str_signature() - out += self._str_index() + [''] + out += self._str_index() + [""] out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Traits', 'Methods', - 'Returns','Raises'): + for param_list in ("Parameters", "Traits", "Methods", "Returns", "Raises"): out += self._str_param_list(param_list) out += self._str_see_also("obj") - out += self._str_section('Notes') + out += self._str_section("Notes") out += self._str_references() - out += self._str_section('Example') - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) + out += self._str_section("Example") + out += self._str_section("Examples") + out = self._str_indent(out, indent) + return "\n".join(out) + def looks_like_issubclass(obj, classname): - """ Return True if the object has a class or superclass with the given class + """Return True if the object has a class or superclass with the given class name. Ignores old-style classes. @@ -113,30 +107,31 @@ def looks_like_issubclass(obj, classname): return True return False + def get_doc_object(obj, what=None, config=None): if what is None: if inspect.isclass(obj): - what = 'class' + what = "class" elif inspect.ismodule(obj): - what = 'module' + what = "module" elif isinstance(obj, collections.Callable): - what = 'function' + what = "function" else: - what = 'object' - if what == 'class': - doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) - if looks_like_issubclass(obj, 'HasTraits'): + what = "object" + if what == "class": + doc = SphinxTraitsDoc(obj, "", func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, "HasTraits"): for name, trait, comment in comment_eater.get_class_traits(obj): # Exclude private traits. 
- if not name.startswith('_'): - doc['Traits'].append((name, trait, comment.splitlines())) + if not name.startswith("_"): + doc["Traits"].append((name, trait, comment.splitlines())) return doc - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '', config=config) + elif what in ("function", "method"): + return SphinxFunctionDoc(obj, "", config=config) else: return SphinxDocString(pydoc.getdoc(obj), config=config) + def setup(app): # init numpydoc numpydoc.setup(app, get_doc_object) - diff --git a/doc/numpydoc/setup.py b/doc/numpydoc/setup.py index ed755682..a1d7235f 100644 --- a/doc/numpydoc/setup.py +++ b/doc/numpydoc/setup.py @@ -1,7 +1,4 @@ - - import sys -import setuptools from distutils.core import setup if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 3): @@ -15,16 +12,18 @@ version=version, description="Sphinx extension to support docstrings in Numpy format", # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers - classifiers=["Development Status :: 3 - Alpha", - "Environment :: Plugins", - "License :: OSI Approved :: BSD License", - "Topic :: Documentation"], + classifiers=[ + "Development Status :: 3 - Alpha", + "Environment :: Plugins", + "License :: OSI Approved :: BSD License", + "Topic :: Documentation", + ], keywords="sphinx numpy", author="Pauli Virtanen and others", author_email="pav@iki.fi", url="https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt", license="BSD", requires=["sphinx (>= 1.0.1)"], - package_data={'numpydoc': ['tests/test_*.py']}, - test_suite = 'nose.collector', + package_data={"numpydoc": ["tests/test_*.py"]}, + test_suite="nose.collector", ) diff --git a/doc/scipy-sphinx-theme/README.rst b/doc/scipy-sphinx-theme/README.rst index 650741dc..fe073425 100644 --- a/doc/scipy-sphinx-theme/README.rst +++ b/doc/scipy-sphinx-theme/README.rst @@ -41,7 +41,7 @@ configuration variable: The following blocks are defined: - ``layout.html:header`` - + Block at the top of the page, for logo etc. 
 - ``searchbox.html:edit_link``
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/layout.html b/doc/scipy-sphinx-theme/_theme/scipy/layout.html
index d15bf1a8..b0406d52 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/layout.html
+++ b/doc/scipy-sphinx-theme/_theme/scipy/layout.html
@@ -266,4 +266,3 @@
 {%- endblock %}
 
 
-
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js b/doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js
index ace69221..2402d37e 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js
@@ -57,4 +57,3 @@ $(document).ready(function() {
     button.attr('title', hide_text);
   });
 });
-
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less
index 4c626bda..5c27c7e0 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/close.less
@@ -29,4 +29,4 @@ button.close {
   background: transparent;
   border: 0;
   -webkit-appearance: none;
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less
index 266a926e..6314c898 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/code.less
@@ -58,4 +58,4 @@ pre {
 .pre-scrollable {
   max-height: 340px;
   overflow-y: scroll;
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less
index 24a20621..ade5c751 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/layouts.less
@@ -13,4 +13,4 @@
   padding-right: @gridGutterWidth;
   padding-left: @gridGutterWidth;
   .clearfix();
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less
index 14761882..1d461cdb 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/pager.less
@@ -40,4 +40,4 @@
     color: @grayLight;
     background-color: #fff;
     cursor: default;
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less
index 21cd3ba6..39dee454 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/bootstrap/responsive-navbar.less
@@ -116,8 +116,8 @@
     .border-radius(0);
     .box-shadow(none);
   }
-  .nav-collapse .open > .dropdown-menu { 
-    display: block; 
+  .nav-collapse .open > .dropdown-menu {
+    display: block;
   }
 
   .nav-collapse .dropdown-menu:before,
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less
index 20a5091e..cf34080a 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-bootstrap.less
@@ -15,4 +15,4 @@
 
 //Sprites
 @iconSpritePath: '../../img/glyphicons-halflings.png'
-@iconWhiteSpritePath: '../../img/glyphicons-halflings-white.png';
\ No newline at end of file
+@iconWhiteSpritePath: '../../img/glyphicons-halflings-white.png';
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less
index 94911236..542c679e 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-content.less
@@ -8,7 +8,7 @@
   }
 }
 
-//tags -- depricated 
+//tags -- deprecated
 // need to design
 .tags .btn {
   border: none;
@@ -30,7 +30,7 @@
 
 .spc-snippet-info {
   padding-top: 10px;
-  
+
   .dl-horizontal {
     margin: 5px;
     dt { font-weight: normal; }
@@ -39,17 +39,17 @@
 
 .spc-snippet-body {
   padding: 10px;
-  
+
   .accordion-group {
     border: none;
   }
-  
+
   .accordion-heading {
     text-transform: uppercase;
     font-size: 14px;
     border-bottom: 1px solid #e5e5e5;
   }
-  
+
   .accordion-heading .accordion-toggle {
     padding-top: 10px;
     padding-bottom: 5px;
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less
index 9e0cd6ce..8d43cd0f 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-extend.less
@@ -19,4 +19,4 @@ body {
 @import "spc-header.less";
 @import "spc-content.less";
 @import "spc-rightsidebar.less";
-@import "spc-footer.less";
\ No newline at end of file
+@import "spc-footer.less";
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less
index 8e4d09b8..2c6d373a 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-footer.less
@@ -6,4 +6,4 @@
 font-size: small;
 }
 
-//footer inside yet to be done (may be not required).
\ No newline at end of file
+//footer inside yet to be done (may be not required).
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less
index 0d77cd28..e8c940dd 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-header.less
@@ -1,4 +1,4 @@
-// settings for 
+// settings for
 // 1) .header
 // header block is found on the top of the website
 // spc-navbar, spc-header-searchbar found inside .header
@@ -16,10 +16,10 @@
   .nav-pills {
     margin-bottom: 0px;
     font-size: 12px;
-    
+
     >li >a {
       padding-top: 2.5px;
       padding-bottom: 2.5px;
     }
   }
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less
index afef531e..d41d8b39 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/less/spc-rightsidebar.less
@@ -11,4 +11,4 @@
     text-transform: uppercase;
   }
   .navigation li { margin: 5px; }
-}
\ No newline at end of file
+}
diff --git a/doc/scipy-sphinx-theme/_theme/scipy/static/scipy.css_t b/doc/scipy-sphinx-theme/_theme/scipy/static/scipy.css_t
index 3909af92..792e8923 100644
--- a/doc/scipy-sphinx-theme/_theme/scipy/static/scipy.css_t
+++ b/doc/scipy-sphinx-theme/_theme/scipy/static/scipy.css_t
@@ -118,7 +118,7 @@ td.field-body blockquote {
 td.field-body blockquote p,
 dl.class blockquote p,
 dl.function blockquote p,
-dl.method blockquote p 
+dl.method blockquote p
 {
   font-family: inherit;
   font-size: inherit;
diff --git a/doc/scipy-sphinx-theme/conf.py b/doc/scipy-sphinx-theme/conf.py
index ae9b1b79..cb3cdbd5 100644
--- a/doc/scipy-sphinx-theme/conf.py
+++ b/doc/scipy-sphinx-theme/conf.py
@@ -1,31 +1,39 @@
-needs_sphinx = '1.1'
+needs_sphinx = "1.1"
 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
-              'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
-              'sphinx.ext.autosummary', 'matplotlib.sphinxext.plot_directive']
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.pngmath",
+    "numpydoc",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.autosummary",
+    "matplotlib.sphinxext.plot_directive",
+]
 
-templates_path = ['_templates']
-source_suffix = '.rst'
-master_doc = 'index'
-project = 'scipy-sphinx-theme'
-copyright = '2013, Surya Kasturi and Pauli Virtanen'
-version = '0.1'
-release = '0.1'
-exclude_patterns = ['_build']
-pygments_style = 'sphinx'
+templates_path = ["_templates"]
+source_suffix = ".rst"
+master_doc = "index"
+project = "scipy-sphinx-theme"
+copyright = "2013, Surya Kasturi and Pauli Virtanen"
+version = "0.1"
+release = "0.1"
+exclude_patterns = ["_build"]
+pygments_style = "sphinx"
 
 # -- Options for HTML output ---------------------------------------------------
 
-html_theme = 'scipy'
-html_theme_path = ['_theme']
-#html_logo = '_static/scipyshiny_small.png'
-html_static_path = ['_static']
+html_theme = "scipy"
+html_theme_path = ["_theme"]
+# html_logo = '_static/scipyshiny_small.png'
+html_static_path = ["_static"]
 
 html_theme_options = {
     "edit_link": "true",
     "sidebar": "right",
     "scipy_org_logo": "false",
-    "rootlinks": [("http://scipy.org/", "Scipy.org"),
-                  ("http://docs.scipy.org/", "Docs")]
+    "rootlinks": [
+        ("http://scipy.org/", "Scipy.org"),
+        ("http://docs.scipy.org/", "Docs"),
+    ],
 }
 
 pngmath_latex_preamble = r"""
@@ -34,11 +42,11 @@
 \color{textgray}
 """
 pngmath_use_preview = True
-pngmath_dvipng_args = ['-gamma 1.5', '-D 96', '-bg Transparent']
+pngmath_dvipng_args = ["-gamma 1.5", "-D 96", "-bg Transparent"]
 
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
 # Plot style
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
 
 plot_pre_code = """
 import numpy as np
@@ -46,26 +54,27 @@
 np.random.seed(123)
 """
 plot_include_source = True
-plot_formats = [('png', 96), 'pdf']
+plot_formats = [("png", 96), "pdf"]
 plot_html_show_formats = False
 
 import math
-phi = (math.sqrt(5) + 1)/2
-font_size = 13*72/96.0  # 13 px
+
+phi = (math.sqrt(5) + 1) / 2
+
+font_size = 13 * 72 / 96.0  # 13 px
 
 plot_rcparams = {
-    'font.size': font_size,
-    'axes.titlesize': font_size,
-    'axes.labelsize': font_size,
-    'xtick.labelsize': font_size,
-    'ytick.labelsize': font_size,
-    'legend.fontsize': font_size,
-    'figure.figsize': (3*phi, 3),
-    'figure.subplot.bottom': 0.2,
-    'figure.subplot.left': 0.2,
-    'figure.subplot.right': 0.9,
-    'figure.subplot.top': 0.85,
-    'figure.subplot.wspace': 0.4,
-    'text.usetex': False,
+    "font.size": font_size,
+    "axes.titlesize": font_size,
+    "axes.labelsize": font_size,
+    "xtick.labelsize": font_size,
+    "ytick.labelsize": font_size,
+    "legend.fontsize": font_size,
+    "figure.figsize": (3 * phi, 3),
+    "figure.subplot.bottom": 0.2,
+    "figure.subplot.left": 0.2,
+    "figure.subplot.right": 0.9,
+    "figure.subplot.top": 0.85,
+    "figure.subplot.wspace": 0.4,
+    "text.usetex": False,
 }
diff --git a/doc/scipy-sphinx-theme/index.rst b/doc/scipy-sphinx-theme/index.rst
index f7f8a750..22290bff 100644
--- a/doc/scipy-sphinx-theme/index.rst
+++ b/doc/scipy-sphinx-theme/index.rst
@@ -27,4 +27,3 @@ Indices and tables
 * :ref:`genindex`
 * :ref:`modindex`
 * :ref:`search`
-
diff --git a/doc/scipy-sphinx-theme/test_autodoc_3.rst b/doc/scipy-sphinx-theme/test_autodoc_3.rst
index fcc669cc..8a425ee0 100644
--- a/doc/scipy-sphinx-theme/test_autodoc_3.rst
+++ b/doc/scipy-sphinx-theme/test_autodoc_3.rst
@@ -4,4 +4,3 @@ scipy.odr.ODR.run
 .. currentmodule:: scipy.odr
 .. automethod:: scipy.odr.ODR.run
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 44261e74..b36ae7cd 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -12,75 +12,75 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
 import os
+import sys
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-sys.path.insert(0, os.path.abspath('../numpydoc'))
+sys.path.insert(0, os.path.abspath("../numpydoc"))
 
 extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.mathjax',
-    'sphinx.ext.autosummary',
-    'sphinx.ext.todo',
-    'numpydoc',
+    "sphinx.ext.autodoc",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.todo",
+    "numpydoc",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-source_encoding = 'utf-8-sig'
+source_encoding = "utf-8-sig"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = 'MedPy'
-copyright = '2013-2019, Oskar Maier'
+project = "MedPy"
+copyright = "2013-2019, Oskar Maier"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '0.4'
+version = "0.4"
 # The full version, including alpha/beta/rc tags.
-release = '0.4.0'
+release = "0.4.0"
 
 # Automatically created autosummary entries (thus no need to call sphinx-autogen)
 autosummary_generate = True
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
@@ -91,90 +91,92 @@
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
 
 # -- Options for HTML output ----------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
+themedir = os.path.join(os.pardir, "scipy-sphinx-theme", "_theme")
 if os.path.isdir(themedir):
-    html_theme = 'scipy'
+    html_theme = "scipy"
     html_theme_path = [themedir]
 
     html_theme_options = {
         "edit_link": False,
         "sidebar": "left",
         "scipy_org_logo": False,
-        "rootlinks": [('https://github.com/loli/medpy/', 'GitHub'),
-                      ('https://pypi.python.org/pypi/MedPy/', 'PyPi')],
-        "navigation_links": True
+        "rootlinks": [
+            ("https://github.com/loli/medpy/", "GitHub"),
+            ("https://pypi.python.org/pypi/MedPy/", "PyPi"),
+        ],
+        "navigation_links": True,
     }
-    
+
 else:
-    html_theme = 'default'
+    html_theme = "default"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # " v documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%b %d, %Y'
+html_last_updated_fmt = "%b %d, %Y"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
 html_domain_indices = True
@@ -183,7 +185,7 @@
 html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = True
@@ -197,68 +199,62 @@
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a  tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'medpy'
+htmlhelp_basename = "medpy"
 
 
 # -- Options for LaTeX output ---------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    ('index', 'MedPy.tex', 'MedPy Documentation',
-     'Oskar Maier', 'manual'),
+    ("index", "MedPy.tex", "MedPy Documentation", "Oskar Maier", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output ---------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'medpy', 'MedPy Documentation',
-     ['Oskar Maier'], 1)
-]
+man_pages = [("index", "medpy", "MedPy Documentation", ["Oskar Maier"], 1)]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output -------------------------------------------
 
@@ -267,92 +263,98 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'MedPy', 'MedPy Documentation',
-     'Oskar Maier', 'MedPy', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        "index",
+        "MedPy",
+        "MedPy Documentation",
+        "Oskar Maier",
+        "MedPy",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 # If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
+# texinfo_no_detailmenu = False
 
 
 # -- Options for Epub output ----------------------------------------------
 
 # Bibliographic Dublin Core info.
-epub_title = 'MedPy'
-epub_author = 'Oskar Maier'
-epub_publisher = 'Oskar Maier'
-epub_copyright = '2018, Oskar Maier'
+epub_title = "MedPy"
+epub_author = "Oskar Maier"
+epub_publisher = "Oskar Maier"
+epub_copyright = "2018, Oskar Maier"
 
 # The basename for the epub file. It defaults to the project name.
-#epub_basename = u'MedPy'
+# epub_basename = u'MedPy'
 
 # The HTML theme for the epub output.
 # Since the default themes are not optimized
 # for small screen space, using the same theme for HTML and epub output is
 # usually not wise. This defaults to 'epub', a theme designed to save visual
 # space.
-#epub_theme = 'epub'
+# epub_theme = 'epub'
 
 # The language of the text. It defaults to the language option
 # or en if the language is not set.
-#epub_language = ''
+# epub_language = ''
 
 # The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
+# epub_scheme = ''
 
 # The unique identifier of the text. This can be an ISBN number
 # or the project homepage.
-#epub_identifier = ''
+# epub_identifier = ''
 
 # A unique identification for the text.
-#epub_uid = ''
+# epub_uid = ''
 
 # A tuple containing the cover image and cover page html template filenames.
-#epub_cover = ()
+# epub_cover = ()
 
 # A sequence of (type, uri, title) tuples for the guide element of content.opf.
-#epub_guide = ()
+# epub_guide = ()
 
 # HTML files that should be inserted before the pages created by sphinx.
 # The format is a list of tuples containing the path and title.
-#epub_pre_files = []
+# epub_pre_files = []
 
 # HTML files that should be inserted after the pages created by sphinx.
 # The format is a list of tuples containing the path and title.
-#epub_post_files = []
+# epub_post_files = []
 
 # A list of files that should not be packed into the epub file.
-epub_exclude_files = ['search.html']
+epub_exclude_files = ["search.html"]
 
 # The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
+# epub_tocdepth = 3
 
 # Allow duplicate toc entries.
-#epub_tocdup = True
+# epub_tocdup = True
 
 # Choose between 'default' and 'includehidden'.
-#epub_tocscope = 'default'
+# epub_tocscope = 'default'
 
 # Fix unsupported image types using the PIL.
-#epub_fix_images = False
+# epub_fix_images = False
 
 # Scale large images.
-#epub_max_image_width = 0
+# epub_max_image_width = 0
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#epub_show_urls = 'inline'
+# epub_show_urls = 'inline'
 
 # If false, no index is generated.
-#epub_use_index = True
+# epub_use_index = True
 
 ###
 # NUMPYDOC options
diff --git a/doc/source/features.rst b/doc/source/features.rst
index 1788b56f..89c9e302 100644
--- a/doc/source/features.rst
+++ b/doc/source/features.rst
@@ -1,2 +1 @@
 .. automodule:: medpy.features
-
diff --git a/doc/source/filter.rst b/doc/source/filter.rst
index bf4efbd9..21634671 100644
--- a/doc/source/filter.rst
+++ b/doc/source/filter.rst
@@ -1,2 +1 @@
 .. automodule:: medpy.filter
-
diff --git a/doc/source/graphcut.rst b/doc/source/graphcut.rst
index f8cb5a66..1ba34e1d 100644
--- a/doc/source/graphcut.rst
+++ b/doc/source/graphcut.rst
@@ -1,2 +1 @@
 .. automodule:: medpy.graphcut
-
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 646d0a1b..32de9006 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,7 +11,7 @@ Installation
 
 .. toctree::
    :maxdepth: 1
-   
+
    installation/fastpath
    installation/venv
    installation/asroot
@@ -30,7 +30,7 @@ Information
 
 .. toctree::
    :glob:
    :maxdepth: 1
-   
+
    information/*
 
 Tutorials
@@ -39,7 +39,7 @@ Tutorials
 
 .. toctree::
    :glob:
    :maxdepth: 1
-   
+
    tutorial/*
 
 Notebooks
@@ -47,10 +47,10 @@ Notebooks
 
 `Accessing the image's meta-data `_.
 In this tutorial we will learn how to access and manipulate the image's meta-data from the header.
-   
+
 `Load, threshold and save an image `_.
 In this tutorial you will learn how to load a medical image with MedPy, how to perform a simple
 thresholding operation and how to save the resulting binary image.
-   
+
 `Simple binary image processing `_.
 In this tutorial you will learn some simple binary image processing.
@@ -69,4 +69,3 @@ Reference
    graphcut
    core
    utilities
-
diff --git a/doc/source/information/commandline_tools_listing.rst b/doc/source/information/commandline_tools_listing.rst
index c4e453c9..88733cd4 100644
--- a/doc/source/information/commandline_tools_listing.rst
+++ b/doc/source/information/commandline_tools_listing.rst
@@ -71,18 +71,18 @@ Image volume manipulation
 
 .. topic:: medpy_extract_sub_volume_auto.py (`notebook `_)
 
-    Splits a volume into a number of sub volumes along a given dimension. 
+    Splits a volume into a number of sub volumes along a given dimension.
 
 .. topic:: medpy_extract_sub_volume_by_example.py (`notebook `_)
 
     Takes an image and a second image containing a binary mask, then extracts the sub volume of the first image defined by the bounding box of the foreground object in the binary image.
- 
+
 .. topic:: medpy_fit_into_shape.py (`notebook `_)
- 
+
     Fit an existing image into a new shape by either extending or cutting all dimensions symmetrically.
- 
+
 .. topic:: medpy_intersection.py (`notebook `_)
- 
+
     Extracts the intersecting parts of two volumes regarding offset and voxel-spacing.
 
 .. topic:: medpy_join_xd_to_xplus1d.py (`notebook `_)
@@ -133,7 +133,7 @@ Binary image manipulation
     Converts a binary volume into a surface contour.
 
 .. topic:: medpy_join_masks.py (`notebook `_)
- 
+
     Joins a number of binary images into a single conjunction using sum, avg, max or min.
 
 .. topic:: medpy_merge.py (`notebook `_)
diff --git a/doc/source/information/imageformats.rst b/doc/source/information/imageformats.rst
index 56dd9ee2..4747dc4e 100644
--- a/doc/source/information/imageformats.rst
+++ b/doc/source/information/imageformats.rst
@@ -16,7 +16,7 @@ Medical formats:
 - Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
 - Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
 - Digital Imaging and Communications in Medicine (DICOM) series (/)
-- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) 
+- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
 - Medical Imaging NetCDF (MINC) (.mnc, .MNC)
 - Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
 
@@ -39,7 +39,7 @@ Other formats:
 - Windows bitmap (.bmp, .BMP)
 - Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5)
 - MSX-DOS Screen-x (.ge4, .ge5)
- 
+
 For information about which image formats, dimensionalities and pixel data types
 your current configuration supports, run `python3 tests/support.py > myformats.log`.
diff --git a/doc/source/installation/asroot.rst b/doc/source/installation/asroot.rst
index a11dbbeb..dddfee7e 100644
--- a/doc/source/installation/asroot.rst
+++ b/doc/source/installation/asroot.rst
@@ -5,19 +5,18 @@ Installing MedPy as root
 
 All installation instructions are for Debian derivatives, such as Ubuntu, but they
 should be similar for other distributions.
- 
+
 When installed with root privileges, **MedPy** will be available for all users of your machine.
 
 To install Python packages from `PyPi `_, we recommend `PIP `_.
 
-To enable the graph-cut package, we need the following 
+To enable the graph-cut package, we need the following
- 
+
 .. code-block:: bash
- 
+
     sudo apt-get install libboost-python-dev build-essential
- 
+
 Now we can install **MedPy**
 
 .. code-block:: bash
 
     sudo pip install medpy
-
diff --git a/doc/source/installation/asuser.rst b/doc/source/installation/asuser.rst
index 7c016935..ae2d0443 100644
--- a/doc/source/installation/asuser.rst
+++ b/doc/source/installation/asuser.rst
@@ -11,22 +11,22 @@ The local install will place **MedPy** in your user site-packages directory and
 
 .. code-block:: bash
 
     python -c 'import site;print site.USER_SITE'
- 
+
 In some cases, the Python configuration does not find packages in the users site-packages
 directory, in which case you will have to add it to your PYTHONPATH variable. To make this
 permanent, add the extension to your `.bashrc`, e.g. using:
 
 .. code-block:: bash
 
     echo "export PYTHONPATH=${PYTHONPATH}:$( python -c 'import site;print site.USER_SITE' )" >> ~/.bashrc
- 
+
 More importantly, the scripts shipped with **MedPy** won't be in your PATH and hence can not be
 used directly. If your user site-packages directory is e.g. `/home//.local/lib/python2.7/site-packages/`,
 the scripts are most likely to be found under `/home//.local/bin/`. Add this directory to your PATH
 using:
 
 .. code-block:: bash
 
     echo "export PATH=${PATH}:/home//.local/bin/" >> ~/.bashrc
- 
-(Don't forget to replace with your own user name.) 
+
+(Don't forget to replace with your own user name.)
 
 Installing using `PIP `_
 ----------------------------------------------------------
@@ -34,9 +34,9 @@ Requires `PIP `_ to be installed on your machi
 
 To enable the graph-cut package, we also need the following, which requires administrator
 rights. If you do not plan on using the graph-cut functionality of **MedPy**, you can skip
 this step.
- 
+
 .. code-block:: bash
- 
+
     sudo apt-get install libboost-python-dev build-essential
 
 To install **MedPy** itself, simply call
diff --git a/doc/source/installation/conda.rst b/doc/source/installation/conda.rst
index 82bec3db..0b89e96c 100644
--- a/doc/source/installation/conda.rst
+++ b/doc/source/installation/conda.rst
@@ -13,4 +13,4 @@ But you can nevertheless install it into a conda environement using *pip* after
 Note that the graph-cut package won't compile in the conda environment due to unmet dependencies.
 
 For conda-purists: The friendly folks from `bioconda `_ wrapped the previous (0.3.0) version of **MedPy**
-into their distribution system (see https://anaconda.org/bioconda/medpy).
\ No newline at end of file
+into their distribution system (see https://anaconda.org/bioconda/medpy).
diff --git a/doc/source/installation/fastpath.rst b/doc/source/installation/fastpath.rst
index bed3e872..a1a8196e 100644
--- a/doc/source/installation/fastpath.rst
+++ b/doc/source/installation/fastpath.rst
@@ -10,4 +10,3 @@ Installing MedPy the fast way
 
     sudo apt-get install libboost-python-dev build-essential
 
     sudo pip install medpy
-
diff --git a/doc/source/installation/graphcutsupport.rst b/doc/source/installation/graphcutsupport.rst
index e47490a0..7514d468 100644
--- a/doc/source/installation/graphcutsupport.rst
+++ b/doc/source/installation/graphcutsupport.rst
@@ -17,4 +17,4 @@ These dependencies can be found in the repositories of all major distribution. F
 
     sudo apt-get install libboost-python-dev build-essential
 
-Then install **MedPy** the usual way.
\ No newline at end of file
+Then install **MedPy** the usual way.
diff --git a/doc/source/io.rst b/doc/source/io.rst
index ba8bd701..a9f135b2 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1,2 +1 @@
 ..
automodule:: medpy.io - diff --git a/doc/source/iterators.rst b/doc/source/iterators.rst index c7357798..4592d467 100644 --- a/doc/source/iterators.rst +++ b/doc/source/iterators.rst @@ -1,2 +1 @@ .. automodule:: medpy.iterators - diff --git a/doc/source/metric.rst b/doc/source/metric.rst index 20883fc8..63caf511 100644 --- a/doc/source/metric.rst +++ b/doc/source/metric.rst @@ -1,2 +1 @@ .. automodule:: medpy.metric - diff --git a/doc/source/neighbours.rst b/doc/source/neighbours.rst index 17f7d1f0..f3a80c04 100644 --- a/doc/source/neighbours.rst +++ b/doc/source/neighbours.rst @@ -1,2 +1 @@ .. automodule:: medpy.neighbours - diff --git a/doc/source/utilities.rst b/doc/source/utilities.rst index 08d66d00..15410c0e 100644 --- a/doc/source/utilities.rst +++ b/doc/source/utilities.rst @@ -1,2 +1 @@ .. automodule:: medpy.utilities - diff --git a/lib/maxflow/src/BUILD b/lib/maxflow/src/BUILD index d3fa44e3..2811cd3b 100644 --- a/lib/maxflow/src/BUILD +++ b/lib/maxflow/src/BUILD @@ -8,5 +8,3 @@ mkdir build cd build cmake ../. make - - diff --git a/lib/maxflow/src/CMakeLists.txt b/lib/maxflow/src/CMakeLists.txt index d301e5d6..9e947fe6 100644 --- a/lib/maxflow/src/CMakeLists.txt +++ b/lib/maxflow/src/CMakeLists.txt @@ -7,7 +7,7 @@ if(COMMAND cmake_policy) cmake_policy(SET CMP0012 NEW) endif(COMMAND cmake_policy) -SET(SOURCES maxflow.cpp graph.cpp wrapper.cpp) +SET(SOURCES maxflow.cpp graph.cpp wrapper.cpp) SET(LIBRARY_NAME maxflow) FIND_PACKAGE( Boost 1.46.0 COMPONENTS python REQUIRED) @@ -17,4 +17,3 @@ INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH}) ADD_LIBRARY(${LIBRARY_NAME} MODULE ${SOURCES}) SET_TARGET_PROPERTIES(${LIBRARY_NAME} PROPERTIES PREFIX "") TARGET_LINK_LIBRARIES(${LIBRARY_NAME} ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARIES} ) - diff --git a/lib/maxflow/src/Jamroot b/lib/maxflow/src/Jamroot index e6cc0f48..a917f2e0 100644 --- a/lib/maxflow/src/Jamroot +++ b/lib/maxflow/src/Jamroot @@ -16,4 +16,4 @@ project : requirements libboost_python ; # Declare the three extension modules. You can specify multiple # source files after the colon separated by spaces. -python-extension maxflow : wrapper.cpp ; \ No newline at end of file +python-extension maxflow : wrapper.cpp ; diff --git a/lib/maxflow/src/block.h b/lib/maxflow/src/block.h index c4dfa467..63c87be7 100644 --- a/lib/maxflow/src/block.h +++ b/lib/maxflow/src/block.h @@ -55,7 +55,7 @@ ... 
DBlock *dblock = new DBlock(BLOCK_SIZE); - + // adding items for (int i=0; i class DBlock #endif - diff --git a/lib/maxflow/src/get_edge_test.py b/lib/maxflow/src/get_edge_test.py index 55c484c1..fd6bb593 100755 --- a/lib/maxflow/src/get_edge_test.py +++ b/lib/maxflow/src/get_edge_test.py @@ -1,67 +1,77 @@ #!/usr/bin/python -from maxflow import GraphDouble, GraphFloat, GraphInt import random +from maxflow import GraphDouble, GraphFloat, GraphInt + + def main(): - print("GRAPHDOUBLE") - test(GraphDouble, 100) - print("GRAPHFLOAT") - test(GraphFloat, 100) - print("GRAPHINT") - test(GraphInt, 100) + print("GRAPHDOUBLE") + test(GraphDouble, 100) + print("GRAPHFLOAT") + test(GraphFloat, 100) + print("GRAPHINT") + test(GraphInt, 100) + def test(graphtype, runs): - print("#### FIRST ####") - g = graphtype(2,1) - g.add_node(3) - g.add_edge(0, 1, 2, 2) - g.add_edge(0, 2, 4, 5) + print("#### FIRST ####") + g = graphtype(2, 1) + g.add_node(3) + g.add_edge(0, 1, 2, 2) + g.add_edge(0, 2, 4, 5) - p(g,0,1,2) - p(g,1,0,2) - p(g,0,2,4) - p(g,2,0,5) - p(g,1,2,0) - p(g,2,1,0) - #p(g,1,3,1) # should raise error: node id out of bounds + p(g, 0, 1, 2) + p(g, 1, 0, 2) + p(g, 0, 2, 4) + p(g, 2, 0, 5) + p(g, 1, 2, 0) + p(g, 2, 1, 0) + # p(g,1,3,1) # should raise error: node id out of bounds - print("#### SECOND ####") - g = graphtype(2,1) - g.add_node(2) - g.add_edge(0, 1, 2, 3) - p(g,0,1,2) - p(g,1,0,3) - #p(g,1,2,1) # should raise error: node id unknown, as add_node has not been often enough called + print("#### SECOND ####") + g = graphtype(2, 1) + g.add_node(2) + g.add_edge(0, 1, 2, 3) + p(g, 0, 1, 2) + p(g, 1, 0, 3) + # p(g,1,2,1) # should raise error: node id unknown, as add_node has not been often enough called + + print("#### THIRD: RANDOM ####") + nodes = runs + edges = nodes * (nodes - 1) + g = graphtype(nodes, edges) + g.add_node(nodes) + connection = dict() + for fr in range(nodes): + for to in range(fr, nodes): + if fr == to: + continue + connection[(fr, to)] = (random.randint(1, 10), random.randint(1, 10)) + g.add_edge(fr, to, connection[(fr, to)][0], connection[(fr, to)][1]) + print("Testing {} random edge weights...".format(edges)) + for fr in range(nodes): + for to in range(fr, nodes): + if fr == to: + continue + p2(g, fr, to, connection[(fr, to)][0]) + p2(g, to, fr, connection[(fr, to)][1]) + print("Finished.") - print("#### THIRD: RANDOM ####") - nodes = runs - edges = nodes * (nodes - 1) - g = graphtype(nodes,edges) - g.add_node(nodes) - connection = dict() - for fr in range(nodes): - for to in range(fr, nodes): - if fr == to: continue - connection[(fr, to)] = (random.randint(1,10), random.randint(1,10)) - g.add_edge(fr, to, connection[(fr, to)][0], connection[(fr, to)][1]) - print('Testing {} random edge weights...'.format(edges)) - for fr in range(nodes): - for to in range(fr, nodes): - if fr == to: continue - p2(g, fr, to, connection[(fr, to)][0]) - p2(g, to, fr, connection[(fr, to)][1]) - print('Finished.') def p(g, f, t, exp): - if exp != g.get_edge(f, t): print('!Failed:', end=' ') - else: print('Passed:', end=' ') - print('{}->{}:{} (expected: {})'.format(f, t, g.get_edge(f, t), exp)) + if exp != g.get_edge(f, t): + print("!Failed:", end=" ") + else: + print("Passed:", end=" ") + print("{}->{}:{} (expected: {})".format(f, t, g.get_edge(f, t), exp)) + def p2(g, f, t, exp): - if exp != g.get_edge(f, t): - print('!Failed:', end=' ') - print('{}->{}:{} (expected: {})'.format(f, t, g.get_edge(f, t), exp)) - + if exp != g.get_edge(f, t): + print("!Failed:", end=" ") + print("{}->{}:{} 
(expected: {})".format(f, t, g.get_edge(f, t), exp)) + + if __name__ == "__main__": - main() + main() diff --git a/lib/maxflow/src/graph.cpp b/lib/maxflow/src/graph.cpp index 7ab9a614..f4fa8f7b 100644 --- a/lib/maxflow/src/graph.cpp +++ b/lib/maxflow/src/graph.cpp @@ -8,7 +8,7 @@ #include "graph.h" -template +template Graph::Graph(int node_num_max, int edge_num_max, void (*err_function)(char *)) : node_num(0), nodeptr_block(NULL), @@ -30,36 +30,36 @@ template flow = 0; } -template +template Graph::~Graph() { - if (nodeptr_block) - { - delete nodeptr_block; - nodeptr_block = NULL; + if (nodeptr_block) + { + delete nodeptr_block; + nodeptr_block = NULL; } free(nodes); free(arcs); } -template +template void Graph::reset() { node_last = nodes; arc_last = arcs; node_num = 0; - if (nodeptr_block) - { - delete nodeptr_block; - nodeptr_block = NULL; + if (nodeptr_block) + { + delete nodeptr_block; + nodeptr_block = NULL; } maxflow_iteration = 0; flow = 0; } -template +template void Graph::reallocate_nodes(int num) { int node_num_max = (int)(node_max - nodes); @@ -83,7 +83,7 @@ template } } -template +template void Graph::reallocate_arcs() { int arc_num_max = (int)(arc_max - arcs); diff --git a/lib/maxflow/src/graph.h b/lib/maxflow/src/graph.h index f851fea2..aaae2d74 100644 --- a/lib/maxflow/src/graph.h +++ b/lib/maxflow/src/graph.h @@ -5,7 +5,7 @@ "An Experimental Comparison of Min-Cut/Max-Flow Algorithms for Energy Minimization in Vision." Yuri Boykov and Vladimir Kolmogorov. - In IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), + In IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), September 2004 This algorithm was developed by Yuri Boykov and Vladimir Kolmogorov @@ -58,7 +58,7 @@ template class Graph { SOURCE = 0, SINK = 1 - } termtype; // terminals + } termtype; // terminals typedef int node_id; ///////////////////////////////////////////////////////////////////////// @@ -66,16 +66,16 @@ template class Graph // (should be enough for most applications) // ///////////////////////////////////////////////////////////////////////// - // Constructor. + // Constructor. // The first argument gives an estimate of the maximum number of nodes that can be added // to the graph, and the second argument is an estimate of the maximum number of edges. - // The last (optional) argument is the pointer to the function which will be called - // if an error occurs; an error message is passed to this function. + // The last (optional) argument is the pointer to the function which will be called + // if an error occurs; an error message is passed to this function. // If this argument is omitted, exit(1) will be called. // - // IMPORTANT: It is possible to add more nodes to the graph than node_num_max - // (and node_num_max can be zero). However, if the count is exceeded, then - // the internal memory is reallocated (increased by 50%) which is expensive. + // IMPORTANT: It is possible to add more nodes to the graph than node_num_max + // (and node_num_max can be zero). However, if the count is exceeded, then + // the internal memory is reallocated (increased by 50%) which is expensive. // Also, temporarily the amount of allocated memory would be more than twice than needed. // Similarly for edges. // If you wish to avoid this overhead, you can download version 2.2, where nodes and edges are stored in blocks. @@ -84,13 +84,13 @@ template class Graph // Destructor ~Graph(); - // Adds node(s) to the graph. 
By default, one node is added (num=1); then first call returns 0, second call returns 1, and so on. + // Adds node(s) to the graph. By default, one node is added (num=1); then first call returns 0, second call returns 1, and so on. // If num>1, then several nodes are added, and node_id of the first one is returned. - // IMPORTANT: see note about the constructor + // IMPORTANT: see note about the constructor node_id add_node(int num = 1); // Adds a bidirectional edge between 'i' and 'j' with the weights 'cap' and 'rev_cap'. - // IMPORTANT: see note about the constructor + // IMPORTANT: see note about the constructor // NOTE: One call to this function adds two arcs (i->j and j->i) to the graph. But in // the sense of the memory allocation passed to the constructor, these count as one // single edge! @@ -155,8 +155,8 @@ template class Graph // 1. Reallocating graph. // //////////////////////////// - // Removes all nodes and edges. - // After that functions add_node() and add_edge() must be called again. + // Removes all nodes and edges. + // After that functions add_node() and add_edge() must be called again. // // Advantage compared to deleting Graph and allocating it again: // no calls to delete/new (which could be quite slow). @@ -195,7 +195,7 @@ template class Graph /////////////////////////////////////////////////// // returns residual capacity of SOURCE->i minus residual capacity of i->SINK - tcaptype get_trcap(node_id i); + tcaptype get_trcap(node_id i); // returns residual capacity of arc a captype get_rcap(arc* a); @@ -205,7 +205,7 @@ template class Graph // returned by maxflow() will not be valid! // ///////////////////////////////////////////////////////////////// - void set_trcap(node_id i, tcaptype trcap); + void set_trcap(node_id i, tcaptype trcap); void set_rcap(arc* a, captype rcap); //////////////////////////////////////////////////////////////////// @@ -213,7 +213,7 @@ template class Graph //////////////////////////////////////////////////////////////////// // If flag reuse_trees is true while calling maxflow(), then search trees - // are reused from previous maxflow computation. + // are reused from previous maxflow computation. // In this case before calling maxflow() the user must // specify which parts of the graph have changed by calling mark_node(): // add_tweights(i),set_trcap(i) => call mark_node(i) @@ -221,12 +221,12 @@ template class Graph // // This option makes sense only if a small part of the graph is changed. // The initialization procedure goes only through marked nodes then. - // + // // mark_node(i) can either be called before or after graph modification. // Can be called more than once per node, but calls after the first one // do not have any effect. - // - // NOTE: + // + // NOTE: // - This option cannot be used in the first call to maxflow(). // - It is not necessary to call mark_node() if the change is ``not essential'', // i.e. sign(trcap) is preserved for a node and zero/nonzero status is preserved for an arc. @@ -262,16 +262,16 @@ template class Graph // changed_list->Reset(); // } // delete changed_list; - // + // // NOTE: // - If changed_list option is used, then reuse_trees must be used as well. // - In the example above, the user may omit calls g->remove_from_changed_list(i) and changed_list->Reset() in a given iteration. // Then during the next call to maxflow(true, &changed_list) new nodes will be added to changed_list. 
// - If the next call to maxflow() does not use option reuse_trees, then calling remove_from_changed_list() // is not necessary. ("changed_list->Reset()" or "delete changed_list" should still be called, though). - void remove_from_changed_list(node_id i) - { - assert(i>=0 && i=0 && i class Graph ///////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////// - + private: // internal variables and functions @@ -298,10 +298,10 @@ template class Graph int DIST; // distance to the terminal int is_sink : 1; // flag showing whether the node is in the source or in the sink tree (if parent!=NULL) int is_marked : 1; // set by mark_node() - int is_in_changed_list : 1; // set by maxflow if + int is_in_changed_list : 1; // set by maxflow if tcaptype tr_cap; // if tr_cap > 0 then tr_cap is residual capacity of the arc SOURCE->node - // otherwise -tr_cap is residual capacity of the arc node->SINK + // otherwise -tr_cap is residual capacity of the arc node->SINK }; @@ -384,7 +384,7 @@ template class Graph -template +template inline typename Graph::node_id Graph::add_node(int num) { assert(num > 0); @@ -412,7 +412,7 @@ template } } -template +template inline void Graph::add_tweights(node_id i, tcaptype cap_source, tcaptype cap_sink) { assert(i >= 0 && i < node_num); @@ -424,7 +424,7 @@ template nodes[i].tr_cap = cap_source - cap_sink; } -template +template inline void Graph::add_edge(node_id _i, node_id _j, captype cap, captype rev_cap) { assert(_i >= 0 && _i < node_num); @@ -508,19 +508,19 @@ template // Added by Os return NULL; } -template +template inline typename Graph::arc* Graph::get_first_arc() { return arcs; } -template - inline typename Graph::arc* Graph::get_next_arc(arc* a) +template + inline typename Graph::arc* Graph::get_next_arc(arc* a) { - return a + 1; + return a + 1; } -template +template inline void Graph::get_arc_ends(arc* a, node_id& i, node_id& j) { assert(a >= arcs && a < arc_last); @@ -528,28 +528,28 @@ template j = (node_id) (a->head - nodes); } -template +template inline tcaptype Graph::get_trcap(node_id i) { assert(i>=0 && i +template inline captype Graph::get_rcap(arc* a) { assert(a >= arcs && a < arc_last); return a->r_cap; } -template +template inline void Graph::set_trcap(node_id i, tcaptype trcap) { - assert(i>=0 && i=0 && i +template inline void Graph::set_rcap(arc* a, captype rcap) { assert(a >= arcs && a < arc_last); @@ -557,7 +557,7 @@ template } -template +template inline typename Graph::termtype Graph::what_segment(node_id i, termtype default_segm) { if (nodes[i].parent) @@ -570,7 +570,7 @@ template } } -template +template inline void Graph::mark_node(node_id _i) { node* i = nodes + _i; diff --git a/lib/maxflow/src/instances.inc b/lib/maxflow/src/instances.inc index 9c9ee37c..a7e463f8 100644 --- a/lib/maxflow/src/instances.inc +++ b/lib/maxflow/src/instances.inc @@ -5,12 +5,11 @@ #endif // Instantiations: -// IMPORTANT: -// flowtype should be 'larger' than tcaptype +// IMPORTANT: +// flowtype should be 'larger' than tcaptype // tcaptype should be 'larger' than captype template class Graph; template class Graph; template class Graph; template class Graph; - diff --git a/lib/maxflow/src/maxflow.cpp b/lib/maxflow/src/maxflow.cpp index a2fc042b..62812fde 100644 --- a/lib/maxflow/src/maxflow.cpp +++ b/lib/maxflow/src/maxflow.cpp @@ -30,7 +30,7 @@ */ -template +template inline void Graph::set_active(node *i) { if 
(!i->next) @@ -48,7 +48,7 @@ template If it is connected to the sink, it stays in the list, otherwise it is removed from the list */ -template +template inline typename Graph::node* Graph::next_active() { node *i; @@ -76,7 +76,7 @@ template /***********************************************************************/ -template +template inline void Graph::set_orphan_front(node *i) { nodeptr *np; @@ -87,7 +87,7 @@ template orphan_first = np; } -template +template inline void Graph::set_orphan_rear(node *i) { nodeptr *np; @@ -102,7 +102,7 @@ template /***********************************************************************/ -template +template inline void Graph::add_to_changed_list(node *i) { if (changed_list && !i->is_in_changed_list) @@ -115,7 +115,7 @@ template /***********************************************************************/ -template +template void Graph::maxflow_init() { node *i; @@ -155,7 +155,7 @@ template } } -template +template void Graph::maxflow_reuse_trees_init() { node* i; @@ -240,7 +240,7 @@ template //test_consistency(); } -template +template void Graph::augment(arc *middle_arc) { node *i; @@ -312,7 +312,7 @@ template /***********************************************************************/ -template +template void Graph::process_source_orphan(node *i) { node *j; @@ -389,7 +389,7 @@ template } } -template +template void Graph::process_sink_orphan(node *i) { node *j; @@ -468,7 +468,7 @@ template /***********************************************************************/ -template +template flowtype Graph::maxflow(bool reuse_trees, Block* _changed_list) { node *i, *j, *current_node = NULL; @@ -595,8 +595,8 @@ template if (!reuse_trees || (maxflow_iteration % 64) == 0) { - delete nodeptr_block; - nodeptr_block = NULL; + delete nodeptr_block; + nodeptr_block = NULL; } maxflow_iteration ++; @@ -606,7 +606,7 @@ template /***********************************************************************/ -template +template void Graph::test_consistency(node* current_node) { node *i; diff --git a/lib/maxflow/src/pythongraph.h b/lib/maxflow/src/pythongraph.h index bf10cd90..0ac47381 100644 --- a/lib/maxflow/src/pythongraph.h +++ b/lib/maxflow/src/pythongraph.h @@ -21,4 +21,3 @@ class Pythongraph : public Graph typename Graph::termtype what_segment(int i) { Graph::what_segment(i); }; }; #endif - diff --git a/lib/maxflow/src/sum_edge_test.py b/lib/maxflow/src/sum_edge_test.py index 45c75179..d8ef584c 100755 --- a/lib/maxflow/src/sum_edge_test.py +++ b/lib/maxflow/src/sum_edge_test.py @@ -1,85 +1,88 @@ #!/usr/bin/python + from maxflow import GraphDouble, GraphFloat, GraphInt -import random -def main(): - print("\nGRAPHINT") - test(GraphInt) - print("\nGRAPHFLOAT") - test(GraphFloat) - print("\nGRAPHDOUBLE") - test(GraphDouble) - print("\nADDITIONAL TESTS") - test_sum(GraphDouble) - test_multiple_arcs(GraphDouble) - test_overflow(GraphDouble) +def main(): + print("\nGRAPHINT") + test(GraphInt) + print("\nGRAPHFLOAT") + test(GraphFloat) + print("\nGRAPHDOUBLE") + test(GraphDouble) + print("\nADDITIONAL TESTS") + test_sum(GraphDouble) + test_multiple_arcs(GraphDouble) + test_overflow(GraphDouble) def test(graphtype): - g = graphtype(4,4) - g.add_node(4) + g = graphtype(4, 4) + g.add_node(4) - g.add_tweights(0, 99, 0) - g.add_tweights(3, 0, 99) + g.add_tweights(0, 99, 0) + g.add_tweights(3, 0, 99) - g.add_edge(0, 1, 1, 1) - g.add_edge(0, 2, 1, 1) - g.add_edge(1, 3, 2, 2) - g.add_edge(2, 3, 2, 2) - print('Flow: {}'.format(g.maxflow())) - print_cut(g, 4) + g.add_edge(0, 1, 1, 1) + g.add_edge(0, 2, 1, 1) 
+ g.add_edge(1, 3, 2, 2) + g.add_edge(2, 3, 2, 2) + print("Flow: {}".format(g.maxflow())) + print_cut(g, 4) - g.add_edge(0, 1, 2, 2) - g.add_edge(0, 2, 2, 2) - print('Flow: {}'.format(g.maxflow())) - print_cut(g, 4) + g.add_edge(0, 1, 2, 2) + g.add_edge(0, 2, 2, 2) + print("Flow: {}".format(g.maxflow())) + print_cut(g, 4) def test_sum(graphtype): - g = graphtype(2,1) - g.add_node(2) + g = graphtype(2, 1) + g.add_node(2) - print('Expected to go all the way to 20 without increasing the memory requirements...') - for i in range(20): - print(i, end=' ') - g.sum_edge(0, 1, 1, 2) + print( + "Expected to go all the way to 20 without increasing the memory requirements..." + ) + for i in range(20): + print(i, end=" ") + g.sum_edge(0, 1, 1, 2) - v1 = g.get_edge(0, 1) - v2 = g.get_edge(1, 0) - print('\nFinal edge weight should be 20 resp. 40. Found {} resp. {}'.format(v1, v2)) + v1 = g.get_edge(0, 1) + v2 = g.get_edge(1, 0) + print("\nFinal edge weight should be 20 resp. 40. Found {} resp. {}".format(v1, v2)) def test_multiple_arcs(graphtype): - g = graphtype(2,1) - g.add_node(2) + g = graphtype(2, 1) + g.add_node(2) - g.add_edge(0, 1, 1, 2) - g.add_edge(0, 1, 1, 2) + g.add_edge(0, 1, 1, 2) + g.add_edge(0, 1, 1, 2) - v1 = g.get_edge(0, 1) - v2 = g.get_edge(1, 0) - print('Final edge weight should be 1 resp. 2. Found {} resp. {}'.format(v1, v2)) + v1 = g.get_edge(0, 1) + v2 = g.get_edge(1, 0) + print("Final edge weight should be 1 resp. 2. Found {} resp. {}".format(v1, v2)) def test_overflow(graphtype): - g = graphtype(2,1) - g.add_node(2) + g = graphtype(2, 1) + g.add_node(2) - print('Memory expected to double after 15...') - for i in range(20): - g.add_edge(0, 1, 1, 2) - print(i, end=' ') + print("Memory expected to double after 15...") + for i in range(20): + g.add_edge(0, 1, 1, 2) + print(i, end=" ") + + v1 = g.get_edge(0, 1) + v2 = g.get_edge(1, 0) + print("\nFinal edge weight should be 1 resp. 2. Found {} resp. {}".format(v1, v2)) - v1 = g.get_edge(0, 1); - v2 = g.get_edge(1, 0); - print('\nFinal edge weight should be 1 resp. 2. Found {} resp. {}'.format(v1, v2)) - def print_cut(g, nodes): - for n in range(nodes): - print('{} in {}'.format(n, g.what_segment(n))) - + for n in range(nodes): + print("{} in {}".format(n, g.what_segment(n))) + + if __name__ == "__main__": - main() + main() diff --git a/lib/maxflow/src/wrapper.cpp b/lib/maxflow/src/wrapper.cpp index 16da8ccf..852d2de5 100644 --- a/lib/maxflow/src/wrapper.cpp +++ b/lib/maxflow/src/wrapper.cpp @@ -27,7 +27,7 @@ BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(GraphInt_what_segment_overload, what_segm void wrap_scopegraphfloat() { using namespace boost::python; - scope graphFloat = + scope graphFloat = class_("GraphFloat", "Graph template intance with float for flowtype, tcaptype and captype. Takes the number of nodes as first and the number of edges as second parameter. Although it is possible to exceed these values later, it is discourage as it leads to bad memory management. The edges i->j and j->i count here as one single edge.", init()) .def("add_node", &GraphFloat::add_node/*, GraphFloat_add_node_overload()*/) // "Add one or more nodes to the graph and returns the id of the first such created node. The total number of added nodes should never exceed the max node number passed to the initializer. Only nodes added with this function can be referenced in methods such as add_edge and add_tweights." .def("add_edge", &GraphFloat::add_edge, "Add an edge from i to j with the capacity cap and reversed capacity rev_cap. Node ids start from 0. 
Repeated calls lead to the addition of multiple arcs and therefore the allocate memory can be exceeded.") @@ -59,7 +59,7 @@ void wrap_scopegraphfloat() void wrap_scopegraphdouble() { using namespace boost::python; - scope graphDouble = + scope graphDouble = class_("GraphDouble", "Graph template intance with double for flowtype, tcaptype and captype. Takes the number of nodes as first and the number of edges as second parameter. Although it is possible to exceed these values later, it is discourage as it leads to bad memory management. The edges i->j and j->i count here as one single edge.", init()) .def("add_node", &GraphDouble::add_node/*, GraphDouble_add_node_overload()*/) // "Add one or more nodes to the graph and returns the id of the first such created node. The total number of added nodes should never exceed the max node number passed to the initializer. Only nodes added with this function can be referenced in methods such as add_edge and add_tweights." .def("add_edge", &GraphDouble::add_edge, "Add an edge from i to j with the capacity cap and reversed capacity rev_cap. Node ids start from 0. Repeated calls lead to the addition of multiple arcs and therefore the allocate memory can be exceeded.") @@ -91,7 +91,7 @@ void wrap_scopegraphdouble() void wrap_scopegraphint() { using namespace boost::python; - scope graphInt = + scope graphInt = class_("GraphInt", "Graph template intance with int for flowtype, tcaptype and captype. Takes the number of nodes as first and the number of edges as second parameter. Although it is possible to exceed these values later, it is discourage as it leads to bad memory management. The edges i->j and j->i count here as one single edge.", init()) .def("add_node", &GraphInt::add_node/*, GraphInt_add_node_overload()*/) // "Add one or more nodes to the graph and returns the id of the first such created node. The total number of added nodes should never exceed the max node number passed to the initializer. Only nodes added with this function can be referenced in methods such as add_edge and add_tweights." .def("add_edge", &GraphInt::add_edge, "Add an edge from i to j with the capacity cap and reversed capacity rev_cap. Node ids start from 0. Repeated calls lead to the addition of multiple arcs and therefore the allocate memory can be exceeded.") diff --git a/medpy/__init__.py b/medpy/__init__.py index a9c80316..3eb06f7f 100644 --- a/medpy/__init__.py +++ b/medpy/__init__.py @@ -23,4 +23,4 @@ You should have received a copy of the GNU General Public License along with this program. If not, see . """ -__version__ = '0.3.0' +__version__ = "0.3.0" diff --git a/medpy/core/__init__.py b/medpy/core/__init__.py index f32adcec..6c4adb49 100644 --- a/medpy/core/__init__.py +++ b/medpy/core/__init__.py @@ -14,17 +14,17 @@ .. module:: medpy.core.logger .. autosummary:: :toctree: generated/ - + Logger - - + + Exceptions :mod:`medpy.core.exceptions` ======================================= .. module:: medpy.core.exceptions .. autosummary:: :toctree: generated/ - + ArgumentError FunctionError SubprocessError @@ -37,24 +37,22 @@ """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
-#
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
-#
+#
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+
 # import all functions/methods/classes into the module
-from .logger import Logger
-from .exceptions import ArgumentError, FunctionError, SubprocessError, ImageLoadingError, \
-    DependencyError, ImageSavingError, ImageTypeError, MetaDataError
-
+
 # import all sub-modules in the __all__ variable
-__all__ = [s for s in dir() if not s.startswith('_')]
\ No newline at end of file
+__all__ = [s for s in dir() if not s.startswith("_")]
diff --git a/medpy/core/exceptions.py b/medpy/core/exceptions.py
index a7a1ebc1..29b7805f 100644
--- a/medpy/core/exceptions.py
+++ b/medpy/core/exceptions.py
@@ -1,15 +1,15 @@
 # Copyright (C) 2013 Oskar Maier
-#
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
-#
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
-#
+#
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
@@ -26,43 +26,43 @@
 # own modules

+
 # code
 class ArgumentError(Exception):
-    r"""Thrown by an application when an invalid command line argument has been supplied.
-    """
+    r"""Thrown by an application when an invalid command line argument has been supplied."""
     pass
-
+
+
 class FunctionError(Exception):
-    r"""Thrown when a supplied function returns unexpected results.
-    """
+    r"""Thrown when a supplied function returns unexpected results."""
     pass
-
+
+
 class SubprocessError(Exception):
-    r"""Thrown by an application when a subprocess execution failed.
-    """
+    r"""Thrown by an application when a subprocess execution failed."""
     pass

+
 class ImageTypeError(Exception):
-    r"""Thrown when trying to load or save an image of unknown type.
-    """
+    r"""Thrown when trying to load or save an image of unknown type."""
     pass

+
 class DependencyError(Exception):
-    r"""Thrown when a required module could not be loaded.
-    """
+    r"""Thrown when a required module could not be loaded."""
    pass

+
 class ImageLoadingError(Exception):
-    r"""Thrown when a image could not be loaded.
-    """
+    r"""Thrown when an image could not be loaded."""
     pass

+
 class ImageSavingError(Exception):
-    r"""Thrown when a image could not be saved.
-    """
+    r"""Thrown when an image could not be saved."""
     pass

+
 class MetaDataError(Exception):
-    r"""Thrown when an image meta data failure occurred.
-    """
+    r"""Thrown when an image meta data failure occurred."""
     pass
diff --git a/medpy/core/logger.py b/medpy/core/logger.py
index 7b96ada8..d327ee8d 100644
--- a/medpy/core/logger.py
+++ b/medpy/core/logger.py
@@ -1,15 +1,15 @@
 # Copyright (C) 2013 Oskar Maier
-#
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
-# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -18,9 +18,10 @@ # since 2011-12-12 # status Release +import logging + # build-in module import sys -import logging from logging import Logger as NativeLogger # third-party modules @@ -29,85 +30,86 @@ # constants + # code -class Logger (NativeLogger): +class Logger(NativeLogger): r"""Logger to be used by all applications and classes. - + Notes ----- Singleton class i.e. setting the log level changes the output globally. - + Examples -------- Initializing the logger - + >>> from medpy.core import Logger >>> logger = Logger.getInstance() - + Error messages are passed to stdout - + >>> logger.error('error message') 15.09.2014 12:40:25 [ERROR ] error message >>> logger.error('critical message') 15.09.2014 12:40:42 [CRITICAL] critical message - + But debug and info messages are suppressed - + >>> logger.info('info message') >>> logger.debug('debug message') - + Unless the log level is set accordingly - + >>> import logging >>> logger.setLevel(logging.DEBUG) - + >>> logger.info('info message') 15.09.2014 12:43:06 [INFO ] info message (in .:1) >>> logger.debug('debug message') 15.09.2014 12:42:50 [DEBUG ] debug message (in .:1) - + """ - - class LoggerHelper (object): - r"""A helper class which performs the actual initialization. - """ - def __call__(self, *args, **kw) : + + class LoggerHelper(object): + r"""A helper class which performs the actual initialization.""" + + def __call__(self, *args, **kw): # If an instance of TestSingleton does not exist, # create one and assign it to TestSingleton.instance. - if Logger._instance is None : + if Logger._instance is None: Logger._instance = Logger() # Return TestSingleton.instance, which should contain # a reference to the only instance of TestSingleton # in the system. return Logger._instance - - r"""Member variable initiating and returning the instance of the class.""" - getInstance = LoggerHelper() + + r"""Member variable initiating and returning the instance of the class.""" + getInstance = LoggerHelper() r"""The member variable holding the actual instance of the class.""" _instance = None r"""Holds the loggers handler for format changes.""" _handler = None - def __init__(self, name = 'MedPyLogger', level = 0) : + def __init__(self, name="MedPyLogger", level=0): # To guarantee that no one created more than one instance of Logger: - if not Logger._instance == None : - raise RuntimeError('Only one instance of Logger is allowed!') - + if not Logger._instance == None: + raise RuntimeError("Only one instance of Logger is allowed!") + # initialize parent NativeLogger.__init__(self, name, level) - + # set attributes self.setHandler(logging.StreamHandler(sys.stdout)) self.setLevel(logging.WARNING) - + def setHandler(self, hdlr): r"""Replace the current handler with a new one. - + Parameters ---------- hdlr : logging.Handler - A subclass of Handler that should used to handle the logging output. - + A subclass of Handler that should used to handle the logging output. 
+ Notes ----- If none should be replaces, but just one added, use the parent classes @@ -117,28 +119,30 @@ def setHandler(self, hdlr): self.removeHandler(self._handler) self._handler = hdlr self.addHandler(self._handler) - + def setLevel(self, level): r"""Overrides the parent method to adapt the formatting string to the level. - + Parameters ---------- level : int The new log level to set. See the logging levels in the logging module for details. - + Examples -------- >>> import logging >>> Logger.setLevel(logging.DEBUG) """ if logging.DEBUG >= level: - formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)", - "%d.%m.%Y %H:%M:%S") + formatter = logging.Formatter( + "%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)", + "%d.%m.%Y %H:%M:%S", + ) self._handler.setFormatter(formatter) else: - formatter = logging.Formatter("%(asctime)s [%(levelname)-8s] %(message)s", - "%d.%m.%Y %H:%M:%S") + formatter = logging.Formatter( + "%(asctime)s [%(levelname)-8s] %(message)s", "%d.%m.%Y %H:%M:%S" + ) self._handler.setFormatter(formatter) - + NativeLogger.setLevel(self, level) - diff --git a/medpy/features/__init__.py b/medpy/features/__init__.py index 5a7e412a..edb2e066 100644 --- a/medpy/features/__init__.py +++ b/medpy/features/__init__.py @@ -6,7 +6,7 @@ This package contains various functions for feature extraction and manipulation in medical images. - + Intensity :mod:`medpy.features.intensity` ========================================= Functions to extracts intensity based features. Ready to be @@ -16,7 +16,7 @@ .. module:: medpy.features.intensity .. autosummary:: :toctree: generated/ - + intensities centerdistance centerdistance_xdminus1 @@ -36,12 +36,12 @@ ===== | == == ===== s1 | s2 s3 [...] - f1.1 | - f1.2 | - f2.1 | - f3.1 | - f3.2 | - [...] | + f1.1 | + f1.2 | + f2.1 | + f3.1 | + f3.2 | + [...] | ===== | == == ===== , where each column sX denotes a single sample (voxel) and each row @@ -68,12 +68,12 @@ .. module:: medpy.features.utilities .. autosummary:: :toctree: generated/ - + normalize normalize_with_model append join - + Histogram :mod:`medy.features.histogram` ======================================== Functions to create various kinds of fuzzy histograms with the fuzzy_histogram function. @@ -81,12 +81,12 @@ .. module:: medpy.features.histogram .. autosummary:: :toctree: generated/ - + fuzzy_histogram triangular_membership trapezoid_membership gaussian_membership - sigmoidal_difference_membership + sigmoidal_difference_membership Available membership functions ------------------------------ @@ -109,9 +109,9 @@ An example of the smoothness parameter:: ____________ ________ ____________ ________ ____________ - / / \ / \ / \ / \ \ - / / \ / \ / \ / \ \ - / / \ / \ / \ / \ \ + / / \ / \ / \ / \ \ + / / \ / \ / \ / \ \ + / / \ / \ / \ / \ \ ---|----------|----------|----------|----------|----------|----------|----------|---- x-3 x-2 x-1 x x+1 x+2 x+3 |-nbh | |crisp bin | | +nbh| @@ -130,36 +130,28 @@ lies outside of the histogram range. To avoid this affect (which can be quite strong for histograms with few bins and a height smoothness term), set 'guarantee' to True. The histogram size is then selected to be (left_side - smoothness * bin_width till -right_side + smoothness * bin_width) and therefore neglect all boundary effects. +right_side + smoothness * bin_width) and therefore neglect all boundary effects. Plots of the membership functions can e.g. 
be found at http://www.atp.ruhr-uni-bochum.de/rt1/syscontrol/node117.html . - + """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # import all functions/methods/classes into the module -from .histogram import fuzzy_histogram, triangular_membership, trapezoid_membership, \ - gaussian_membership, sigmoidal_difference_membership -from .intensity import centerdistance, centerdistance_xdminus1, gaussian_gradient_magnitude, \ - hemispheric_difference, indices, intensities, local_histogram, local_mean_gauss, \ - median, shifted_mean_gauss, mask_distance -from .utilities import append, join, normalize, normalize_with_model # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] - - +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/features/histogram.py b/medpy/features/histogram.py index e6573271..defd7856 100644 --- a/medpy/features/histogram.py +++ b/medpy/features/histogram.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -28,15 +28,24 @@ # constants # the available membership functions for fuzzy histogram calculation -__MBS = ['triangular', 'trapezoid', 'gaussian', 'sigmoid'] +__MBS = ["triangular", "trapezoid", "gaussian", "sigmoid"] + # code -def fuzzy_histogram(a, bins=10, range=None, normed=False, membership='triangular', smoothness=None, guarantee=False): +def fuzzy_histogram( + a, + bins=10, + range=None, + normed=False, + membership="triangular", + smoothness=None, + guarantee=False, +): r"""Compute a fuzzy histogram. The percentage of a value's membership in a bin is computed using the selected membership function. This functions stays as near as possible to the `numpy.histogram` behaviour. - + Parameters ---------- a : array_like @@ -59,18 +68,18 @@ def fuzzy_histogram(a, bins=10, range=None, normed=False, membership='triangular guarantee : bool Guarantee that all values contribute equally to the histogram; when this value is set, the range term is ignored; see package descriptions for details. - + Returns ------- hist : array The values of the histogram. See normed and weights for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges (length(hist)+1). - + Notes ----- See package description for more details on the usage. 
-
+
     Examples
    --------
     >>> import numpy as np
     >>> a = np.asarray([1, 2, 3, 3.2, 3.4, 3.5, 7.5, 7.6, 7.8, 8, 9, 10])
     >>> fuzzy_histogram(a, bins=4)
     (array([ 3.4 , 2.04444444, 2.04444444, 3.4 ]), array([ 1. , 3.25, 5.5 , 7.75, 10. ]))
     >>> fuzzy_histogram(a, bins=4, membership='sigmoid')
     (array([ 3.34304743, 2.15613626, 2.15613626, 3.34304743]), array([ 1. , 3.25, 5.5 , 7.75, 10. ]))
-
+
     """
     # check and prepare parameters
     a = scipy.asarray(a).ravel()
-    if None == range: range = (a.min(), a.max())
-    if range[1] <= range[0]: raise AttributeError('max must be larger than min in range parameter.')
-    if not int == type(bins): raise AttributeError('bins must an integer.')
-    if bins <= 0: raise AttributeError('bins must greater than zero.')
-    if membership not in __MBS: raise AttributeError('Unknown type: {}. Must be one of {}.'.format(membership, __MBS))
-    if not None == smoothness and smoothness <= 0.0: raise AttributeError('smoothness must be greater than zero.')
-
+    if None == range:
+        range = (a.min(), a.max())
+    if range[1] <= range[0]:
+        raise AttributeError("max must be larger than min in range parameter.")
+    if not int == type(bins):
+        raise AttributeError("bins must be an integer.")
+    if bins <= 0:
+        raise AttributeError("bins must be greater than zero.")
+    if membership not in __MBS:
+        raise AttributeError(
+            "Unknown type: {}. Must be one of {}.".format(membership, __MBS)
+        )
+    if not None == smoothness and smoothness <= 0.0:
+        raise AttributeError("smoothness must be greater than zero.")
+
     # set default smoothness values
     if None == smoothness:
-        smoothness = 0.25 if 'trapezoid' == membership else 0.5
-
-    if not guarantee: # compute bin distribution in no guarantee case
+        smoothness = 0.25 if "trapezoid" == membership else 0.5
+
+    if not guarantee:  # compute bin distribution in no guarantee case
         binw = (range[1] - range[0]) / float(bins)
         bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)])
-    else: # compute bin distribution for guarantee case
+    else:  # compute bin distribution for guarantee case
         bins_core = bins - 2 * int(math.ceil(smoothness))
-        if bins_core <= 0: raise AttributeError('bins to few to guarantee removing boundary effect.')
+        if bins_core <= 0:
+            raise AttributeError("too few bins to guarantee removing boundary effect.")
         binw = (range[1] - range[0]) / float(bins_core)
-        range = (range[0] - int(math.ceil(smoothness)) * binw, range[1] + int(math.ceil(smoothness)) * binw)
+        range = (
+            range[0] - int(math.ceil(smoothness)) * binw,
+            range[1] + int(math.ceil(smoothness)) * binw,
+        )
         bins = scipy.asarray([i * binw + range[0] for i in scipy.arange(bins + 1)])
-
+
     # create membership function (centered at 0)
-    if 'triangular' == membership:
+    if "triangular" == membership:
         membership = triangular_membership(0, binw, smoothness)
-    elif 'trapezoid' == membership:
+    elif "trapezoid" == membership:
         membership = trapezoid_membership(0, binw, smoothness)
-    elif 'gaussian' == membership:
+    elif "gaussian" == membership:
         membership = gaussian_membership(0, binw, smoothness)
-    elif 'sigmoid' == membership:
+    elif "sigmoid" == membership:
         membership = sigmoidal_difference_membership(0, binw, smoothness)

     # compute histogram i.e. memberships of values across neighbourhood (determined by smoothness)
@@ -122,26 +143,33 @@ def fuzzy_histogram(a, bins=10, range=None, normed=False, membership='triangular
     l = len(bins) - 2
     histogram = scipy.zeros(l + 1)
     m = range[0]
-    for v in a: # for each value
+    for v in a:  # for each value
         idx = min(l, int((v - m) / binw))
-        for i in scipy.arange(max(0, idx - neighbourhood), min(l + 1, idx + neighbourhood + 1)): # for crips bin neighbourhood
+        for i in scipy.arange(
+            max(0, idx - neighbourhood), min(l + 1, idx + neighbourhood + 1)
+        ):  # for crisp bin neighbourhood
             start = bins[i]
-            histogram[i] += membership(v - start - 0.5 * binw) # adjust v for evaluation on zero-centered membership function
+            histogram[i] += membership(
+                v - start - 0.5 * binw
+            )  # adjust v for evaluation on zero-centered membership function

     # normalize
-    if normed: histogram /= float(sum(histogram))
-
+    if normed:
+        histogram /= float(sum(histogram))
+
     return histogram, bins
-
+
+
 # //////////////////// #
 # Membership functions #
 # //////////////////// #
-# see http://www.atp.ruhr-uni-bochum.de/rt1/syscontrol/node117.html for graphs
+# see http://www.atp.ruhr-uni-bochum.de/rt1/syscontrol/node117.html for graphs
+

-def triangular_membership(bin_center, bin_width, smoothness = 0.5):
+def triangular_membership(bin_center, bin_width, smoothness=0.5):
     r"""
     Create a triangular membership function for a fuzzy histogram bin.
-
+
     Parameters
     ----------
     bin_center : number
@@ -151,22 +179,22 @@ def triangular_membership(bin_center, bin_width, smoothness = 0.5):
     smoothness : number, optional
         The smoothness of the function; determines the neighbourhood affected.
         See below and `fuzzy_histogram` for a more detailed explanation
-
+
     Returns
     -------
     triangular_membership : function
         A triangular membership function centered on the bin.
-
+
     Notes
     -----
     For the triangular function the smoothness factor has to be 0.5. Lower values
     are accepted, but then the function assumes the shape of the trapezium
     membership function. Higher values lead to an exception.
-
+
     The triangular membership function is defined as

     .. math::
-
+
         \mu_{\triangle}(x) =
         \left\{
             \begin{array}{ll}
@@ -179,32 +207,41 @@
     where :math:`a` is the left border, :math:`c` the right border and :math:`b` the
     center of the triangular function. The height of the triangle is chosen such,
     that all values contribute with exactly one.
-
+
     The standard triangular function (:math:`smoothness = 0.5`) is displayed in the
     following figure
-
+
     .. .. image:: images/triangular_01.png
-
+
     "Triangular functions (1)"
-
+
     where the bin width is :math:`2` with centers at :math:`-2`, :math:`0` and :math:`2`.
     """
-    if smoothness > 0.5: raise AttributeError('the triangular/trapezium membership functions supports only smoothnesses between 1/10 and 1/2.')
+    if smoothness > 0.5:
+        raise AttributeError(
+            "the triangular/trapezium membership functions supports only smoothnesses between 1/10 and 1/2."
+ ) + if smoothness < 0.5: + return trapezoid_membership(bin_center, bin_width, smoothness) + a = bin_center - bin_width b = float(bin_center) c = bin_center + bin_width - + def fun(x): - if x < a or x > c: return 0 - elif x <= b: return (x-a)/(b-a) - else: return (c-x)/(c-b) + if x < a or x > c: + return 0 + elif x <= b: + return (x - a) / (b - a) + else: + return (c - x) / (c - b) + return fun - + + def trapezoid_membership(bin_center, bin_width, smoothness): r"""Create a trapezium membership function for a fuzzy histogram bin. - + Parameters ---------- bin_center : number @@ -214,22 +251,22 @@ def trapezoid_membership(bin_center, bin_width, smoothness): smoothness : number, optional The smoothness of the function; determines the neighbourhood affected. See below and `fuzzy_histogram` for a more detailed explanation - + Returns ------- trapezoid_membership : function A trapezoidal membership function centered on the bin. - + Notes ----- For the trapezium function the smoothness factor can be between >0.0 and <0.5. Higher values are excepted, but then the function assumes the shape of the triangular membership function. A value of 0.0 would make the histogram behave like a crisp one. - + The trapezium membership function is defined as - + .. math:: - + \mu_{trapez}(x) = \left\{ \begin{array}{ll} @@ -239,56 +276,66 @@ def trapezoid_membership(bin_center, bin_width, smoothness): \frac{d-x}{d-c}, & c\leq x\leq d\\ \end{array} \right. - + where :math:`a` is the left lower border, :math:`b` the left upper border, :math:`c` the right upper border and :math:`d` the right lower border of the trapezium. - + A smoothness term of 0.1 makes the trapezium function reach by :math:`0.1 * bin\_width` into the areas of the adjunct bins, as can be observed in the following figure - + .. .. image:: images/trapezium_02.png - + "Trapezium functions (1)" - + where the bin width is 2 with centers at -2, 0 and 2. - + Increasing the smoothness term toward 0.5, the function starts to resemble the triangular membership function, which in fact it becomes for any :math:`smoothness >= 0.5`. - The behavior can be observed in the following graph with :math:`smoothness=0.4` - + The behavior can be observed in the following graph with :math:`smoothness=0.4` + .. .. image:: images/trapezium_01.png - + "Trapezium functions (2)" - + Lowering the smoothness toward 0.0, on the other hand, leads the trapezium function to behave more and more like a crisp histogram membership, which in fact it becomes at a smoothness of 0.0. The following figure, where the smoothness term is near zero, illustrates this behaviour - + .. .. image:: images/trapezium_03.png - + "Trapezium functions (3)" - + """ # special case of high smoothness - if smoothness < 1./10: raise AttributeError('the triangular/trapezium membership functions supports only smoothnesses between 1/10 and 1/2.') - if smoothness >= 0.5: return triangular_membership(bin_center, bin_width, smoothness) + if smoothness < 1.0 / 10: + raise AttributeError( + "the triangular/trapezium membership functions supports only smoothnesses between 1/10 and 1/2." 
+ ) + if smoothness >= 0.5: + return triangular_membership(bin_center, bin_width, smoothness) - a = bin_center - (smoothness + 0.5) * bin_width + a = bin_center - (smoothness + 0.5) * bin_width b = bin_center - (0.5 - smoothness) * bin_width c = bin_center + (0.5 - smoothness) * bin_width d = bin_center + (smoothness + 0.5) * bin_width - + def fun(x): - if x < a or x > d: return 0 - elif x <= b: return (x-a)/float(b-a) - elif x <= c: return 1 - else: return (d-x)/float(d-c) + if x < a or x > d: + return 0 + elif x <= b: + return (x - a) / float(b - a) + elif x <= c: + return 1 + else: + return (d - x) / float(d - c) + return fun + def gaussian_membership(bin_center, bin_width, smoothness): r"""Create a gaussian membership function for a fuzzy histogram bin. - + Parameters ---------- bin_center : number @@ -298,7 +345,7 @@ def gaussian_membership(bin_center, bin_width, smoothness): smoothness : number, optional The smoothness of the function; determines the neighbourhood affected. See below and `fuzzy_histogram` for a more detailed explanation - + Returns ------- gaussian_membership : function @@ -310,30 +357,30 @@ def gaussian_membership(bin_center, bin_width, smoothness): not actually true that it does not contribute to bins outside of the neighbourhood range. But the contribution is so marginal (:math:`eps <= 0.001` per value) that it can be safely ignored. - + The gaussian membership function is defined as - + .. math:: - + \mu_{gauss}(x) = \frac{1}{\sigma\sqrt{2\pi}} e^{-\frac{(x-\zeta)^2}{2\sigma^2}} Since the gaussian distributions can not be formed to sum up to one at each point of the x-axis, their cumulative density functions (CDF) are used instead. For more details on CDF see http://en.wikipedia.org/wiki/Normal_distribution . - + The gaussian and therefore the CDF are centered above the requested value instead of the bin center. Then the CDF value for the left side of the bin is subtracted from the CDF value returned for the right side. The result is the integral under the gaussian with :math:`\mu/\zeta = value` with the bin-sides as the integral borders. - + This approach might seem a little bit unintuitive, but is the best possible for gaussian membership functions. The following graph gives a graphical example of the computation of each values bin membership - - .. .. image:: images/gaussian_01.png - + + .. .. image:: images/gaussian_01.png + "Trapezium functions (1)" - + where the bin_width is 1, one bin between each of the x tics (e.g. [-1, 0], [0, 1], etc.). The value which membership should be computed is marked by a yellow bar at :math:`x = 0.3`. Its membership in each bin is defined by the integral under the gaussian @@ -345,8 +392,11 @@ def gaussian_membership(bin_center, bin_width, smoothness): For computation the function normalizes all values to a bin_width of 1, which can introduce marginal rounding errors. """ - if smoothness > 10 or smoothness < 1./10: raise AttributeError('the gaussian membership function supports only smoothnesses between 1/10 and 5.') - + if smoothness > 10 or smoothness < 1.0 / 10: + raise AttributeError( + "the gaussian membership function supports only smoothnesses between 1/10 and 5." 
+ ) + bin_width = float(bin_width) bin_center = bin_center / bin_width start = bin_center - 0.5 @@ -354,11 +404,14 @@ def gaussian_membership(bin_center, bin_width, smoothness): sigma = _gaussian_membership_sigma(smoothness) def fun(x): - return scipy.stats.norm.cdf(end, x / bin_width, sigma) - scipy.stats.norm.cdf(start, x / bin_width, sigma) # x, mu, sigma - + return scipy.stats.norm.cdf(end, x / bin_width, sigma) - scipy.stats.norm.cdf( + start, x / bin_width, sigma + ) # x, mu, sigma + return fun -def _gaussian_membership_sigma(smoothness, eps = 0.0005): # 275us @ smothness=10 + +def _gaussian_membership_sigma(smoothness, eps=0.0005): # 275us @ smothness=10 r"""Compute the sigma required for a gaussian, such that in a neighbourhood of smoothness the maximum error is 'eps'. The error is here the difference between the clipped integral and one. @@ -366,17 +419,20 @@ def _gaussian_membership_sigma(smoothness, eps = 0.0005): # 275us @ smothness=10 error = 0 deltas = [0.1, 0.01, 0.001, 0.0001] sigma = smoothness * 0.3 - point = -1. * (smoothness + 0.5) + point = -1.0 * (smoothness + 0.5) for delta in deltas: while error < eps: sigma += delta - error = scipy.stats.norm.cdf(0.5, point, sigma) - scipy.stats.norm.cdf(-0.5, point, sigma) # x, mu, sigma + error = scipy.stats.norm.cdf(0.5, point, sigma) - scipy.stats.norm.cdf( + -0.5, point, sigma + ) # x, mu, sigma sigma -= delta return sigma + def sigmoidal_difference_membership(bin_center, bin_width, smoothness): r"""Create the difference of two sigmoids as membership function for a fuzzy histogram bin. - + Parameters ---------- bin_center : number @@ -386,35 +442,35 @@ def sigmoidal_difference_membership(bin_center, bin_width, smoothness): smoothness : number, optional The smoothness of the function; determines the neighbourhood affected. See below and `fuzzy_histogram` for a more detailed explanation - + Returns ------- sigmoidal_difference_membership : function A sigmoidal difference membership function centered on the bin. - + Notes ----- Since the sigmoidal membership function is infinite, it is not actually true that it does not contribute to bins outside of the neighbourhood range. But the contribution is so marginal (eps <= 0.001 per value) that it can be safely ignored. - + The sigmoidal membership function is defined as - + .. math:: - + \mu_{sigmoid}(x) = \left[1+e^{-\alpha_1 (x-\zeta_1)}\right]^{-1} - \left[1+e^{-\alpha_2 (x-\zeta_2)}\right]^{-1} where :math:`\alpha_1 = \alpha_2 = \alpha` is computed throught the smoothness term and :math:`\zeta_1` and :math:`\zeta_2` constitute the left resp. right borders of the bin. - + The following figure shows three sigmoidal membership functions for bins at the centers -2, -0 and 2 with a bin width of 2 and a smoothness of 2: - + .. .. image:: images/sigmoid_01.png - + "Sigmoidal functions (1)" - + The central (green) membership functions extends to its up till the second bin (centered around -4) and the same to the right (until the bin centered around +4). Therefore all values from -5 to +5 are considered for membership in this bin. Values @@ -422,44 +478,49 @@ def sigmoidal_difference_membership(bin_center, bin_width, smoothness): Furthermore it is inteligable that the sum of all membership functions at each point is equal to 1, therefore all values are equally represented (i.e. contribute with 1 to the overall histogram). - + The influence of the smoothness term can be observed in the following figure: - + .. .. 
image:: images/sigmoid_02.png - + "Sigmoidal functions (2)" - + Here smoothness has been chosen to be 1. The green function therefore extends just into the directly adjunct bins to its left and right. - + """ - if smoothness > 10 or smoothness < 1./10: raise AttributeError('the sigmoidal membership function supports only smoothnesses between 1/10 and 10.') - + if smoothness > 10 or smoothness < 1.0 / 10: + raise AttributeError( + "the sigmoidal membership function supports only smoothnesses between 1/10 and 10." + ) + # compute the alpha that will give a contribution to the next bins right and left - alpha_nbh1 = 8. / bin_width # experimental value - # compute the alpha that results in the desired smoothness level + alpha_nbh1 = 8.0 / bin_width # experimental value + # compute the alpha that results in the desired smoothness level alpha = alpha_nbh1 / smoothness - + def fun(x): - sigmoid1 = 1 + math.exp(-1. * alpha * (x - (bin_center - 0.5 * bin_width))) - sigmoid2 = 1 + math.exp(-1. * alpha * (x - (bin_center + 0.5 * bin_width))) + sigmoid1 = 1 + math.exp(-1.0 * alpha * (x - (bin_center - 0.5 * bin_width))) + sigmoid2 = 1 + math.exp(-1.0 * alpha * (x - (bin_center + 0.5 * bin_width))) return math.pow(sigmoid1, -1) - math.pow(sigmoid2, -1) + return fun - -#def generalized_bell_membership(alpha, beta, zeta): + + +# def generalized_bell_membership(alpha, beta, zeta): # """ # Create a generalized bell function as membership function for a fuzzy histogram bin. -# +# # @param alpha controls the width of the plateau # @param beta controls the width of the base # @param zeta the center of the function -# +# # Recommended values are: # - alpha: bin-width/2 # - beta: bin-width/2 # - zeta: bin center -# -# The bell membership function is defined as +# +# The bell membership function is defined as # \f[ # \mu_{bell}(x) = \left[1+\left|\frac{x-\zeta}{\alpha}\right|^{2\beta}\right]^{-1} # \f] diff --git a/medpy/features/intensity.py b/medpy/features/intensity.py index 0b80802d..20ab55e8 100644 --- a/medpy/features/intensity.py +++ b/medpy/features/intensity.py @@ -22,20 +22,24 @@ # third-party modules import numpy -from scipy.ndimage import gaussian_filter, median_filter -from scipy.ndimage import gaussian_gradient_magnitude as scipy_gaussian_gradient_magnitude from scipy.interpolate.interpolate import interp1d -from scipy.ndimage import distance_transform_edt +from scipy.ndimage import distance_transform_edt, gaussian_filter +from scipy.ndimage import ( + gaussian_gradient_magnitude as scipy_gaussian_gradient_magnitude, +) +from scipy.ndimage import median_filter from scipy.ndimage._ni_support import _get_output -# own modules -from .utilities import join from ..core import ArgumentError from ..filter import sum_filter +# own modules +from .utilities import join + # constants -def intensities(image, mask = slice(None)): + +def intensities(image, mask=slice(None)): r"""Takes a simple or multi-spectral image and returns its voxel-wise intensities. A multi-spectral image must be supplied as a list or tuple of its spectra. @@ -56,7 +60,8 @@ def intensities(image, mask = slice(None)): """ return _extract_feature(_extract_intensities, image, mask) -def centerdistance(image, voxelspacing = None, mask = slice(None)): + +def centerdistance(image, voxelspacing=None, mask=slice(None)): r""" Takes a simple or multi-spectral image and returns its voxel-wise center distance in mm. A multi-spectral image must be supplied as a list or tuple of its spectra. 
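As a point of reference for the feature extractors reformatted here, the functions above combine as follows. This is a minimal sketch, not part of the patch itself; the volume, mask, and voxel spacing are made-up placeholders:

    import numpy

    from medpy.features.intensity import centerdistance, intensities
    from medpy.features.utilities import join

    img = numpy.random.rand(10, 10, 10)  # placeholder volume
    msk = img > 0.5  # placeholder boolean mask

    f1 = intensities(img, msk)  # voxel-wise intensities under the mask
    f2 = centerdistance(img, voxelspacing=(1.0, 1.0, 2.5), mask=msk)  # center distance in mm
    features = join(f1, f2)  # combined feature matrix for the masked voxels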
@@ -93,9 +98,12 @@ def centerdistance(image, voxelspacing = None, mask = slice(None)): if type(image) == tuple or type(image) == list: image = image[0] - return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing) + return _extract_feature( + _extract_centerdistance, image, mask, voxelspacing=voxelspacing + ) -def centerdistance_xdminus1(image, dim, voxelspacing = None, mask = slice(None)): + +def centerdistance_xdminus1(image, dim, voxelspacing=None, mask=slice(None)): r""" Implementation of `centerdistance` that allows to compute sub-volume wise centerdistances. @@ -145,14 +153,23 @@ def centerdistance_xdminus1(image, dim, voxelspacing = None, mask = slice(None)) # check arguments if len(dims) >= image.ndim - 1: - raise ArgumentError('Applying a sub-volume extraction of depth {} on a image of dimensionality {} would lead to invalid images of dimensionality <= 1.'.format(len(dims), image.ndim)) + raise ArgumentError( + "Applying a sub-volume extraction of depth {} on a image of dimensionality {} would lead to invalid images of dimensionality <= 1.".format( + len(dims), image.ndim + ) + ) for dim in dims: if dim >= image.ndim: - raise ArgumentError('Invalid dimension index {} supplied for image(s) of shape {}.'.format(dim, image.shape)) + raise ArgumentError( + "Invalid dimension index {} supplied for image(s) of shape {}.".format( + dim, image.shape + ) + ) # extract desired sub-volume slicer = [slice(None)] * image.ndim - for dim in dims: slicer[dim] = slice(1) + for dim in dims: + slicer[dim] = slice(1) subvolume = numpy.squeeze(image[slicer]) # compute centerdistance for sub-volume and reshape to original sub-volume shape (note that normalization and mask are not passed on in this step) @@ -166,7 +183,8 @@ def centerdistance_xdminus1(image, dim, voxelspacing = None, mask = slice(None)) # extract intensities / centerdistance values, applying normalization and mask in this step return intensities(o, mask) -def indices(image, voxelspacing = None, mask = slice(None)): + +def indices(image, voxelspacing=None, mask=slice(None)): r""" Takes an image and returns the voxels ndim-indices as voxel-wise feature. The voxel spacing is taken into account, i.e. the indices are not array indices, but millimeter @@ -203,11 +221,19 @@ def indices(image, voxelspacing = None, mask = slice(None)): mask = numpy.array(mask, copy=False, dtype=numpy.bool_) if voxelspacing is None: - voxelspacing = [1.] * image.ndim + voxelspacing = [1.0] * image.ndim + + return join( + *[ + a[mask].ravel() * vs + for a, vs in zip(numpy.indices(image.shape), voxelspacing) + ] + ) - return join(*[a[mask].ravel() * vs for a, vs in zip(numpy.indices(image.shape), voxelspacing)]) -def shifted_mean_gauss(image, offset = None, sigma = 5, voxelspacing = None, mask = slice(None)): +def shifted_mean_gauss( + image, offset=None, sigma=5, voxelspacing=None, mask=slice(None) +): r""" The approximate mean over a small region at an offset from each voxel. 
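A hedged usage sketch for the signature just reformatted above; the offset, sigma, and spacing values are illustrative assumptions only:

    import numpy

    from medpy.features.intensity import shifted_mean_gauss

    img = numpy.random.rand(20, 20, 20)  # placeholder volume
    # approximate mean over a Gaussian-weighted region centered
    # 5 voxels along the first axis of each voxel
    f = shifted_mean_gauss(img, offset=(5, 0, 0), sigma=3, voxelspacing=(1.0, 1.0, 1.0))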
@@ -241,9 +267,17 @@ def shifted_mean_gauss(image, offset = None, sigma = 5, voxelspacing = None, mas local_mean_gauss """ - return _extract_feature(_extract_shifted_mean_gauss, image, mask, offset = offset, sigma = sigma, voxelspacing = voxelspacing) + return _extract_feature( + _extract_shifted_mean_gauss, + image, + mask, + offset=offset, + sigma=sigma, + voxelspacing=voxelspacing, + ) -def mask_distance(image, voxelspacing = None, mask = slice(None)): + +def mask_distance(image, voxelspacing=None, mask=slice(None)): r""" Computes the distance of each point under the mask to the mask border taking the voxel-spacing into account. @@ -272,9 +306,10 @@ def mask_distance(image, voxelspacing = None, mask = slice(None)): if type(image) == tuple or type(image) == list: image = image[0] - return _extract_mask_distance(image, mask = mask, voxelspacing = voxelspacing) + return _extract_mask_distance(image, mask=mask, voxelspacing=voxelspacing) + -def local_mean_gauss(image, sigma = 5, voxelspacing = None, mask = slice(None)): +def local_mean_gauss(image, sigma=5, voxelspacing=None, mask=slice(None)): r""" Takes a simple or multi-spectral image and returns the approximate mean over a small region around each voxel. A multi-spectral image must be supplied as a list or tuple @@ -308,9 +343,12 @@ def local_mean_gauss(image, sigma = 5, voxelspacing = None, mask = slice(None)): The weighted mean intensities over a region around each voxel. """ - return _extract_feature(_extract_local_mean_gauss, image, mask, sigma = sigma, voxelspacing = voxelspacing) + return _extract_feature( + _extract_local_mean_gauss, image, mask, sigma=sigma, voxelspacing=voxelspacing + ) + -def gaussian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = slice(None)): +def gaussian_gradient_magnitude(image, sigma=5, voxelspacing=None, mask=slice(None)): r""" Computes the gradient magnitude (edge-detection) of the supplied image using gaussian derivates and returns the intensity values. @@ -338,9 +376,16 @@ def gaussian_gradient_magnitude(image, sigma = 5, voxelspacing = None, mask = sl The gaussian gradient magnitude of the supplied image. """ - return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask, sigma = sigma, voxelspacing = voxelspacing) + return _extract_feature( + _extract_gaussian_gradient_magnitude, + image, + mask, + sigma=sigma, + voxelspacing=voxelspacing, + ) -def median(image, size = 5, voxelspacing = None, mask = slice(None)): + +def median(image, size=5, voxelspacing=None, mask=slice(None)): """ Computes the multi-dimensional median filter and returns the resulting values per voxel. @@ -368,9 +413,23 @@ def median(image, size = 5, voxelspacing = None, mask = slice(None)): Multi-dimesnional median filtered version of the input images. """ - return _extract_feature(_extract_median, image, mask, size = size, voxelspacing = voxelspacing) + return _extract_feature( + _extract_median, image, mask, size=size, voxelspacing=voxelspacing + ) + -def local_histogram(image, bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0, mask=slice(None)): +def local_histogram( + image, + bins=19, + rang="image", + cutoffp=(0.0, 100.0), + size=None, + footprint=None, + output=None, + mode="ignore", + origin=0, + mask=slice(None), +): r""" Computes multi-dimensional histograms over a region around each voxel. 
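Likewise, a sketch of calling local_histogram as declared above; the bin count and neighbourhood size are assumed values, and mode="ignore" is the default boundary handling named in the signature:

    import numpy

    from medpy.features.intensity import local_histogram

    img = numpy.random.rand(30, 30, 30)  # placeholder volume
    # one 11-bin histogram per voxel, computed over a cubic 5-voxel neighbourhood
    hists = local_histogram(img, bins=11, size=5)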
@@ -449,10 +508,29 @@ def local_histogram(image, bins=19, rang="image", cutoffp=(0.0, 100.0), size=Non
         The bin values of the local histograms for each voxel as a multi-dimensional image.
     """
-    return _extract_feature(_extract_local_histogram, image, mask, bins=bins, rang=rang, cutoffp=cutoffp, size=size, footprint=footprint, output=output, mode=mode, origin=origin)
-
-
-def hemispheric_difference(image, sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None, mask = slice(None)):
+    return _extract_feature(
+        _extract_local_histogram,
+        image,
+        mask,
+        bins=bins,
+        rang=rang,
+        cutoffp=cutoffp,
+        size=size,
+        footprint=footprint,
+        output=output,
+        mode=mode,
+        origin=origin,
+    )
+
+
+def hemispheric_difference(
+    image,
+    sigma_active=7,
+    sigma_reference=7,
+    cut_plane=0,
+    voxelspacing=None,
+    mask=slice(None),
+):
     r"""
     Computes the hemispheric intensity difference between the brain hemispheres of an brain image.
@@ -516,23 +594,44 @@ def hemispheric_difference(image, sigma_active = 7, sigma_reference = 7, cut_pla
         If the supplied cut-plane dimension is invalid.
     """
-    return _extract_feature(_extract_hemispheric_difference, image, mask, sigma_active = sigma_active, sigma_reference = sigma_reference, cut_plane = cut_plane, voxelspacing = voxelspacing)
+    return _extract_feature(
+        _extract_hemispheric_difference,
+        image,
+        mask,
+        sigma_active=sigma_active,
+        sigma_reference=sigma_reference,
+        cut_plane=cut_plane,
+        voxelspacing=voxelspacing,
+    )


-def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, sigma_reference = 7, cut_plane = 0, voxelspacing = None):
+def _extract_hemispheric_difference(
+    image,
+    mask=slice(None),
+    sigma_active=7,
+    sigma_reference=7,
+    cut_plane=0,
+    voxelspacing=None,
+):
     """
     Internal, single-image version of `hemispheric_difference`.
     """
     # constants
-    INTERPOLATION_RANGE = int(10) # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice
+    INTERPOLATION_RANGE = int(
+        10
+    )  # how many neighbouring values to take into account when interpolating the medial longitudinal fissure slice

     # check arguments
     if cut_plane >= image.ndim:
-        raise ArgumentError('The suppliedc cut-plane ({}) is invalid, the image has only {} dimensions.'.format(cut_plane, image.ndim))
+        raise ArgumentError(
+            "The supplied cut-plane ({}) is invalid, the image has only {} dimensions.".format(
+                cut_plane, image.ndim
+            )
+        )

     # set voxel spacing
     if voxelspacing is None:
-        voxelspacing = [1.]
* image.ndim + voxelspacing = [1.0] * image.ndim # compute the (presumed) location of the medial longitudinal fissure, treating also the special of an odd number of slices, in which case a cut into two equal halves is not possible medial_longitudinal_fissure = int(image.shape[cut_plane] / 2) @@ -544,7 +643,9 @@ def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, slicer[cut_plane] = slice(None, medial_longitudinal_fissure) left_hemisphere = image[slicer] - slicer[cut_plane] = slice(medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None) + slicer[cut_plane] = slice( + medial_longitudinal_fissure + medial_longitudinal_fissure_excluded, None + ) right_hemisphere = image[slicer] # flip right hemisphere image along cut plane @@ -552,8 +653,12 @@ def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, right_hemisphere = right_hemisphere[slicer] # substract once left from right and once right from left hemisphere, including smoothing steps - right_hemisphere_difference = _substract_hemispheres(right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing) - left_hemisphere_difference = _substract_hemispheres(left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing) + right_hemisphere_difference = _substract_hemispheres( + right_hemisphere, left_hemisphere, sigma_active, sigma_reference, voxelspacing + ) + left_hemisphere_difference = _substract_hemispheres( + left_hemisphere, right_hemisphere, sigma_active, sigma_reference, voxelspacing + ) # re-flip right hemisphere image to original orientation right_hemisphere_difference = right_hemisphere_difference[slicer] @@ -568,23 +673,56 @@ def _extract_hemispheric_difference(image, mask = slice(None), sigma_active = 7, interp_data_right = right_hemisphere_difference[right_slicer] interp_indices_left = list(range(-1 * interp_data_left.shape[cut_plane], 0)) interp_indices_right = list(range(1, interp_data_right.shape[cut_plane] + 1)) - interp_data = numpy.concatenate((left_hemisphere_difference[left_slicer], right_hemisphere_difference[right_slicer]), cut_plane) - interp_indices = numpy.concatenate((interp_indices_left, interp_indices_right), 0) - medial_longitudinal_fissure_estimated = interp1d(interp_indices, interp_data, kind='cubic', axis=cut_plane)(0) + interp_data = numpy.concatenate( + ( + left_hemisphere_difference[left_slicer], + right_hemisphere_difference[right_slicer], + ), + cut_plane, + ) + interp_indices = numpy.concatenate( + (interp_indices_left, interp_indices_right), 0 + ) + medial_longitudinal_fissure_estimated = interp1d( + interp_indices, interp_data, kind="cubic", axis=cut_plane + )(0) # add singleton dimension slicer[cut_plane] = numpy.newaxis - medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[slicer] + medial_longitudinal_fissure_estimated = medial_longitudinal_fissure_estimated[ + slicer + ] # stich images back together if 1 == medial_longitudinal_fissure_excluded: - hemisphere_difference = numpy.concatenate((left_hemisphere_difference, medial_longitudinal_fissure_estimated, right_hemisphere_difference), cut_plane) + hemisphere_difference = numpy.concatenate( + ( + left_hemisphere_difference, + medial_longitudinal_fissure_estimated, + right_hemisphere_difference, + ), + cut_plane, + ) else: - hemisphere_difference = numpy.concatenate((left_hemisphere_difference, right_hemisphere_difference), cut_plane) + hemisphere_difference = numpy.concatenate( + (left_hemisphere_difference, 
right_hemisphere_difference), cut_plane + ) # extract intensities and return return _extract_intensities(hemisphere_difference, mask) -def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cutoffp=(0.0, 100.0), size=None, footprint=None, output=None, mode="ignore", origin=0): + +def _extract_local_histogram( + image, + mask=slice(None), + bins=19, + rang="image", + cutoffp=(0.0, 100.0), + size=None, + footprint=None, + output=None, + mode="ignore", + origin=0, +): """ Internal, single-image version of @see local_histogram @@ -593,16 +731,20 @@ def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cut Note: Default dtype of returned values is float. """ if "constant" == mode: - raise RuntimeError('boundary mode not supported') + raise RuntimeError("boundary mode not supported") elif "ignore" == mode: mode = "constant" - if 'image' == rang: + if "image" == rang: rang = tuple(numpy.percentile(image[mask], cutoffp)) elif not 2 == len(rang): - raise RuntimeError('the rang must contain exactly two elements or the string "image"') + raise RuntimeError( + 'the rang must contain exactly two elements or the string "image"' + ) _, bin_edges = numpy.histogram([], bins=bins, range=rang) - output = _get_output(float if None == output else output, image, shape = [bins] + list(image.shape)) + output = _get_output( + float if None == output else output, image, shape=[bins] + list(image.shape) + ) # threshold the image into the histogram bins represented by the output images first dimension, treat last bin separately, since upper border is inclusive for i in range(bins - 1): @@ -611,7 +753,15 @@ def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cut # apply the sum filter to each dimension, then normalize by dividing through the sum of elements in the bins of each histogram for i in range(bins): - output[i] = sum_filter(output[i], size=size, footprint=footprint, output=None, mode=mode, cval=0.0, origin=origin) + output[i] = sum_filter( + output[i], + size=size, + footprint=footprint, + output=None, + mode=mode, + cval=0.0, + origin=origin, + ) divident = numpy.sum(output, 0) divident[0 == divident] = 1 output /= divident @@ -624,39 +774,46 @@ def _extract_local_histogram(image, mask=slice(None), bins=19, rang="image", cut # treat as multi-spectral image which intensities to extracted return _extract_feature(_extract_intensities, [h for h in output], mask) -def _extract_median(image, mask = slice(None), size = 1, voxelspacing = None): + +def _extract_median(image, mask=slice(None), size=1, voxelspacing=None): """ Internal, single-image version of `median`. """ # set voxel spacing if voxelspacing is None: - voxelspacing = [1.] * image.ndim + voxelspacing = [1.0] * image.ndim # determine structure element size in voxel units size = _create_structure_array(size, voxelspacing) return _extract_intensities(median_filter(image, size), mask) -def _extract_gaussian_gradient_magnitude(image, mask = slice(None), sigma = 1, voxelspacing = None): + +def _extract_gaussian_gradient_magnitude( + image, mask=slice(None), sigma=1, voxelspacing=None +): """ Internal, single-image version of `gaussian_gradient_magnitude`. """ # set voxel spacing if voxelspacing is None: - voxelspacing = [1.] 
* image.ndim + voxelspacing = [1.0] * image.ndim # determine gaussian kernel size in voxel units sigma = _create_structure_array(sigma, voxelspacing) return _extract_intensities(scipy_gaussian_gradient_magnitude(image, sigma), mask) -def _extract_shifted_mean_gauss(image, mask = slice(None), offset = None, sigma = 1, voxelspacing = None): + +def _extract_shifted_mean_gauss( + image, mask=slice(None), offset=None, sigma=1, voxelspacing=None +): """ Internal, single-image version of `shifted_mean_gauss`. """ # set voxel spacing if voxelspacing is None: - voxelspacing = [1.] * image.ndim + voxelspacing = [1.0] * image.ndim # set offset if offset is None: offset = [0] * image.ndim @@ -677,7 +834,8 @@ def _extract_shifted_mean_gauss(image, mask = slice(None), offset = None, sigma return _extract_intensities(shifted, mask) -def _extract_mask_distance(image, mask = slice(None), voxelspacing = None): + +def _extract_mask_distance(image, mask=slice(None), voxelspacing=None): """ Internal, single-image version of `mask_distance`. """ @@ -688,13 +846,14 @@ def _extract_mask_distance(image, mask = slice(None), voxelspacing = None): return _extract_intensities(distance_map, mask) -def _extract_local_mean_gauss(image, mask = slice(None), sigma = 1, voxelspacing = None): + +def _extract_local_mean_gauss(image, mask=slice(None), sigma=1, voxelspacing=None): """ Internal, single-image version of `local_mean_gauss`. """ # set voxel spacing if voxelspacing is None: - voxelspacing = [1.] * image.ndim + voxelspacing = [1.0] * image.ndim # determine gaussian kernel size in voxel units sigma = _create_structure_array(sigma, voxelspacing) @@ -702,17 +861,17 @@ def _extract_local_mean_gauss(image, mask = slice(None), sigma = 1, voxelspacing return _extract_intensities(gaussian_filter(image, sigma), mask) -def _extract_centerdistance(image, mask = slice(None), voxelspacing = None): +def _extract_centerdistance(image, mask=slice(None), voxelspacing=None): """ Internal, single-image version of `centerdistance`. """ image = numpy.array(image, copy=False) if None == voxelspacing: - voxelspacing = [1.] * image.ndim + voxelspacing = [1.0] * image.ndim # get image center and an array holding the images indices - centers = [(x - 1) / 2. for x in image.shape] + centers = [(x - 1) / 2.0 for x in image.shape] indices = numpy.indices(image.shape, dtype=float) # shift to center of image and correct spacing to real world coordinates @@ -724,25 +883,29 @@ def _extract_centerdistance(image, mask = slice(None), voxelspacing = None): return numpy.sqrt(numpy.sum(numpy.square(indices), 0))[mask].ravel() -def _extract_intensities(image, mask = slice(None)): +def _extract_intensities(image, mask=slice(None)): """ Internal, single-image version of `intensities`. """ return numpy.array(image, copy=True)[mask].ravel() -def _substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing): + +def _substract_hemispheres( + active, reference, active_sigma, reference_sigma, voxel_spacing +): """ Helper function for `_extract_hemispheric_difference`. Smoothes both images and then substracts the reference from the active image. 
""" active_kernel = _create_structure_array(active_sigma, voxel_spacing) - active_smoothed = gaussian_filter(active, sigma = active_kernel) + active_smoothed = gaussian_filter(active, sigma=active_kernel) reference_kernel = _create_structure_array(reference_sigma, voxel_spacing) - reference_smoothed = gaussian_filter(reference, sigma = reference_kernel) + reference_smoothed = gaussian_filter(reference, sigma=reference_kernel) return active_smoothed - reference_smoothed + def _create_structure_array(structure_array, voxelspacing): """ Convenient function to take a structure array (single number valid for all dimensions @@ -751,13 +914,16 @@ def _create_structure_array(structure_array, voxelspacing): voxel spacing. """ try: - structure_array = [s / float(vs) for s, vs in zip(structure_array, voxelspacing)] + structure_array = [ + s / float(vs) for s, vs in zip(structure_array, voxelspacing) + ] except TypeError: structure_array = [structure_array / float(vs) for vs in voxelspacing] return structure_array -def _extract_feature(fun, image, mask = slice(None), **kwargs): + +def _extract_feature(fun, image, mask=slice(None), **kwargs): """ Convenient function to cope with multi-spectral images and feature normalization. diff --git a/medpy/features/texture.py b/medpy/features/texture.py index b1804b17..c5816000 100644 --- a/medpy/features/texture.py +++ b/medpy/features/texture.py @@ -20,17 +20,25 @@ # build-in modules +from math import factorial + # third-party modules import numpy -from scipy.ndimage import uniform_filter, sobel, maximum_filter, minimum_filter, gaussian_filter from scipy import stats -from math import factorial +from scipy.ndimage import ( + gaussian_filter, + maximum_filter, + minimum_filter, + sobel, + uniform_filter, +) # own modules # constants -def coarseness(image, voxelspacing = None, mask = slice(None)): + +def coarseness(image, voxelspacing=None, mask=slice(None)): r""" Takes a simple or multi-spectral image and returns the coarseness of the texture. @@ -67,65 +75,72 @@ def coarseness(image, voxelspacing = None, mask = slice(None)): image = numpy.asarray(image, dtype=numpy.float32) - # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): - mask = numpy.array(mask, copy=False, dtype = numpy.bool_) + mask = numpy.array(mask, copy=False, dtype=numpy.bool_) image = image[mask] # set default voxel spacing if not suppliec if None == voxelspacing: - voxelspacing = tuple([1.] 
* image.ndim) + voxelspacing = tuple([1.0] * image.ndim) if len(voxelspacing) != image.ndim: print("Voxel spacing and image dimensions do not fit.") return None # set padding for image border control - padSize = numpy.asarray([(numpy.rint((2**5.0) * voxelspacing[jj]),0) for jj in range(image.ndim)]).astype(int) - Apad = numpy.pad(image,pad_width=padSize, mode='reflect') + padSize = numpy.asarray( + [(numpy.rint((2**5.0) * voxelspacing[jj]), 0) for jj in range(image.ndim)] + ).astype(int) + Apad = numpy.pad(image, pad_width=padSize, mode="reflect") # Allocate memory - E = numpy.empty((6,image.ndim)+image.shape) + E = numpy.empty((6, image.ndim) + image.shape) # prepare some slicer - rawSlicer = [slice(None)] * image.ndim - slicerForImageInPad = [slice(padSize[d][0],None)for d in range(image.ndim)] + rawSlicer = [slice(None)] * image.ndim + slicerForImageInPad = [slice(padSize[d][0], None) for d in range(image.ndim)] for k in range(6): - - size_vs = tuple(numpy.rint((2**k) * voxelspacing[jj]) for jj in range(image.ndim)) - A = uniform_filter(Apad, size = size_vs, mode = 'mirror') + size_vs = tuple( + numpy.rint((2**k) * voxelspacing[jj]) for jj in range(image.ndim) + ) + A = uniform_filter(Apad, size=size_vs, mode="mirror") # Step2: At each pixel, compute absolute differences E(x,y) between # the pairs of non overlapping averages in the horizontal and vertical directions. for d in range(image.ndim): borders = numpy.rint((2**k) * voxelspacing[d]) - slicerPad_k_d = slicerForImageInPad[:] - slicerPad_k_d[d]= slice((padSize[d][0]-borders if borders < padSize[d][0] else 0),None) - A_k_d = A[slicerPad_k_d] + slicerPad_k_d = slicerForImageInPad[:] + slicerPad_k_d[d] = slice( + (padSize[d][0] - borders if borders < padSize[d][0] else 0), None + ) + A_k_d = A[slicerPad_k_d] - AslicerL = rawSlicer[:] - AslicerL[d] = slice(0, -borders) + AslicerL = rawSlicer[:] + AslicerL[d] = slice(0, -borders) - AslicerR = rawSlicer[:] - AslicerR[d] = slice(borders, None) + AslicerR = rawSlicer[:] + AslicerR[d] = slice(borders, None) - E[k,d,...] = numpy.abs(A_k_d[AslicerL] - A_k_d[AslicerR]) + E[k, d, ...] = numpy.abs(A_k_d[AslicerL] - A_k_d[AslicerR]) # step3: At each pixel, find the value of k that maximises the difference Ek(x,y) # in either direction and set the best size Sbest(x,y)=2**k k_max = E.max(1).argmax(0) dim = E.argmax(1) - dim_vox_space = numpy.asarray([voxelspacing[dim[k_max.flat[i]].flat[i]] for i in range(k_max.size)]).reshape(k_max.shape) + dim_vox_space = numpy.asarray( + [voxelspacing[dim[k_max.flat[i]].flat[i]] for i in range(k_max.size)] + ).reshape(k_max.shape) S = (2**k_max) * dim_vox_space # step4: Compute the coarseness feature Fcrs by averaging Sbest(x,y) over the entire image. return S.mean() -def contrast(image, mask = slice(None)): + +def contrast(image, mask=slice(None)): r""" Takes a simple or multi-spectral image and returns the contrast of the texture. @@ -152,18 +167,21 @@ def contrast(image, mask = slice(None)): # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): - mask = numpy.array(mask, copy=False, dtype = numpy.bool_) + mask = numpy.array(mask, copy=False, dtype=numpy.bool_) image = image[mask] standard_deviation = numpy.std(image) kurtosis = stats.kurtosis(image, axis=None, bias=True, fisher=False) - n = 0.25 # The value n=0.25 is recommended as the best for discriminating the textures. + n = 0.25 # The value n=0.25 is recommended as the best for discriminating the textures. 
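    # Put differently, Tamura's contrast statistic normalizes the intensity
    # spread (standard deviation) by the peakedness of the grey-value
    # distribution (non-Fisher kurtosis): Fcon = sigma / kurtosis**n.
    # A minimal, hedged usage sketch (the random array is a stand-in image):
    #   >>> import numpy
    #   >>> from medpy.features.texture import contrast
    #   >>> fcon = contrast(numpy.random.randn(64, 64))  # scalar texture descriptor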
Fcon = standard_deviation / (kurtosis**n) return Fcon -def directionality(image, min_distance = 4, threshold = 0.1, voxelspacing = None, mask = slice(None)): + +def directionality( + image, min_distance=4, threshold=0.1, voxelspacing=None, mask=slice(None) +): r""" Takes a simple or multi-spectral image and returns the directionality of the image texture. It is just a value representing the strength of directionality, not the specific direction. @@ -214,32 +232,32 @@ def directionality(image, min_distance = 4, threshold = 0.1, voxelspacing = None # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): - mask = numpy.array(mask, copy=False, dtype = numpy.bool_) + mask = numpy.array(mask, copy=False, dtype=numpy.bool_) image = image[mask] # set default voxel spacing if not suppliec if None == voxelspacing: - voxelspacing = tuple([1.] * ndim) + voxelspacing = tuple([1.0] * ndim) if len(voxelspacing) != ndim: print("Voxel spacing and image dimensions do not fit.") return None - # Calculate amount of combinations: n choose k, normalizing factor r and voxel spacing. - n = (factorial(ndim)/(2*factorial(ndim-2))) - pi1_2 = numpy.pi/2.0 - r=1.0 / (pi1_2**2) - vs = [slice(None,None,numpy.rint(ii)) for ii in voxelspacing] + # Calculate amount of combinations: n choose k, normalizing factor r and voxel spacing. + n = factorial(ndim) / (2 * factorial(ndim - 2)) + pi1_2 = numpy.pi / 2.0 + r = 1.0 / (pi1_2**2) + vs = [slice(None, None, numpy.rint(ii)) for ii in voxelspacing] # Allocate memory, define constants Fdir = numpy.empty(n) # calculate differences by using Sobel-filter. (Maybe other filter kernel like Prewitt will do a better job) - E = [sobel(image, axis=ndim-1-i) for i in range(ndim)] + E = [sobel(image, axis=ndim - 1 - i) for i in range(ndim)] # The edge strength e(x,y) is used for thresholding. e = sum(E) / float(ndim) - border = [numpy.percentile(e, 1),numpy.percentile(e, 99)] + border = [numpy.percentile(e, 1), numpy.percentile(e, 99)] e[e < border[0]] = 0 e[e > border[1]] = border[1] e -= border[0] @@ -247,53 +265,66 @@ def directionality(image, min_distance = 4, threshold = 0.1, voxelspacing = None em = e > threshold for i in range(n): - A = numpy.arctan((E[(i + (ndim+i)/ndim) % ndim][vs]) / (E[i%ndim][vs]+numpy.spacing(1))) # [0 , pi/2] + A = numpy.arctan( + (E[(i + (ndim + i) / ndim) % ndim][vs]) + / (E[i % ndim][vs] + numpy.spacing(1)) + ) # [0 , pi/2] A = A[em[vs]] # Calculate number of bins for the histogram. Watch out, this is just a work around! # @TODO: Write a more stable code to prevent for minimum and maximum repetition when the same value in the Histogram appears multiple times in a row. 
Example: image = numpy.zeros([10,10]), image[:,::3] = 1 bins = numpy.unique(A).size + min_distance - H = numpy.histogram(A, bins = bins, density=True)[0] # [0 , 1] - H[H < numpy.percentile(H,1)] = 0.0 + H = numpy.histogram(A, bins=bins, density=True)[0] # [0 , 1] + H[H < numpy.percentile(H, 1)] = 0.0 H_peaks, H_valleys, H_range = find_valley_range(H) summe = 0.0 for idx_ap in range(len(H_peaks)): - for range_idx in range( H_valleys[idx_ap], H_valleys[idx_ap]+H_range[idx_ap]): - a=range_idx % len(H) - summe += (((pi1_2*a)/bins - (pi1_2 * H_peaks[idx_ap])/bins) **2) * H[a] + for range_idx in range( + H_valleys[idx_ap], H_valleys[idx_ap] + H_range[idx_ap] + ): + a = range_idx % len(H) + summe += ( + ((pi1_2 * a) / bins - (pi1_2 * H_peaks[idx_ap]) / bins) ** 2 + ) * H[a] Fdir[i] = 1.0 - r * summe return Fdir -def local_maxima(vector,min_distance = 4, brd_mode = "wrap"): +def local_maxima(vector, min_distance=4, brd_mode="wrap"): """ Internal finder for local maxima . Returns UNSORTED indices of maxima in input vector. """ - fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode) + fits = gaussian_filter( + numpy.asarray(vector, dtype=numpy.float32), 1.0, mode=brd_mode + ) for ii in range(len(fits)): - if fits[ii] == fits[ii-1]: - fits[ii-1] = 0.0 - maxfits = maximum_filter(fits, size=min_distance, mode=brd_mode) + if fits[ii] == fits[ii - 1]: + fits[ii - 1] = 0.0 + maxfits = maximum_filter(fits, size=min_distance, mode=brd_mode) maxima_mask = fits == maxfits - maximum = numpy.transpose(maxima_mask.nonzero()) + maximum = numpy.transpose(maxima_mask.nonzero()) return numpy.asarray(maximum) -def local_minima(vector,min_distance = 4, brd_mode = "wrap"): + +def local_minima(vector, min_distance=4, brd_mode="wrap"): """ Internal finder for local minima . Returns UNSORTED indices of minima in input vector. """ - fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode) + fits = gaussian_filter( + numpy.asarray(vector, dtype=numpy.float32), 1.0, mode=brd_mode + ) for ii in range(len(fits)): - if fits[ii] == fits[ii-1]: - fits[ii-1] = numpy.pi/2.0 + if fits[ii] == fits[ii - 1]: + fits[ii - 1] = numpy.pi / 2.0 minfits = minimum_filter(fits, size=min_distance, mode=brd_mode) minima_mask = fits == minfits minima = numpy.transpose(minima_mask.nonzero()) return numpy.asarray(minima) -def find_valley_range(vector, min_distance = 4): + +def find_valley_range(vector, min_distance=4): """ Internal finder peaks and valley ranges. Returns UNSORTED indices of maxima in input vector. 
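The two extrema finders above and the valley-range helper below together drive the angle-histogram analysis in directionality(): maxima mark dominant edge directions, and the valley span around each peak delimits the histogram mass summed into Fdir. A minimal, hedged sketch on a toy vector (values illustrative only):

    >>> from medpy.features.texture import local_maxima, local_minima
    >>> v = [0.0, 0.2, 1.0, 0.2, 0.0, 0.1, 0.8, 0.1, 0.0]
    >>> peaks = local_maxima(v, min_distance=3)    # unsorted indices of maxima
    >>> valleys = local_minima(v, min_distance=3)  # unsorted indices of minima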
@@ -303,22 +334,27 @@ def find_valley_range(vector, min_distance = 4): # http://users.monash.edu.au/~dengs/resource/papers/icme08.pdf # find min and max with mode = wrap mode = "wrap" - minima = local_minima(vector,min_distance,mode) - maxima = local_maxima(vector,min_distance,mode) + minima = local_minima(vector, min_distance, mode) + maxima = local_maxima(vector, min_distance, mode) - if len(maxima)>len(minima): + if len(maxima) > len(minima): if vector[maxima[0]] >= vector[maxima[-1]]: - maxima=maxima[1:] + maxima = maxima[1:] else: - maxima=maxima[:-1] + maxima = maxima[:-1] - if len(maxima)==len(minima): - valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(minima)-1)] + [len(vector)-minima[-1]+minima[0]]) + if len(maxima) == len(minima): + valley_range = numpy.asarray( + [minima[ii + 1] - minima[ii] for ii in range(len(minima) - 1)] + + [len(vector) - minima[-1] + minima[0]] + ) if minima[0] < maxima[0]: minima = numpy.asarray(list(minima) + [minima[0]]) else: minima = numpy.asarray(list(minima) + [minima[-1]]) else: - valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(maxima))]) + valley_range = numpy.asarray( + [minima[ii + 1] - minima[ii] for ii in range(len(maxima))] + ) return maxima, minima, valley_range diff --git a/medpy/features/utilities.py b/medpy/features/utilities.py index c2004a39..d766b473 100644 --- a/medpy/features/utilities.py +++ b/medpy/features/utilities.py @@ -28,7 +28,7 @@ # code -def normalize(vector, cutoffp = (0, 100), model = False): +def normalize(vector, cutoffp=(0, 100), model=False): r""" Returns a feature-wise normalized version of the supplied vector. Normalization is achieved to [0,1] over the complete vector using shifting and scaling. @@ -88,8 +88,8 @@ def normalize(vector, cutoffp = (0, 100), model = False): # shift outliers to fit range for i in range(vector.shape[1]): - vector[:,i][vector[:,i] < minp[i]] = minp[i] - vector[:,i][vector[:,i] > maxp[i]] = maxp[i] + vector[:, i][vector[:, i] < minp[i]] = minp[i] + vector[:, i][vector[:, i] > maxp[i]] = maxp[i] # normalize minv = vector.min(0) @@ -102,6 +102,7 @@ def normalize(vector, cutoffp = (0, 100), model = False): else: return vector, (minp, maxp, minv, maxv) + def normalize_with_model(vector, model): r""" Normalize as with `normalize`, but not based on the data of the passed feature @@ -131,8 +132,8 @@ def normalize_with_model(vector, model): # shift outliers to fit range for i in range(vector.shape[1]): - vector[:,i][vector[:,i] < minp[i]] = minp[i] - vector[:,i][vector[:,i] > maxp[i]] = maxp[i] + vector[:, i][vector[:, i] < minp[i]] = minp[i] + vector[:, i][vector[:, i] > maxp[i]] = maxp[i] # normalize vector -= minv @@ -140,6 +141,7 @@ def normalize_with_model(vector, model): return vector + def append(*vectors): r""" Takes an arbitrary number of vectors containing features and append them @@ -178,6 +180,7 @@ def append(*vectors): return numpy.squeeze(numpy.concatenate(vectors, 0)) + def join(*vectors): r""" Takes an arbitrary number of aligned vectors of the same length and combines diff --git a/medpy/filter/IntensityRangeStandardization.py b/medpy/filter/IntensityRangeStandardization.py index 21779310..3f50e400 100644 --- a/medpy/filter/IntensityRangeStandardization.py +++ b/medpy/filter/IntensityRangeStandardization.py @@ -28,8 +28,9 @@ # own modules + # code -class IntensityRangeStandardization (object): +class IntensityRangeStandardization(object): r""" Class to standardize intensity ranges between a number of images. 
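For orientation, the class is used in a train-then-transform cycle; a hedged sketch with placeholder arrays (img1 through img4 are assumed single-modality intensity images with the background removed):

    >>> from medpy.filter.IntensityRangeStandardization import IntensityRangeStandardization
    >>> irs = IntensityRangeStandardization()
    >>> irs = irs.train([img1, img2, img3])   # learn the standard intensity space
    >>> out = irs.transform(img4)             # map a new image into it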
@@ -187,54 +188,73 @@ class IntensityRangeStandardization (object): L4 = [10, 20, 30, 40, 50, 60, 70, 80, 90] """9-value landmark points model.""" - def __init__(self, cutoffp = (1, 99), landmarkp = L4, stdrange = 'auto'): + def __init__(self, cutoffp=(1, 99), landmarkp=L4, stdrange="auto"): # check parameters if not IntensityRangeStandardization.is_sequence(cutoffp): - raise ValueError('cutoffp must be a sequence') + raise ValueError("cutoffp must be a sequence") if not 2 == len(cutoffp): - raise ValueError('cutoffp must be of length 2, not {}'.format(len(cutoffp))) + raise ValueError("cutoffp must be of length 2, not {}".format(len(cutoffp))) if not IntensityRangeStandardization.are_numbers(cutoffp): - raise ValueError('cutoffp elements must be numbers') - if not IntensityRangeStandardization.are_in_interval(cutoffp, 0, 100, 'included'): - raise ValueError('cutoffp elements must be in [0, 100]') + raise ValueError("cutoffp elements must be numbers") + if not IntensityRangeStandardization.are_in_interval( + cutoffp, 0, 100, "included" + ): + raise ValueError("cutoffp elements must be in [0, 100]") if not cutoffp[1] > cutoffp[0]: - raise ValueError('the second element of cutoffp must be larger than the first') + raise ValueError( + "the second element of cutoffp must be larger than the first" + ) if not IntensityRangeStandardization.is_sequence(landmarkp): - raise ValueError('landmarkp must be a sequence') + raise ValueError("landmarkp must be a sequence") if not 1 <= len(landmarkp): - raise ValueError('landmarkp must be of length >= 1, not {}'.format(len(landmarkp))) + raise ValueError( + "landmarkp must be of length >= 1, not {}".format(len(landmarkp)) + ) if not IntensityRangeStandardization.are_numbers(landmarkp): - raise ValueError('landmarkp elements must be numbers') - if not IntensityRangeStandardization.are_in_interval(landmarkp, 0, 100, 'included'): - raise ValueError('landmarkp elements must be in [0, 100]') - if not IntensityRangeStandardization.are_in_interval(landmarkp, cutoffp[0], cutoffp[1], 'excluded'): - raise ValueError('landmarkp elements must be in between the elements of cutoffp') + raise ValueError("landmarkp elements must be numbers") + if not IntensityRangeStandardization.are_in_interval( + landmarkp, 0, 100, "included" + ): + raise ValueError("landmarkp elements must be in [0, 100]") + if not IntensityRangeStandardization.are_in_interval( + landmarkp, cutoffp[0], cutoffp[1], "excluded" + ): + raise ValueError( + "landmarkp elements must be in between the elements of cutoffp" + ) if not len(landmarkp) == len(numpy.unique(landmarkp)): - raise ValueError('landmarkp elements must be unique') + raise ValueError("landmarkp elements must be unique") - if 'auto' == stdrange: - stdrange = ('auto', 'auto') + if "auto" == stdrange: + stdrange = ("auto", "auto") else: if not IntensityRangeStandardization.is_sequence(stdrange): - raise ValueError('stdrange must be a sequence or \'auto\'') + raise ValueError("stdrange must be a sequence or 'auto'") if not 2 == len(stdrange): - raise ValueError('stdrange must be of length 2, not {}'.format(len(stdrange))) - if not 'auto' in stdrange: + raise ValueError( + "stdrange must be of length 2, not {}".format(len(stdrange)) + ) + if not "auto" in stdrange: if not IntensityRangeStandardization.are_numbers(stdrange): - raise ValueError('stdrange elements must be numbers or \'auto\'') + raise ValueError("stdrange elements must be numbers or 'auto'") if not stdrange[1] > stdrange[0]: - raise ValueError('the second element of stdrange must be 
larger than the first') - elif 'auto' == stdrange[0] and not IntensityRangeStandardization.is_number(stdrange[1]): - raise ValueError('stdrange elements must be numbers or \'auto\'') - elif 'auto' == stdrange[1] and not IntensityRangeStandardization.is_number(stdrange[0]): - raise ValueError('stdrange elements must be numbers or \'auto\'') - + raise ValueError( + "the second element of stdrange must be larger than the first" + ) + elif "auto" == stdrange[0] and not IntensityRangeStandardization.is_number( + stdrange[1] + ): + raise ValueError("stdrange elements must be numbers or 'auto'") + elif "auto" == stdrange[1] and not IntensityRangeStandardization.is_number( + stdrange[0] + ): + raise ValueError("stdrange elements must be numbers or 'auto'") # process parameters self.__cutoffp = IntensityRangeStandardization.to_float(cutoffp) self.__landmarkp = IntensityRangeStandardization.to_float(sorted(landmarkp)) - self.__stdrange = ['auto' if 'auto' == x else float(x) for x in stdrange] + self.__stdrange = ["auto" if "auto" == x else float(x) for x in stdrange] # initialize remaining instance parameters self.__model = None @@ -268,15 +288,25 @@ def train(self, images): # treat single intensity accumulation error if not len(numpy.unique(numpy.concatenate((ci, li)))) == len(ci) + len(li): - raise SingleIntensityAccumulationError('Image no.{} shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. Another possibility would be to reduce the number of landmark percentiles landmarkp or to change their distribution.'.format(idx)) - - self.__model = [self.__stdrange[0]] + list(numpy.mean(lim, 0)) + [self.__stdrange[1]] - self.__sc_umins = [self.__stdrange[0]] + list(numpy.min(lim, 0)) + [self.__stdrange[1]] - self.__sc_umaxs = [self.__stdrange[0]] + list(numpy.max(lim, 0)) + [self.__stdrange[1]] + raise SingleIntensityAccumulationError( + "Image no.{} shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. Another possibility would be to reduce the number of landmark percentiles landmarkp or to change their distribution.".format( + idx + ) + ) + + self.__model = ( + [self.__stdrange[0]] + list(numpy.mean(lim, 0)) + [self.__stdrange[1]] + ) + self.__sc_umins = ( + [self.__stdrange[0]] + list(numpy.min(lim, 0)) + [self.__stdrange[1]] + ) + self.__sc_umaxs = ( + [self.__stdrange[0]] + list(numpy.max(lim, 0)) + [self.__stdrange[1]] + ) return self - def transform(self, image, surpress_mapping_check = False): + def transform(self, image, surpress_mapping_check=False): r""" Transform an images intensity values to the learned standard intensity space. @@ -307,19 +337,23 @@ def transform(self, image, surpress_mapping_check = False): If no model has been trained before """ if None == self.__model: - raise UntrainedException('Model not trained. Call train() first.') + raise UntrainedException("Model not trained. 
Call train() first.") image = numpy.asarray(image) # determine image intensity values at cut-off percentiles & landmark percentiles - li = numpy.percentile(image, [self.__cutoffp[0]] + self.__landmarkp + [self.__cutoffp[1]]) + li = numpy.percentile( + image, [self.__cutoffp[0]] + self.__landmarkp + [self.__cutoffp[1]] + ) # treat single intensity accumulation error if not len(numpy.unique(li)) == len(li): - raise SingleIntensityAccumulationError('The image shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. The only other possibility would be to re-train the model with a reduced number of landmark percentiles landmarkp or a changed distribution.') + raise SingleIntensityAccumulationError( + "The image shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. The only other possibility would be to re-train the model with a reduced number of landmark percentiles landmarkp or a changed distribution." + ) # create linear mapping models for the percentile segments to the learned standard intensity space - ipf = interp1d(li, self.__model, bounds_error = False) + ipf = interp1d(li, self.__model, bounds_error=False) # transform the input image intensity values output = ipf(image) @@ -332,11 +366,13 @@ def transform(self, image, surpress_mapping_check = False): output[image > li[-1]] = rlm(image[image > li[-1]]) if not surpress_mapping_check and not self.__check_mapping(li): - raise InformationLossException('Image can not be transformed to the learned standard intensity space without loss of information. Please re-train.') + raise InformationLossException( + "Image can not be transformed to the learned standard intensity space without loss of information. Please re-train." + ) return output - def train_transform(self, images, surpress_mapping_check = False): + def train_transform(self, images, surpress_mapping_check=False): r""" See also -------- @@ -417,7 +453,7 @@ def __compute_stdrange(self, images): stdrange : (float, float) The borders of the computed standard intensity range. """ - if not 'auto' in self.__stdrange: + if not "auto" in self.__stdrange: return self.__stdrange copl, copu = self.__cutoffp @@ -433,7 +469,11 @@ def __compute_stdrange(self, images): # treat single intensity accumulation error if 0 in s[-1]: - raise SingleIntensityAccumulationError('Image no.{} shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. Another possibility would be to reduce the number of landmark percentiles landmarkp or to change their distribution.'.format(idx)) + raise SingleIntensityAccumulationError( + "Image no.{} shows an unusual single-intensity accumulation that leads to a situation where two percentile values are equal. This situation is usually caused, when the background has not been removed from the image. 
Another possibility would be to reduce the number of landmark percentiles landmarkp or to change their distribution.".format( + idx + ) + ) # select the maximum and minimum of each percentile segment over all images maxs = numpy.max(s, 0) @@ -449,9 +489,9 @@ def __compute_stdrange(self, images): im = numpy.mean(m) # return interval with borders according to settings - if 'auto' == self.__stdrange[0] and 'auto' == self.__stdrange[1]: + if "auto" == self.__stdrange[0] and "auto" == self.__stdrange[1]: return im - intv / 2, im + intv / 2 - elif 'auto' == self.__stdrange[0]: + elif "auto" == self.__stdrange[0]: return self.__stdrange[1] - intv, self.__stdrange[1] else: return self.__stdrange[0], self.__stdrange[0] + intv @@ -462,7 +502,9 @@ def __check_mapping(self, landmarks): be transformed to the learned standard intensity space without loss of information. """ - sc_udiff = numpy.asarray(self.__sc_umaxs)[1:] - numpy.asarray(self.__sc_umins)[:-1] + sc_udiff = ( + numpy.asarray(self.__sc_umaxs)[1:] - numpy.asarray(self.__sc_umins)[:-1] + ) l_diff = numpy.asarray(landmarks)[1:] - numpy.asarray(landmarks)[:-1] return numpy.all(sc_udiff > numpy.asarray(l_diff)) @@ -474,9 +516,11 @@ def is_sequence(arg): Credits to Steve R. Hastings a.k.a steveha @ http://stackoverflow.com """ - return (not hasattr(arg, "strip") and - hasattr(arg, "__getitem__") or - hasattr(arg, "__iter__")) + return ( + not hasattr(arg, "strip") + and hasattr(arg, "__getitem__") + or hasattr(arg, "__iter__") + ) @staticmethod def is_number(arg): @@ -484,6 +528,7 @@ def is_number(arg): Checks whether the passed argument is a valid number or not. """ import numbers + return isinstance(arg, numbers.Number) @staticmethod @@ -494,24 +539,26 @@ def are_numbers(arg): return numpy.all([IntensityRangeStandardization.is_number(x) for x in arg]) @staticmethod - def is_in_interval(n, l, r, border = 'included'): + def is_in_interval(n, l, r, border="included"): """ Checks whether a number is inside the interval l, r. """ - if 'included' == border: + if "included" == border: return (n >= l) and (n <= r) - elif 'excluded' == border: + elif "excluded" == border: return (n > l) and (n < r) else: - raise ValueError('borders must be either \'included\' or \'excluded\'') + raise ValueError("borders must be either 'included' or 'excluded'") @staticmethod - def are_in_interval(s, l, r, border = 'included'): + def are_in_interval(s, l, r, border="included"): """ Checks whether all number in the sequence s lie inside the interval formed by l and r. """ - return numpy.all([IntensityRangeStandardization.is_in_interval(x, l, r, border) for x in s]) + return numpy.all( + [IntensityRangeStandardization.is_in_interval(x, l, r, border) for x in s] + ) @staticmethod def to_float(s): @@ -533,20 +580,25 @@ def linear_model(x, y): b = y1 - (m * x1) return lambda x: m * x + b + class SingleIntensityAccumulationError(Exception): """ Thrown when an image shows an unusual single-intensity peaks which would obstruct both, training and transformation. """ + class InformationLossException(Exception): """ Thrown when a transformation can not be guaranteed to be lossless. """ + pass + class UntrainedException(Exception): """ Thrown when a transformation is attempted before training. """ + pass diff --git a/medpy/filter/__init__.py b/medpy/filter/__init__.py index 2df24f62..b6af2461 100644 --- a/medpy/filter/__init__.py +++ b/medpy/filter/__init__.py @@ -6,7 +6,7 @@ This package contains various image filters and image manipulation functions. 
- + Smoothing :mod:`medpy.filter.smoothing` ======================================= Image smoothing / noise reduction in grayscale images. @@ -14,10 +14,10 @@ .. module:: medpy.filter.smoothing .. autosummary:: :toctree: generated/ - + anisotropic_diffusion gauss_xminus1d - + Binary :mod:`medpy.filter.binary` ================================= Binary image manipulation. @@ -25,7 +25,7 @@ .. module:: medpy.filter.binary .. autosummary:: :toctree: generated/ - + size_threshold largest_connected_component bounding_box @@ -37,7 +37,7 @@ .. module:: medpy.filter.image .. autosummary:: :toctree: generated/ - + sls ssd average_filter @@ -45,7 +45,7 @@ local_minima otsu resample - + Label :mod:`medpy.filter.label` ================================= Label map manipulation. @@ -53,12 +53,12 @@ .. module:: medpy.filter.label .. autosummary:: :toctree: generated/ - + relabel_map relabel relabel_non_zero fit_labels_to_mask - + Noise :mod:`medpy.filter.noise` =============================== Global and local noise estimation in grayscale images. @@ -66,12 +66,12 @@ .. module:: medpy.filter.noise .. autosummary:: :toctree: generated/ - + immerkaer immerkaer_local separable_convolution - - + + Utilities :mod:`medpy.filter.utilities` ======================================= Utilities to apply filters selectively and create your own ones. @@ -79,11 +79,11 @@ .. module:: medpy.filter.utilities .. autosummary:: :toctree: generated/ - + xminus1d intersection pad - + Hough transform :mod:`medpy.filter.houghtransform` ================================================== The hough transform shape detection algorithm. @@ -91,12 +91,12 @@ .. module:: medpy.filter.houghtransform .. autosummary:: :toctree: generated/ - + ght ght_alternative template_ellipsoid template_sphere - + Intensity range standardization :mod:`medpy.filter.IntensityRangeStandardization` ================================================================================= A learning method to align the intensity ranges of images. @@ -104,34 +104,27 @@ .. module:: medpy.filter.IntensityRangeStandardization .. autosummary:: :toctree: generated/ - + IntensityRangeStandardization """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
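# Note on the pattern below: at module level, dir() returns every name bound so
# far, so the comprehension re-exports all public (non-underscore) names without
# maintaining an explicit export list. A hedged toy sketch of the same idiom:
#   from math import sqrt                                  # public, exported
#   _cache = {}                                            # private, skipped
#   __all__ = [s for s in dir() if not s.startswith("_")]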
# if __all__ is not set, only the following, explicit import statements are executed -from .binary import largest_connected_component, size_threshold, bounding_box -from .image import sls, ssd, average_filter, sum_filter, otsu, local_minima, resample -from .smoothing import anisotropic_diffusion, gauss_xminus1d -from .label import fit_labels_to_mask, relabel, relabel_map, relabel_non_zero -from .houghtransform import ght, ght_alternative, template_ellipsoid, template_sphere -from .utilities import pad, intersection, xminus1d -from .IntensityRangeStandardization import IntensityRangeStandardization, UntrainedException, InformationLossException, SingleIntensityAccumulationError # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/filter/binary.py b/medpy/filter/binary.py index b4f86649..b68fb565 100644 --- a/medpy/filter/binary.py +++ b/medpy/filter/binary.py @@ -19,7 +19,7 @@ # status Release # build-in modules -from operator import lt, le, gt, ge, ne, eq +from operator import eq, ge, gt, le, lt, ne # third-party modules import numpy @@ -27,8 +27,9 @@ # own modules + # code -def size_threshold(img, thr, comp='lt', structure = None): +def size_threshold(img, thr, comp="lt", structure=None): r""" Removes binary objects from an image identified by a size threshold. @@ -70,7 +71,7 @@ def size_threshold(img, thr, comp='lt', structure = None): divide the supplied threshold through the real voxel size. """ - operators = {'lt': lt, 'le': le, 'gt': gt, 'ge': ge, 'eq': eq, 'ne': ne} + operators = {"lt": lt, "le": le, "gt": gt, "ge": ge, "eq": eq, "ne": ne} img = numpy.asarray(img).astype(numpy.bool_) if comp not in operators: @@ -85,7 +86,8 @@ def size_threshold(img, thr, comp='lt', structure = None): return img -def largest_connected_component(img, structure = None): + +def largest_connected_component(img, structure=None): r""" Select the largest connected binary component in an image. @@ -108,13 +110,17 @@ def largest_connected_component(img, structure = None): The supplied binary image with only the largest connected component remaining. """ labeled_array, num_features = label(img, structure) - component_sizes = [numpy.count_nonzero(labeled_array == label_idx) for label_idx in range(1, num_features + 1)] + component_sizes = [ + numpy.count_nonzero(labeled_array == label_idx) + for label_idx in range(1, num_features + 1) + ] largest_component_idx = numpy.argmax(component_sizes) + 1 out = numpy.zeros(img.shape, numpy.bool_) out[labeled_array == largest_component_idx] = True return out + def bounding_box(img): r""" Return the bounding box incorporating all non-zero values in the image. diff --git a/medpy/filter/houghtransform.py b/medpy/filter/houghtransform.py index 380b4d5f..7e94910b 100644 --- a/medpy/filter/houghtransform.py +++ b/medpy/filter/houghtransform.py @@ -27,8 +27,9 @@ # own modules from .utilities import pad + # public methods -def ght_alternative (img, template, indices): +def ght_alternative(img, template, indices): """ Alternative implementation of the general hough transform, which uses iteration over indices rather than broadcasting rules like `ght`. @@ -58,12 +59,16 @@ def ght_alternative (img, template, indices): # check supplied parameters if img.ndim != template.ndim: - raise AttributeError('The supplied image and template must be of the same dimensionality.') + raise AttributeError( + "The supplied image and template must be of the same dimensionality." 
+ ) if not numpy.all(numpy.greater_equal(img.shape, template.shape)): - raise AttributeError('The supplied template is bigger than the image. This setting makes no sense for a hough transform.') + raise AttributeError( + "The supplied template is bigger than the image. This setting makes no sense for a hough transform." + ) # pad the original image - img_padded = pad(img, footprint=template, mode='constant') + img_padded = pad(img, footprint=template, mode="constant") # prepare the hough image if numpy.bool_ == img.dtype: @@ -79,6 +84,7 @@ def ght_alternative (img, template, indices): return img_hough + def ght(img, template): r""" Implementation of the general hough transform for all dimensions. @@ -121,9 +127,13 @@ def ght(img, template): # check supplied parameters if img.ndim != template.ndim: - raise AttributeError('The supplied image and template must be of the same dimensionality.') + raise AttributeError( + "The supplied image and template must be of the same dimensionality." + ) if not numpy.all(numpy.greater_equal(img.shape, template.shape)): - raise AttributeError('The supplied template is bigger than the image. This setting makes no sense for a hough transform.') + raise AttributeError( + "The supplied template is bigger than the image. This setting makes no sense for a hough transform." + ) # compute center of template array center = (numpy.asarray(template.shape) - 1) // 2 @@ -140,20 +150,21 @@ def ght(img, template): slicers_orig = [] for i in range(img.ndim): pos = -1 * (idx[i] - center[i]) - if 0 == pos: # no shift + if 0 == pos: # no shift slicers_hough.append(slice(None, None)) slicers_orig.append(slice(None, None)) - elif pos > 0: # right shifted hough + elif pos > 0: # right shifted hough slicers_hough.append(slice(pos, None)) slicers_orig.append(slice(None, -1 * pos)) - else: # left shifted hough + else: # left shifted hough slicers_hough.append(slice(None, pos)) slicers_orig.append(slice(-1 * pos, None)) img_hough[slicers_hough] += img[slicers_orig] return img_hough -def template_sphere (radius, dimensions): + +def template_sphere(radius, dimensions): r""" Returns a spherical binary structure of a of the supplied radius that can be used as template input to the generalized hough transform. @@ -171,7 +182,7 @@ def template_sphere (radius, dimensions): A boolean array containing a sphere. """ if int(dimensions) != dimensions: - raise TypeError('The supplied dimension parameter must be of type integer.') + raise TypeError("The supplied dimension parameter must be of type integer.") dimensions = int(dimensions) return template_ellipsoid(dimensions * [radius * 2]) @@ -193,16 +204,20 @@ def template_ellipsoid(shape): A boolean array containing an ellipsoid. """ # prepare template array - template = numpy.zeros([int(x // 2 + (x % 2)) for x in shape], dtype=numpy.bool_) # in odd shape cases, this will include the ellipses middle line, otherwise not + template = numpy.zeros( + [int(x // 2 + (x % 2)) for x in shape], dtype=numpy.bool_ + ) # in odd shape cases, this will include the ellipses middle line, otherwise not # get real world offset to compute the ellipsoid membership rw_offset = [] for s in shape: - if int(s) % 2 == 0: rw_offset.append(0.5 - (s % 2) / 2.) # number before point is even - else: rw_offset.append(-1 * (s % int(s)) / 2.) 
# number before point is odd + if int(s) % 2 == 0: + rw_offset.append(0.5 - (s % 2) / 2.0) # number before point is even + else: + rw_offset.append(-1 * (s % int(s)) / 2.0) # number before point is odd # prepare an array containing the squares of the half axes to avoid computing inside the loop - shape_pow = numpy.power(numpy.asarray(shape) / 2., 2) + shape_pow = numpy.power(numpy.asarray(shape) / 2.0, 2) # we use the ellipse normal form to find all point in its surface as well as volume # e.g. for 2D, all voxels inside the ellipse (or on its surface) with half-axes a and b @@ -210,17 +225,31 @@ def template_ellipsoid(shape): # to not have to iterate over each voxel, we make use of the ellipsoids symmetry # and construct just a part of the whole ellipse here for idx in numpy.ndindex(template.shape): - distance = sum((math.pow(coordinate + rwo, 2) / axes_pow for axes_pow, coordinate, rwo in zip(shape_pow, idx, rw_offset))) # plus once since ndarray is zero based, but real-world coordinates not - if distance <= 1: template[idx] = True + distance = sum( + ( + math.pow(coordinate + rwo, 2) / axes_pow + for axes_pow, coordinate, rwo in zip(shape_pow, idx, rw_offset) + ) + ) # plus once since ndarray is zero based, but real-world coordinates not + if distance <= 1: + template[idx] = True # we take now our ellipse part and flip it once along each dimension, concatenating it in each step # the slicers are constructed to flip in each step the current dimension i.e. to behave like arr[...,::-1,...] for i in range(template.ndim): - slicers = [(slice(None, None, -1) if i == j else slice(None)) for j in range(template.ndim)] - if 0 == int(shape[i]) % 2: # even case + slicers = [ + (slice(None, None, -1) if i == j else slice(None)) + for j in range(template.ndim) + ] + if 0 == int(shape[i]) % 2: # even case template = numpy.concatenate((template[slicers], template), i) - else: # odd case, in which an overlap has to be created - slicers_truncate = [(slice(None, -1) if i == j else slice(None)) for j in range(template.ndim)] - template = numpy.concatenate((template[slicers][slicers_truncate], template), i) + else: # odd case, in which an overlap has to be created + slicers_truncate = [ + (slice(None, -1) if i == j else slice(None)) + for j in range(template.ndim) + ] + template = numpy.concatenate( + (template[slicers][slicers_truncate], template), i + ) return template diff --git a/medpy/filter/image.py b/medpy/filter/image.py index fea445f5..9cfca39a 100644 --- a/medpy/filter/image.py +++ b/medpy/filter/image.py @@ -20,23 +20,36 @@ # build-in modules import itertools -import numbers import math +import numbers # third-party modules import numpy -from scipy.ndimage import convolve, gaussian_filter, minimum_filter +from scipy.ndimage import convolve, gaussian_filter, minimum_filter, zoom from scipy.ndimage._ni_support import _get_output -from scipy.ndimage import zoom -# own modules -from .utilities import pad, __make_footprint from ..io import header +# own modules +from .utilities import __make_footprint, pad + + # code -def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True, - sn_size = None, sn_footprint = None, sn_mode = "reflect", sn_cval = 0.0, - pn_size = None, pn_footprint = None, pn_mode = "reflect", pn_cval = 0.0): +def sls( + minuend, + subtrahend, + metric="ssd", + noise="global", + signed=True, + sn_size=None, + sn_footprint=None, + sn_mode="reflect", + sn_cval=0.0, + pn_size=None, + pn_footprint=None, + pn_mode="reflect", + pn_cval=0.0, +): r""" Computes the signed 
local similarity between two images. @@ -124,9 +137,9 @@ def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True, subtrahend = numpy.asarray(subtrahend) if numpy.iscomplexobj(minuend): - raise TypeError('complex type not supported') + raise TypeError("complex type not supported") if numpy.iscomplexobj(subtrahend): - raise TypeError('complex type not supported') + raise TypeError("complex type not supported") mshape = [ii for ii in minuend.shape if ii > 0] sshape = [ii for ii in subtrahend.shape if ii > 0] @@ -138,7 +151,7 @@ def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True, sn_footprint = __make_footprint(minuend, sn_size, sn_footprint) sn_fshape = [ii for ii in sn_footprint.shape if ii > 0] if len(sn_fshape) != minuend.ndim: - raise RuntimeError('search neighbourhood footprint array has incorrect shape.') + raise RuntimeError("search neighbourhood footprint array has incorrect shape.") #!TODO: Is this required? if not sn_footprint.flags.contiguous: @@ -148,28 +161,61 @@ def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True, subtrahend = pad(subtrahend, footprint=sn_footprint, mode=sn_mode, cval=sn_cval) # compute slicers for position where the search neighbourhood sn_footprint is TRUE - slicers = [[slice(x, (x + 1) - d if 0 != (x + 1) - d else None) for x in range(d)] for d in sn_fshape] - slicers = [sl for sl, tv in zip(itertools.product(*slicers), sn_footprint.flat) if tv] + slicers = [ + [slice(x, (x + 1) - d if 0 != (x + 1) - d else None) for x in range(d)] + for d in sn_fshape + ] + slicers = [ + sl for sl, tv in zip(itertools.product(*slicers), sn_footprint.flat) if tv + ] # compute difference images and sign images for search neighbourhood elements - ssds = [ssd(minuend, subtrahend[slicer], normalized=True, signed=signed, size=pn_size, footprint=pn_footprint, mode=pn_mode, cval=pn_cval) for slicer in slicers] + ssds = [ + ssd( + minuend, + subtrahend[slicer], + normalized=True, + signed=signed, + size=pn_size, + footprint=pn_footprint, + mode=pn_mode, + cval=pn_cval, + ) + for slicer in slicers + ] distance = [x[0] for x in ssds] distance_sign = [x[1] for x in ssds] # compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure variance = numpy.average(distance, 0) - variance = gaussian_filter(variance, sigma=3) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes - if 'global' == noise: + variance = gaussian_filter( + variance, sigma=3 + ) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes + if "global" == noise: variance = variance.sum() / float(numpy.product(variance.shape)) # variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one? 
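    # The step below maps each patch distance to a signed similarity in [-1, 1]:
    # sls_i = sign_i * exp(-d_i / variance). Distances small relative to the
    # estimated noise variance give values near +/-1, large ones decay towards 0.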
# compute sls - sls = [dist_sign * numpy.exp(-1 * (dist / variance)) for dist_sign, dist in zip(distance_sign, distance)] + sls = [ + dist_sign * numpy.exp(-1 * (dist / variance)) + for dist_sign, dist in zip(distance_sign, distance) + ] # convert into sls image, swapping dimensions to have varying patches in the last dimension return numpy.rollaxis(numpy.asarray(sls), 0, minuend.ndim + 1) -def ssd(minuend, subtrahend, normalized=True, signed=False, size=None, footprint=None, mode="reflect", cval=0.0, origin=0): + +def ssd( + minuend, + subtrahend, + normalized=True, + signed=False, + size=None, + footprint=None, + mode="reflect", + cval=0.0, + origin=0, +): r""" Computes the sum of squared difference (SSD) between patches of minuend and subtrahend. @@ -219,15 +265,43 @@ def ssd(minuend, subtrahend, normalized=True, signed=False, size=None, footprint if signed: difference = minuend - subtrahend difference_squared = numpy.square(difference) - distance_sign = numpy.sign(convolution_filter(numpy.sign(difference) * difference_squared, size=size, footprint=footprint, mode=mode, cval=cval, origin=origin, output=output)) - distance = convolution_filter(difference_squared, size=size, footprint=footprint, mode=mode, cval=cval, output=output) + distance_sign = numpy.sign( + convolution_filter( + numpy.sign(difference) * difference_squared, + size=size, + footprint=footprint, + mode=mode, + cval=cval, + origin=origin, + output=output, + ) + ) + distance = convolution_filter( + difference_squared, + size=size, + footprint=footprint, + mode=mode, + cval=cval, + output=output, + ) else: - distance = convolution_filter(numpy.square(minuend - subtrahend), size=size, footprint=footprint, mode=mode, cval=cval, origin=origin, output=output) + distance = convolution_filter( + numpy.square(minuend - subtrahend), + size=size, + footprint=footprint, + mode=mode, + cval=cval, + origin=origin, + output=output, + ) distance_sign = 1 return distance, distance_sign -def average_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): + +def average_filter( + input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0 +): r""" Calculates a multi-dimensional average filter. @@ -279,12 +353,16 @@ def average_filter(input, size=None, footprint=None, output=None, mode="reflect" filter_size = footprint.sum() output = _get_output(output, input) - sum_filter(input, footprint=footprint, output=output, mode=mode, cval=cval, origin=origin) + sum_filter( + input, footprint=footprint, output=output, mode=mode, cval=cval, origin=origin + ) output /= filter_size return output -def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0): +def sum_filter( + input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0 +): r""" Calculates a multi-dimensional sum filter. @@ -336,7 +414,8 @@ def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cv slicer = [slice(None, None, -1)] * footprint.ndim return convolve(input, footprint[slicer], output, mode, cval, origin) -def otsu (img, bins=64): + +def otsu(img, bins=64): r""" Otsu's method to find the optimal threshold separating an image into fore- and background. 
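As a quick orientation for the exhaustive threshold search implemented below, a hedged usage sketch (the bimodal random sample stands in for a grayscale image):

    >>> import numpy
    >>> from medpy.filter.image import otsu
    >>> img = numpy.concatenate((numpy.random.randn(500), numpy.random.randn(500) + 5))
    >>> thr = otsu(img, bins=64)   # threshold maximizing between-class variance
    >>> mask = img >= thr          # binary foreground selection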
@@ -366,7 +445,7 @@ def otsu (img, bins=64): # check supplied parameters if bins <= 1: - raise AttributeError('At least a number two bins have to be provided.') + raise AttributeError("At least a number two bins have to be provided.") # determine initial threshold and threshold step-length steplength = (img.max() - img.min()) / float(bins) @@ -378,13 +457,14 @@ def otsu (img, bins=64): # iterate over the thresholds and find highest between class variance for threshold in numpy.arange(initial_threshold, img.max(), steplength): - mask_fg = (img >= threshold) - mask_bg = (img < threshold) + mask_fg = img >= threshold + mask_bg = img < threshold wfg = numpy.count_nonzero(mask_fg) wbg = numpy.count_nonzero(mask_bg) - if 0 == wfg or 0 == wbg: continue + if 0 == wfg or 0 == wbg: + continue mfg = img[mask_fg].mean() mbg = img[mask_bg].mean() @@ -397,7 +477,8 @@ def otsu (img, bins=64): return best_threshold -def local_minima(img, min_distance = 4): + +def local_minima(img, min_distance=4): r""" Returns all local minima from an image. @@ -417,51 +498,55 @@ def local_minima(img, min_distance = 4): """ # @TODO: Write a unittest for this. fits = numpy.asarray(img) - minfits = minimum_filter(fits, size=min_distance) # default mode is reflect + minfits = minimum_filter(fits, size=min_distance) # default mode is reflect minima_mask = fits == minfits good_indices = numpy.transpose(minima_mask.nonzero()) good_fits = fits[minima_mask] order = good_fits.argsort() return good_indices[order], good_fits[order] -def resample(img, hdr, target_spacing, bspline_order=3, mode='constant'): - """ - Re-sample an image to a new voxel-spacing. - - Parameters - ---------- - img : array_like - The image. - hdr : object - The image header. - target_spacing : number or sequence of numbers - The target voxel spacing to achieve. If a single number, isotropic spacing is assumed. - bspline_order : int - The bspline order used for interpolation. - mode : str - Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'. - - Warning - ------- - Voxel-spacing of input header will be modified in-place! - - Returns - ------- - img : ndarray - The re-sampled image. - hdr : object - The image header with the new voxel spacing. - """ - if isinstance(target_spacing, numbers.Number): - target_spacing = [target_spacing] * img.ndim - - # compute zoom values - zoom_factors = [old / float(new) for new, old in zip(target_spacing, header.get_pixel_spacing(hdr))] - - # zoom image - img = zoom(img, zoom_factors, order=bspline_order, mode=mode) - - # set new voxel spacing - header.set_pixel_spacing(hdr, target_spacing) - - return img, hdr + +def resample(img, hdr, target_spacing, bspline_order=3, mode="constant"): + """ + Re-sample an image to a new voxel-spacing. + + Parameters + ---------- + img : array_like + The image. + hdr : object + The image header. + target_spacing : number or sequence of numbers + The target voxel spacing to achieve. If a single number, isotropic spacing is assumed. + bspline_order : int + The bspline order used for interpolation. + mode : str + Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'. + + Warning + ------- + Voxel-spacing of input header will be modified in-place! + + Returns + ------- + img : ndarray + The re-sampled image. + hdr : object + The image header with the new voxel spacing. 
+ """ + if isinstance(target_spacing, numbers.Number): + target_spacing = [target_spacing] * img.ndim + + # compute zoom values + zoom_factors = [ + old / float(new) + for new, old in zip(target_spacing, header.get_pixel_spacing(hdr)) + ] + + # zoom image + img = zoom(img, zoom_factors, order=bspline_order, mode=mode) + + # set new voxel spacing + header.set_pixel_spacing(hdr, target_spacing) + + return img, hdr diff --git a/medpy/filter/label.py b/medpy/filter/label.py index ee5c59bb..a6feb297 100644 --- a/medpy/filter/label.py +++ b/medpy/filter/label.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -26,16 +26,17 @@ # own modules from ..core.exceptions import ArgumentError + # code def relabel_map(label_image, mapping, key=lambda x, y: x[y]): r""" Relabel an image using the supplied mapping. - + The ``mapping`` can be any kind of subscriptable object. The respective region id is used to access the new value from the ``mapping``. The ``key`` keyword parameter can be used to supply another access function. The ``key`` function must have the signature key(mapping, region-id) and return the new region-id to assign. - + Parameters ---------- label_image : array_like @@ -44,46 +45,51 @@ def relabel_map(label_image, mapping, key=lambda x, y: x[y]): A mapping object. key : function Can be used to defined the key-access to the ``mapping`` object. - + Returns ------- relabel_map : ndarray A label map with new region ids. - + Raises ------ ArgumentError If a region id is missing in the supplied mapping - """ + """ label_image = scipy.array(label_image) - + def _map(x): try: return key(mapping, x) except Exception as e: - raise ArgumentError('No conversion for region id {} found in the supplied mapping. Error: {}'.format(x, e)) - + raise ArgumentError( + "No conversion for region id {} found in the supplied mapping. Error: {}".format( + x, e + ) + ) + vmap = scipy.vectorize(_map, otypes=[label_image.dtype]) - + return vmap(label_image) -def relabel(label_image, start = 1): + +def relabel(label_image, start=1): r""" Relabel the regions of a label image. Re-processes the labels to make them consecutively and starting from start. - + Parameters ---------- label_image : array_like A nD label map. start : integer The id of the first label to assign - + Returns ------- relabel_map : ndarray The relabelled label map. - + See also -------- relabel_non_zero @@ -98,37 +104,40 @@ def relabel(label_image, start = 1): rav[i] = mapping[rav[i]] return rav.reshape(label_image.shape) -def relabel_non_zero(label_image, start = 1): - r""" + +def relabel_non_zero(label_image, start=1): + r""" Relabel the regions of a label image. Re-processes the labels to make them consecutively and starting from start. Keeps all zero (0) labels, as they are considered background. - + Parameters ---------- label_image : array_like A nD label map. 
start : integer The id of the first label to assign - + Returns ------- relabel_map : ndarray The relabelled label map. - + See also -------- - relabel + relabel """ - if start <= 0: raise ArgumentError('The starting value can not be 0 or lower.') - + if start <= 0: + raise ArgumentError("The starting value can not be 0 or lower.") + l = list(scipy.unique(label_image)) - if 0 in l: l.remove(0) + if 0 in l: + l.remove(0) mapping = dict() mapping[0] = 0 for key, item in zip(l, list(range(start, len(l) + start))): mapping[key] = item - + return relabel_map(label_image, mapping) @@ -137,21 +146,21 @@ def fit_labels_to_mask(label_image, mask): Reduces a label images by overlaying it with a binary mask and assign the labels either to the mask or to the background. The resulting binary mask is the nearest expression the label image can form of the supplied binary mask. - + Parameters ---------- label_image : array_like A nD label map. mask : array_like A mask image, i.e., a binary image with False for background and True for foreground. - + Returns ------- best_fit : ndarray The best fit of the labels to the mask. - + Raises - ------ + ------ ValueError If ``label_image`` and ``mask`` are not of the same shape. """ @@ -159,35 +168,36 @@ def fit_labels_to_mask(label_image, mask): mask = scipy.asarray(mask, dtype=scipy.bool_) if label_image.shape != mask.shape: - raise ValueError('The input images must be of the same shape.') - + raise ValueError("The input images must be of the same shape.") + # prepare collection dictionaries labels = scipy.unique(label_image) collection = {} for label in labels: collection[label] = [0, 0, []] # size, union, points - + # iterate over the label images pixels and collect position, size and union for x in range(label_image.shape[0]): for y in range(label_image.shape[1]): for z in range(label_image.shape[2]): - entry = collection[label_image[x,y,z]] + entry = collection[label_image[x, y, z]] entry[0] += 1 - if mask[x,y,z]: entry[1] += 1 - entry[2].append((x,y,z)) - + if mask[x, y, z]: + entry[1] += 1 + entry[2].append((x, y, z)) + # select labels that are more than half in the mask for label in labels: - if collection[label][0] / 2. >= collection[label][1]: + if collection[label][0] / 2.0 >= collection[label][1]: del collection[label] - + # image_result = numpy.zeros_like(mask) this is eq. to mask.copy().fill(0), which directly applied does not allow access to the rows and colums: Why? image_result = mask.copy() - image_result.fill(False) + image_result.fill(False) # add labels to result mask for label, data in list(collection.items()): for point in data[2]: image_result[point] = True - + return image_result diff --git a/medpy/filter/noise.py b/medpy/filter/noise.py index ab9cd9a3..106c85c2 100644 --- a/medpy/filter/noise.py +++ b/medpy/filter/noise.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
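# The estimators below assume additive zero-mean Gaussian noise; a hedged usage
# sketch (sigma recovered from a synthetic noise-only image, value approximate):
#   >>> import numpy
#   >>> from medpy.filter.noise import immerkaer
#   >>> img = numpy.random.normal(0, 2.0, (128, 128))
#   >>> immerkaer(img)   # approximately 2.0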
# @@ -22,21 +22,21 @@ # third-party modules import numpy -from scipy.ndimage import _ni_support -from scipy.ndimage import convolve1d +from scipy.ndimage import _ni_support, convolve1d # own modules + # code def immerkaer_local(input, size, output=None, mode="reflect", cval=0.0): r""" Estimate the local noise. - + The input image is assumed to have additive zero mean Gaussian noise. The Immerkaer noise estimation is applied to the image locally over a N-dimensional cube of side-length size. The size of the region should be sufficiently high for a stable noise estimation. - + Parameters ---------- input : array_like @@ -45,82 +45,87 @@ def immerkaer_local(input, size, output=None, mode="reflect", cval=0.0): The local region's side length. output : ndarray, optional The `output` parameter passes an array in which to store the - filter output. + filter output. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0 - + is 0.0 + Returns ------- sigmas : array_like Map of the estimated standard deviation of the images Gaussian noise per voxel. - + Notes ----- Does not take the voxel spacing into account. Works good with medium to strong noise. Tends to underestimate for low noise levels. - + See also -------- immerkaer """ output = _ni_support._get_output(output, input) footprint = numpy.asarray([1] * size) - + # build nd-kernel to acquire square root of sum of squared elements kernel = [1, -2, 1] for _ in range(input.ndim - 1): kernel = numpy.tensordot(kernel, [1, -2, 1], 0) - divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. - + divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. + # compute laplace of input laplace = separable_convolution(input, [1, -2, 1], numpy.double, mode, cval) - + # compute factor - factor = numpy.sqrt(numpy.pi / 2.) * 1. / ( numpy.sqrt(divider) * numpy.power(footprint.size, laplace.ndim) ) - + factor = ( + numpy.sqrt(numpy.pi / 2.0) + * 1.0 + / (numpy.sqrt(divider) * numpy.power(footprint.size, laplace.ndim)) + ) + # locally sum laplacian values separable_convolution(numpy.abs(laplace), footprint, output, mode, cval) - + output *= factor - + return output + def immerkaer(input, mode="reflect", cval=0.0): r""" Estimate the global noise. - + The input image is assumed to have additive zero mean Gaussian noise. Using a convolution with a Laplacian operator and a subsequent averaging the standard deviation sigma of this noise is estimated. This estimation is global i.e. the noise is assumed to be globally homogeneous over the image. - + Implementation based on [1]_. - - + + Immerkaer suggested a Laplacian-based 2D kernel:: - + [[ 1, -2, 1], [-2, 4, -1], [ 1, -2, 1]] , which is separable and can therefore be applied by consecutive convolutions with the one dimensional kernel [1, -2, 1]. - + We generalize from this 1D-kernel to an ND-kernel by applying N consecutive convolutions with the 1D-kernel along all N dimensions. 
- + This is equivalent with convolving the image with an ND-kernel constructed by calling - + >>> kernel1d = numpy.asarray([1, -2, 1]) >>> kernel = kernel1d.copy() >>> for _ in range(input.ndim): >>> kernel = numpy.tensordot(kernel, kernel1d, 0) - + Parameters ---------- input : array_like @@ -131,22 +136,22 @@ def immerkaer(input, mode="reflect", cval=0.0): 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default - is 0.0 - + is 0.0 + Returns ------- sigma : float The estimated standard deviation of the images Gaussian noise. - + Notes ----- Does not take the voxel spacing into account. Works good with medium to strong noise. Tends to underestimate for low noise levels. - + See also -------- immerkaer_local - + References ---------- .. [1] John Immerkaer, "Fast Noise Variance Estimation", Computer Vision and Image @@ -156,28 +161,35 @@ def immerkaer(input, mode="reflect", cval=0.0): kernel = [1, -2, 1] for _ in range(input.ndim - 1): kernel = numpy.tensordot(kernel, [1, -2, 1], 0) - divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. - + divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. + # compute laplace of input and derive noise sigma laplace = separable_convolution(input, [1, -2, 1], None, mode, cval) - factor = numpy.sqrt(numpy.pi / 2.) * 1. / ( numpy.sqrt(divider) * numpy.prod(laplace.shape) ) + factor = ( + numpy.sqrt(numpy.pi / 2.0) + * 1.0 + / (numpy.sqrt(divider) * numpy.prod(laplace.shape)) + ) sigma = factor * numpy.abs(laplace).sum() - + return sigma - -def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, origin=0): + + +def separable_convolution( + input, weights, output=None, mode="reflect", cval=0.0, origin=0 +): r""" Calculate a n-dimensional convolution of a separable kernel to a n-dimensional input. - + Achieved by calling convolution1d along the first axis, obtaining an intermediate image, on which the next convolution1d along the second axis is called and so on. - + Parameters ---------- input : array_like Array of which to estimate the noise. weights : ndarray - One-dimensional sequence of numbers. + One-dimensional sequence of numbers. output : array, optional The `output` parameter passes an array in which to store the filter output. @@ -191,7 +203,7 @@ def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0.0. - + Returns ------- output : ndarray @@ -207,5 +219,3 @@ def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, else: output[...] = input[...] return output - - \ No newline at end of file diff --git a/medpy/filter/smoothing.py b/medpy/filter/smoothing.py index 88f167bf..f49eba96 100644 --- a/medpy/filter/smoothing.py +++ b/medpy/filter/smoothing.py @@ -24,11 +24,11 @@ import numpy from scipy.ndimage import gaussian_filter -# path changes - # own modules from .utilities import xminus1d +# path changes + # code def gauss_xminus1d(img, sigma, dim=2): @@ -55,7 +55,10 @@ def gauss_xminus1d(img, sigma, dim=2): img = numpy.array(img, copy=False) return xminus1d(img, gaussian_filter, dim, sigma=sigma) -def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1): + +def anisotropic_diffusion( + img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1 +): r""" Edge-preserving, XD Anisotropic diffusion. 
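A rough sketch of the Immerkaer estimators above: on a structure-free image carrying known additive Gaussian noise, the global estimate should roughly recover the true standard deviation (all values invented):

import numpy
from medpy.filter.noise import immerkaer, immerkaer_local

rng = numpy.random.RandomState(0)
noisy = rng.normal(0.0, 5.0, (128, 128))  # flat image plus sigma=5 noise

print(immerkaer(noisy))                   # expected to land near 5.0
sigma_map = immerkaer_local(noisy, 15)    # per-voxel estimate, 15x15 neighbourhood
print(sigma_map.mean())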
@@ -125,16 +128,20 @@ def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None,
     """
     # define conduction gradients functions
    if option == 1:
+
        def condgradient(delta, spacing):
-            return numpy.exp(-(delta/kappa)**2.)/float(spacing)
+            return numpy.exp(-((delta / kappa) ** 2.0)) / float(spacing)
+
    elif option == 2:
+
        def condgradient(delta, spacing):
-            return 1./(1.+(delta/kappa)**2.)/float(spacing)
+            return 1.0 / (1.0 + (delta / kappa) ** 2.0) / float(spacing)
+
    elif option == 3:
        kappa_s = kappa * (2**0.5)

        def condgradient(delta, spacing):
-            top = 0.5*((1.-(delta/kappa_s)**2.)**2.)/float(spacing)
+            top = 0.5 * ((1.0 - (delta / kappa_s) ** 2.0) ** 2.0) / float(spacing)
             return numpy.where(numpy.abs(delta) <= kappa_s, top, 0)

     # initialize output array
@@ -142,25 +149,31 @@ def condgradient(delta, spacing):
     # set default voxel spacing if not supplied
     if voxelspacing is None:
-        voxelspacing = tuple([1.] * img.ndim)
+        voxelspacing = tuple([1.0] * img.ndim)

     # initialize some internal variables
     deltas = [numpy.zeros_like(out) for _ in range(out.ndim)]

     for _ in range(niter):
-
         # calculate the diffs
         for i in range(out.ndim):
-            slicer = tuple([slice(None, -1) if j == i else slice(None) for j in range(out.ndim)])
+            slicer = tuple(
+                [slice(None, -1) if j == i else slice(None) for j in range(out.ndim)]
+            )
             deltas[i][slicer] = numpy.diff(out, axis=i)

         # update matrices
-        matrices = [condgradient(delta, spacing) * delta for delta, spacing in zip(deltas, voxelspacing)]
+        matrices = [
+            condgradient(delta, spacing) * delta
+            for delta, spacing in zip(deltas, voxelspacing)
+        ]

         # subtract a copy that has been shifted ('Up/North/West' in 3D case) by one
         # pixel. Don't ask questions, just do it. Trust me.
         for i in range(out.ndim):
-            slicer = tuple([slice(1, None) if j == i else slice(None) for j in range(out.ndim)])
+            slicer = tuple(
+                [slice(1, None) if j == i else slice(None) for j in range(out.ndim)]
+            )
             matrices[i][slicer] = numpy.diff(matrices[i], axis=i)

         # update the image
diff --git a/medpy/filter/utilities.py b/medpy/filter/utilities.py
index 51b0ef12..88940aeb 100644
--- a/medpy/filter/utilities.py
+++ b/medpy/filter/utilities.py
@@ -27,6 +27,7 @@
 # own modules
 from ..io import header

+
 # code
 def xminus1d(img, fun, dim, *args, **kwargs):
     r"""
@@ -62,6 +63,7 @@
         output.append(fun(numpy.squeeze(img[slicer]), *args, **kwargs))
     return numpy.rollaxis(numpy.asarray(output), 0, dim + 1)

+
 #!TODO: Utilise the numpy.pad function that is available since 1.7.0. The numpy version should go inside this function, since it does not support the supplying of a template/footprint on its own.
 def pad(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0):
     r"""
@@ -130,118 +132,175 @@ def pad(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0)
         footprint = numpy.asarray(footprint, dtype=bool)
         fshape = [ii for ii in footprint.shape if ii > 0]
         if len(fshape) != input.ndim:
-            raise RuntimeError('filter footprint array has incorrect shape.')
+            raise RuntimeError("filter footprint array has incorrect shape.")

-    if numpy.any([x > 2*y for x, y in zip(footprint.shape, input.shape)]):
-        raise ValueError('The size of the padding element is not allowed to be more than double the size of the input array in any dimension.')
+    if numpy.any([x > 2 * y for x, y in zip(footprint.shape, input.shape)]):
+        raise ValueError(
+            "The size of the padding element is not allowed to be more than double the size of the input array in any dimension."
+ ) padding_offset = [((s - 1) / 2, s / 2) for s in fshape] input_slicer = [slice(l, None if 0 == r else -1 * r) for l, r in padding_offset] output_shape = [s + sum(os) for s, os in zip(input.shape, padding_offset)] output = _ni_support._get_output(output, input, output_shape) - if 'constant' == mode: + if "constant" == mode: output += cval output[input_slicer] = input return output - elif 'nearest' == mode: + elif "nearest" == mode: output[input_slicer] = input - dim_mult_slices = [(d, l, slice(None, l), slice(l, l + 1)) for d, (l, _) in zip(list(range(output.ndim)), padding_offset) if not 0 == l] - dim_mult_slices.extend([(d, r, slice(-1 * r, None), slice(-2 * r, -2 * r + 1)) for d, (_, r) in zip(list(range(output.ndim)), padding_offset) if not 0 == r]) + dim_mult_slices = [ + (d, l, slice(None, l), slice(l, l + 1)) + for d, (l, _) in zip(list(range(output.ndim)), padding_offset) + if not 0 == l + ] + dim_mult_slices.extend( + [ + (d, r, slice(-1 * r, None), slice(-2 * r, -2 * r + 1)) + for d, (_, r) in zip(list(range(output.ndim)), padding_offset) + if not 0 == r + ] + ) for dim, mult, to_slice, from_slice in dim_mult_slices: - slicer_to = [to_slice if d == dim else slice(None) for d in range(output.ndim)] - slicer_from = [from_slice if d == dim else slice(None) for d in range(output.ndim)] + slicer_to = [ + to_slice if d == dim else slice(None) for d in range(output.ndim) + ] + slicer_from = [ + from_slice if d == dim else slice(None) for d in range(output.ndim) + ] if not 0 == mult: output[slicer_to] = numpy.concatenate([output[slicer_from]] * mult, dim) return output - elif 'mirror' == mode: - dim_slices = [(d, slice(None, l), slice(l + 1, 2 * l + 1)) for d, (l, _) in zip(list(range(output.ndim)), padding_offset) if not 0 == l] - dim_slices.extend([(d, slice(-1 * r, None), slice(-2 * r - 1, -1 * r - 1)) for d, (_, r) in zip(list(range(output.ndim)), padding_offset) if not 0 == r]) + elif "mirror" == mode: + dim_slices = [ + (d, slice(None, l), slice(l + 1, 2 * l + 1)) + for d, (l, _) in zip(list(range(output.ndim)), padding_offset) + if not 0 == l + ] + dim_slices.extend( + [ + (d, slice(-1 * r, None), slice(-2 * r - 1, -1 * r - 1)) + for d, (_, r) in zip(list(range(output.ndim)), padding_offset) + if not 0 == r + ] + ) reverse_slice = slice(None, None, -1) - elif 'reflect' == mode: - dim_slices = [(d, slice(None, l), slice(l, 2 * l)) for d, (l, _) in zip(list(range(output.ndim)), padding_offset) if not 0 == l] - dim_slices.extend([(d, slice(-1 * r, None), slice(-2 * r, -1 * r)) for d, (_, r) in zip(list(range(output.ndim)), padding_offset) if not 0 == r]) + elif "reflect" == mode: + dim_slices = [ + (d, slice(None, l), slice(l, 2 * l)) + for d, (l, _) in zip(list(range(output.ndim)), padding_offset) + if not 0 == l + ] + dim_slices.extend( + [ + (d, slice(-1 * r, None), slice(-2 * r, -1 * r)) + for d, (_, r) in zip(list(range(output.ndim)), padding_offset) + if not 0 == r + ] + ) reverse_slice = slice(None, None, -1) - elif 'wrap' == mode: - dim_slices = [(d, slice(None, l), slice(-1 * (l + r), -1 * r if not 0 == r else None)) for d, (l, r) in zip(list(range(output.ndim)), padding_offset) if not 0 == l] - dim_slices.extend([(d, slice(-1 * r, None), slice(l, r + l)) for d, (l, r) in zip(list(range(output.ndim)), padding_offset) if not 0 == r]) + elif "wrap" == mode: + dim_slices = [ + (d, slice(None, l), slice(-1 * (l + r), -1 * r if not 0 == r else None)) + for d, (l, r) in zip(list(range(output.ndim)), padding_offset) + if not 0 == l + ] + dim_slices.extend( + [ + (d, slice(-1 * r, 
None), slice(l, r + l)) + for d, (l, r) in zip(list(range(output.ndim)), padding_offset) + if not 0 == r + ] + ) reverse_slice = slice(None) else: - raise RuntimeError('boundary mode not supported') + raise RuntimeError("boundary mode not supported") output[input_slicer] = input for dim, to_slice, from_slice in dim_slices: - slicer_reverse = [reverse_slice if d == dim else slice(None) for d in range(output.ndim)] + slicer_reverse = [ + reverse_slice if d == dim else slice(None) for d in range(output.ndim) + ] slicer_to = [to_slice if d == dim else slice(None) for d in range(output.ndim)] - slicer_from = [from_slice if d == dim else slice(None) for d in range(output.ndim)] + slicer_from = [ + from_slice if d == dim else slice(None) for d in range(output.ndim) + ] output[slicer_to] = output[slicer_from][slicer_reverse] return output + def intersection(i1, h1, i2, h2): - r""" - Returns the intersecting parts of two images in real world coordinates. - Takes both, voxelspacing and image offset into account. - - Note that the returned new offset might be inaccurate up to 1/2 voxel size for - each dimension due to averaging. - - Parameters - ---------- - i1 : array_like - i2 : array_like - The two images. - h1 : MedPy image header - h2 : MedPy image header - The corresponding headers. - - Returns - ------- - v1 : ndarray - The intersecting part of ``i1``. - v2 : ndarray - The intersecting part of ``i2``. - offset : tuple of floats - The new offset of ``v1`` and ``v2`` in real world coordinates. - """ - - # compute image bounding boxes in real-world coordinates - os1 = numpy.asarray(header.get_offset(h1)) - ps1 = numpy.asarray(header.get_pixel_spacing(h1)) - bb1 = (os1, numpy.asarray(i1.shape) * ps1 + os1) - - - os2 = numpy.asarray(header.get_offset(h2)) - ps2 = numpy.asarray(header.get_pixel_spacing(h2)) - bb2 = (os2, numpy.asarray(i2.shape) * ps2 + os2) - - # compute intersection - ib = (numpy.maximum(bb1[0], bb2[0]), numpy.minimum(bb1[1], bb2[1])) - - # transfer intersection to respective image coordinates image - ib1 = [ ((ib[0] - os1) / numpy.asarray(ps1)).astype(int), ((ib[1] - os1) / numpy.asarray(ps1)).astype(int) ] - ib2 = [ ((ib[0] - os2) / numpy.asarray(ps2)).astype(int), ((ib[1] - os2) / numpy.asarray(ps2)).astype(int) ] - - # ensure that both sub-volumes are of same size (might be affected by rounding errors); only reduction allowed - s1 = ib1[1] - ib1[0] - s2 = ib2[1] - ib2[0] - d1 = s1 - s2 - d1[d1 > 0] = 0 - d2 = s2 - s1 - d2[d2 > 0] = 0 - ib1[1] -= d1 - ib2[1] -= d2 - - # compute new image offsets (in real-world coordinates); averaged to account for rounding errors due to world-to-voxel mapping - nos1 = ib1[0] * ps1 + os1 # real offset for image 1 - nos2 = ib2[0] * ps2 + os2 # real offset for image 2 - nos = numpy.average([nos1, nos2], 0) - - # build slice lists - sl1 = [slice(l, u) for l, u in zip(*ib1)] - sl2 = [slice(l, u) for l, u in zip(*ib2)] - - return i1[sl1], i2[sl2], nos + r""" + Returns the intersecting parts of two images in real world coordinates. + Takes both, voxelspacing and image offset into account. + + Note that the returned new offset might be inaccurate up to 1/2 voxel size for + each dimension due to averaging. + + Parameters + ---------- + i1 : array_like + i2 : array_like + The two images. + h1 : MedPy image header + h2 : MedPy image header + The corresponding headers. + + Returns + ------- + v1 : ndarray + The intersecting part of ``i1``. + v2 : ndarray + The intersecting part of ``i2``. 
+ offset : tuple of floats + The new offset of ``v1`` and ``v2`` in real world coordinates. + """ + + # compute image bounding boxes in real-world coordinates + os1 = numpy.asarray(header.get_offset(h1)) + ps1 = numpy.asarray(header.get_pixel_spacing(h1)) + bb1 = (os1, numpy.asarray(i1.shape) * ps1 + os1) + + os2 = numpy.asarray(header.get_offset(h2)) + ps2 = numpy.asarray(header.get_pixel_spacing(h2)) + bb2 = (os2, numpy.asarray(i2.shape) * ps2 + os2) + + # compute intersection + ib = (numpy.maximum(bb1[0], bb2[0]), numpy.minimum(bb1[1], bb2[1])) + + # transfer intersection to respective image coordinates image + ib1 = [ + ((ib[0] - os1) / numpy.asarray(ps1)).astype(int), + ((ib[1] - os1) / numpy.asarray(ps1)).astype(int), + ] + ib2 = [ + ((ib[0] - os2) / numpy.asarray(ps2)).astype(int), + ((ib[1] - os2) / numpy.asarray(ps2)).astype(int), + ] + + # ensure that both sub-volumes are of same size (might be affected by rounding errors); only reduction allowed + s1 = ib1[1] - ib1[0] + s2 = ib2[1] - ib2[0] + d1 = s1 - s2 + d1[d1 > 0] = 0 + d2 = s2 - s1 + d2[d2 > 0] = 0 + ib1[1] -= d1 + ib2[1] -= d2 + + # compute new image offsets (in real-world coordinates); averaged to account for rounding errors due to world-to-voxel mapping + nos1 = ib1[0] * ps1 + os1 # real offset for image 1 + nos2 = ib2[0] * ps2 + os2 # real offset for image 2 + nos = numpy.average([nos1, nos2], 0) + + # build slice lists + sl1 = [slice(l, u) for l, u in zip(*ib1)] + sl2 = [slice(l, u) for l, u in zip(*ib2)] + + return i1[sl1], i2[sl2], nos + def __make_footprint(input, size, footprint): "Creates a standard footprint element ala scipy.ndimage." diff --git a/medpy/graphcut/__init__.py b/medpy/graphcut/__init__.py index 3a082e7a..634a8f31 100644 --- a/medpy/graphcut/__init__.py +++ b/medpy/graphcut/__init__.py @@ -196,15 +196,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# import from compile C++ Python module -from .maxflow import GraphDouble, GraphFloat, GraphInt # this always triggers an error in Eclipse, but is right # import all functions/methods/classes into the module -from .graph import Graph, GCGraph -from .write import graph_to_dimacs -from .generate import graph_from_labels, graph_from_voxels -from . import energy_label -from . import energy_voxel + +# import from compile C++ Python module # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/graphcut/energy_label.py b/medpy/graphcut/energy_label.py index 0e21803c..bf8bf2cb 100644 --- a/medpy/graphcut/energy_label.py +++ b/medpy/graphcut/energy_label.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
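A sketch of how intersection() above is typically called, using medpy's own io module to obtain the image/header pairs; the file names are placeholders:

from medpy.filter.utilities import intersection
from medpy.io import load

i1, h1 = load("first_volume.nii.gz")   # hypothetical input volumes
i2, h2 = load("second_volume.nii.gz")

v1, v2, offset = intersection(i1, h1, i2, h2)
print(v1.shape == v2.shape, offset)    # both parts share one shape and offset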
# @@ -22,39 +22,43 @@ import math import sys +import numpy + # third-party modules import scipy.ndimage -import numpy # own modules + # code -def boundary_difference_of_means(graph, label_image, original_image): # label image is not required to hold continuous ids or to start from 1 +def boundary_difference_of_means( + graph, label_image, original_image +): # label image is not required to hold continuous ids or to start from 1 r""" Boundary term based on the difference of means between adjacent image regions. - + An implementation of the boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. - + This simple energy function computes the mean values for all regions. The weights of the edges are then determined by the difference in mean values. - + The graph weights generated have to be strictly positive and preferably in the interval :math:`(0, 1]`. To ensure this, the maximum possible difference in mean values is computed as: - + .. math:: - + \alpha = \|\max \bar{I} - \min \bar{I}\| - + , where :math:`\min \bar{I}` constitutes the lowest mean intensity value of all regions in the image, while :math:`\max \bar{I}` constitutes the highest mean intensity value With this value the weights between a region :math:`x` and its neighbour :math:`y` can be computed: - + .. math:: - + w(x,y) = \max \left( 1 - \frac{\|\bar{I}_x - \bar{I}_y\|}{\alpha}, \epsilon \right) - + where :math:`\epsilon` is the smallest floating point step and thus :math:`w(x,y) \in (0, 1]` holds true. - + Parameters ---------- graph : GCGraph @@ -63,31 +67,33 @@ def boundary_difference_of_means(graph, label_image, original_image): # label im The label image. original_image : ndarray The original image. - + Notes ----- This function requires the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``boundary_term_args`` set to the - original image. - - This function is tested on 2D and 3D images and theoretically works for all dimensionalities. + original image. + + This function is tested on 2D and 3D images and theoretically works for all dimensionalities. """ # convert to arrays if necessary label_image = scipy.asarray(label_image) original_image = scipy.asarray(original_image) - - if label_image.flags['F_CONTIGUOUS']: # strangely one this one is required to be ctype ordering + + if label_image.flags[ + "F_CONTIGUOUS" + ]: # strangely one this one is required to be ctype ordering label_image = scipy.ascontiguousarray(label_image) - + __check_label_image(label_image) - + # create a lookup-table that translates from a label id to its position in the sorted unique vector labels_unique = scipy.unique(label_image) - + # compute the mean intensities of all regions # Note: Bug in mean implementation: means over labels is only computed if the indexes are also supplied means = scipy.ndimage.mean(original_image, labels=label_image, index=labels_unique) - + # compute the maximum possible intensity difference max_difference = float(abs(min(means) - max(means))) @@ -96,48 +102,57 @@ def boundary_difference_of_means(graph, label_image, original_image): # label im # get the adjuncancy of the labels edges = __compute_edges(label_image) - + # compute the difference of means for each adjunct region and add it as a tuple to the dictionary - if 0. 
== max_difference: # special case when the divider is zero and therefore all values can be assured to equal zero + if ( + 0.0 == max_difference + ): # special case when the divider is zero and therefore all values can be assured to equal zero for edge in edges: - graph.set_nweight(edge[0] - 1, edge[1] - 1, sys.float_info.min, sys.float_info.min) - else: + graph.set_nweight( + edge[0] - 1, edge[1] - 1, sys.float_info.min, sys.float_info.min + ) + else: # compute the difference of means for each adjunct region and add it as a tuple to the dictionary for edge in edges: - value = max(1. - abs(means[edge[0]] - means[edge[1]]) / max_difference, sys.float_info.min) + value = max( + 1.0 - abs(means[edge[0]] - means[edge[1]]) / max_difference, + sys.float_info.min, + ) graph.set_nweight(edge[0] - 1, edge[1] - 1, value, value) -def boundary_stawiaski(graph, label_image, gradient_image): # label image is not required to hold continuous ids or to start from 1 +def boundary_stawiaski( + graph, label_image, gradient_image +): # label image is not required to hold continuous ids or to start from 1 r""" Boundary term based on the sum of border voxel pairs differences. - + An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. - + Determines for each two supplied regions the voxels forming their border assuming :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). From the gradient magnitude values of each end-point voxel the border-voxel pairs, the highest one is selected and passed to a strictly positive and decreasing function :math:`g(x)`, which is defined as: - + .. math:: - + g(x) = \left(\frac{1}{1+|x|}\right)^k - + ,where :math:`k=2`. The final weight :math:`w_{i,j}` between two regions :math:`r_i` and :math:`r_j` is then determined by the sum of all these neighbour values: - + .. math:: - + w_{i,j} = \sum_{e_{m,n}\in F_{(r_i,r_j)}}g(\max(|I(m)|,|I(n)|)) - + , where :math:`F_{(r_i,r_j)}` is the set of border voxel-pairs :math:`e_{m,n}` between the regions :math:`r_i` and :math:`r_j` and :math:`|I(p)|` the absolute of the gradient magnitude at the voxel :math:`p` - + This boundary_function works as an edge indicator in the original image. In simpler words the weight (and therefore the energy) is obtained by summing the local contrast along the boundaries between two regions. - + Parameters ---------- graph : GCGraph @@ -146,16 +161,16 @@ def boundary_stawiaski(graph, label_image, gradient_image): # label image is not The label image. Must contain consecutively labelled regions starting from index 1. gradient_image : ndarray The gradient image. - + Notes ----- This function requires the gradient magnitude image of the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. with `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`. - - This function is tested on 2D and 3D images and theoretically works for all dimensionalities. - + + This function is tested on 2D and 3D images and theoretically works for all dimensionalities. + References ---------- .. [1] Stawiaski J., Decenciere E., Bidlaut F. 
"Interactive Liver Tumor Segmentation @@ -164,12 +179,14 @@ def boundary_stawiaski(graph, label_image, gradient_image): # label image is not # convert to arrays if necessary label_image = scipy.asarray(label_image) gradient_image = scipy.asarray(gradient_image) - - if label_image.flags['F_CONTIGUOUS']: # strangely, this one is required to be ctype ordering + + if label_image.flags[ + "F_CONTIGUOUS" + ]: # strangely, this one is required to be ctype ordering label_image = scipy.ascontiguousarray(label_image) - + __check_label_image(label_image) - + for dim in range(label_image.ndim): # prepare slicer for all minus last and all minus first "row" slicer_from = [slice(None)] * label_image.ndim @@ -182,39 +199,45 @@ def boundary_stawiaski(graph, label_image, gradient_image): # label image is not # determine not equal keys valid_edges = keys_from != keys_to # determine largest gradient - gradient_max = numpy.maximum(numpy.abs(gradient_image[slicer_from]), numpy.abs(gradient_image[slicer_to]))[valid_edges] + gradient_max = numpy.maximum( + numpy.abs(gradient_image[slicer_from]), numpy.abs(gradient_image[slicer_to]) + )[valid_edges] # determine key order keys_max = numpy.maximum(keys_from, keys_to)[valid_edges] keys_min = numpy.minimum(keys_from, keys_to)[valid_edges] # set edges / nweights for k1, k2, val in zip(keys_min, keys_max, gradient_max): - weight = math.pow(1./(1. + val), 2) # weight contribution of a single pixel + weight = math.pow( + 1.0 / (1.0 + val), 2 + ) # weight contribution of a single pixel weight = max(weight, sys.float_info.min) - graph.set_nweight(k1 - 1 , k2 - 1, weight, weight) + graph.set_nweight(k1 - 1, k2 - 1, weight, weight) -def boundary_stawiaski_directed(graph, label_image, xxx_todo_changeme): # label image is not required to hold continuous ids or to start from 1 +def boundary_stawiaski_directed( + graph, label_image, xxx_todo_changeme +): # label image is not required to hold continuous ids or to start from 1 r""" Boundary term based on the sum of border voxel pairs differences, directed version. - + An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. - + The basic definition of this term is the same as for `boundary_stawiaski`, but the edges of the created graph will be directed. - + This boundary_function works as an edge indicator in the original image. In simpler words the weight (and therefore the energy) is obtained by summing the local contrast along the boundaries between two regions. - + When the ``directedness`` parameter is set to zero, the resulting graph will be undirected and the behaviour equals `boundary_stawiaski`. When it is set to a positive value, light-to-dark transitions are favored i.e. voxels with a lower intensity (darker) than the objects tend to be assigned to the object. The boundary term is thus changed to: - + .. math:: - + g_{ltd}(x) = \left\{ \begin{array}{l l} g(x) + \beta & \quad \textrm{if $I_i > I_j$}\\ @@ -224,9 +247,9 @@ def boundary_stawiaski_directed(graph, label_image, xxx_todo_changeme): # label With a negative value for ``directedness``, the opposite effect can be achieved i.e. voxels with a higher intensity (lighter) than the objects tend to be assigned to the object. The boundary term is thus changed to - + .. math:: - + g_{dtl} = \left\{ \begin{array}{l l} g(x) & \quad \textrm{if $I_i > I_j$}\\ @@ -237,7 +260,7 @@ def boundary_stawiaski_directed(graph, label_image, xxx_todo_changeme): # label :math:`g_{ltd}` resp. :math:`g_{dtl}`. 
The value :math:`\beta` determines the power of the directedness and corresponds to the absolute value of the supplied ``directedness`` parameter. Experiments showed values between 0.0001 and 0.0003 to be good candidates. - + Parameters ---------- graph : GCGraph @@ -250,60 +273,70 @@ def boundary_stawiaski_directed(graph, label_image, xxx_todo_changeme): # label The weight of the directedness, a positive number to favour light-to-dark and a negative to dark-to-light transitions. See function description for more details. - + Notes ----- This function requires the gradient magnitude image of the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. with `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`. - + This function is tested on 2D and 3D images and theoretically works for all dimensionalities. - + References ---------- .. [1] Stawiaski J., Decenciere E., Bidlaut F. "Interactive Liver Tumor Segmentation - Using Graph-cuts and watershed" MICCAI 2008 participation + Using Graph-cuts and watershed" MICCAI 2008 participation """ (gradient_image, directedness) = xxx_todo_changeme label_image = scipy.asarray(label_image) gradient_image = scipy.asarray(gradient_image) - - if label_image.flags['F_CONTIGUOUS']: # strangely one this one is required to be ctype ordering + + if label_image.flags[ + "F_CONTIGUOUS" + ]: # strangely one this one is required to be ctype ordering label_image = scipy.ascontiguousarray(label_image) - + __check_label_image(label_image) - + beta = abs(directedness) - - def addition_directed_ltd(key1, key2, v1, v2, dic): # for light-to-dark # tested + + def addition_directed_ltd(key1, key2, v1, v2, dic): # for light-to-dark # tested "Takes a key defined by two uints, two voxel intensities and a dict to which it adds g(v1, v2)." - if not key1 == key2: # do not process voxel pairs which belong to the same region + if ( + not key1 == key2 + ): # do not process voxel pairs which belong to the same region # The function used to compute the weight contribution of each voxel pair - weight = math.pow(1./(1. + max(abs(v1), abs(v2))), 2) + weight = math.pow(1.0 / (1.0 + max(abs(v1), abs(v2))), 2) # ensure that no value is zero; this can occur due to rounding errors weight = max(weight, sys.float_info.min) # add weighted values to already existing edge - if v1 > v2: graph.set_nweight(key1 - 1, key2 - 1, min(1, weight + beta), weight) - else: graph.set_nweight(key1 - 1, key2 - 1, weight, min(1, weight + beta)) - - def addition_directed_dtl(key1, key2, v1, v2): # for dark-to-light # tested + if v1 > v2: + graph.set_nweight(key1 - 1, key2 - 1, min(1, weight + beta), weight) + else: + graph.set_nweight(key1 - 1, key2 - 1, weight, min(1, weight + beta)) + + def addition_directed_dtl(key1, key2, v1, v2): # for dark-to-light # tested "Takes a key defined by two uints, two voxel intensities and a dict to which it adds g(v1, v2)." - if not key1 == key2: # do not process voxel pairs which belong to the same region + if ( + not key1 == key2 + ): # do not process voxel pairs which belong to the same region # The function used to compute the weight contribution of each voxel pair - weight = math.pow(1./(1. 
+ max(abs(v1), abs(v2))), 2) + weight = math.pow(1.0 / (1.0 + max(abs(v1), abs(v2))), 2) # ensure that no value is zero; this can occur due to rounding errors weight = max(weight, sys.float_info.min) # add weighted values to already existing edge - if v1 > v2: graph.set_nweight(key1 - 1, key2 - 1, weight, min(1, weight + beta)) - else: graph.set_nweight(key1 - 1, key2 - 1, min(1, weight + beta), weight) - + if v1 > v2: + graph.set_nweight(key1 - 1, key2 - 1, weight, min(1, weight + beta)) + else: + graph.set_nweight(key1 - 1, key2 - 1, min(1, weight + beta), weight) + # pick and vectorize the function to achieve a speedup if 0 > directedness: vaddition = scipy.vectorize(addition_directed_dtl) else: vaddition = scipy.vectorize(addition_directed_ltd) - + # iterate over each dimension for dim in range(label_image.ndim): slices_x = [] @@ -311,18 +344,23 @@ def addition_directed_dtl(key1, key2, v1, v2): # for dark-to-light # tested for di in range(label_image.ndim): slices_x.append(slice(None, -1 if di == dim else None)) slices_y.append(slice(1 if di == dim else None, None)) - vaddition(label_image[slices_x], - label_image[slices_y], - gradient_image[slices_x], - gradient_image[slices_y]) + vaddition( + label_image[slices_x], + label_image[slices_y], + gradient_image[slices_x], + gradient_image[slices_y], + ) -def regional_atlas(graph, label_image, xxx_todo_changeme1): # label image is required to hold continuous ids starting from 1 + +def regional_atlas( + graph, label_image, xxx_todo_changeme1 +): # label image is required to hold continuous ids starting from 1 r""" Regional term based on a probability atlas. - + An implementation of a regional term, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function. - + This regional term introduces statistical probability of a voxel to belong to the object to segment. It computes the sum of all statistical atlas voxels under each region and uses this value as terminal node weight for the graph cut. @@ -336,28 +374,32 @@ def regional_atlas(graph, label_image, xxx_todo_changeme1): # label image is req probability_map : ndarray The probability atlas image associated with the object to segment. alpha : float - The energy terms alpha value, balancing between boundary and regional term. - + The energy terms alpha value, balancing between boundary and regional term. + Notes ----- This function requires a probability atlas image of the same shape as the original image to be passed along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called with ``regional_term_args`` set to the probability atlas image. - - This function is tested on 2D and 3D images and theoretically works for all dimensionalities. + + This function is tested on 2D and 3D images and theoretically works for all dimensionalities. """ (probability_map, alpha) = xxx_todo_changeme1 label_image = scipy.asarray(label_image) probability_map = scipy.asarray(probability_map) __check_label_image(label_image) - + # finding the objects in the label image (bounding boxes around regions) objects = scipy.ndimage.find_objects(label_image) - + # iterate over regions and compute the respective sums of atlas values for rid in range(1, len(objects) + 1): - weight = scipy.sum(probability_map[objects[rid - 1]][label_image[objects[rid - 1]] == rid]) - graph.set_tweight(rid - 1, alpha * weight, -1. * alpha * weight) # !TODO: rid's inside the graph start from 0 or 1? 
=> seems to start from 0 + weight = scipy.sum( + probability_map[objects[rid - 1]][label_image[objects[rid - 1]] == rid] + ) + graph.set_tweight( + rid - 1, alpha * weight, -1.0 * alpha * weight + ) # !TODO: rid's inside the graph start from 0 or 1? => seems to start from 0 # !TODO: I can exclude source and sink nodes from this! # !TODO: I only have to do this in the range of the atlas objects! @@ -369,12 +411,13 @@ def __compute_edges(label_image): supplied region/label image. Note The returned set contains neither duplicates, nor self-references (i.e. (id_1, id_1)), nor reversed references (e.g. (id_1, id_2) and (id_2, id_1). - + @param label_image An image with labeled regions (nD). @param return A set with tuples denoting the edge neighbourhood. """ return __compute_edges_nd(label_image) - + + def __compute_edges_nd(label_image): """ Computes the region neighbourhood defined by a star shaped n-dimensional structuring @@ -382,18 +425,18 @@ def __compute_edges_nd(label_image): supplied region/label image. Note The returned set contains neither duplicates, nor self-references (i.e. (id_1, id_1)), nor reversed references (e.g. (id_1, id_2) and (id_2, id_1). - + @param label_image An image with labeled regions (nD). @param return A set with tuples denoting the edge neighbourhood. """ Er = set() - + def append(v1, v2): if v1 != v2: Er.update([(min(v1, v2), max(v1, v2))]) - + vappend = scipy.vectorize(append) - + for dim in range(label_image.ndim): slices_x = [] slices_y = [] @@ -401,13 +444,18 @@ def append(v1, v2): slices_x.append(slice(None, -1 if di == dim else None)) slices_y.append(slice(1 if di == dim else None, None)) vappend(label_image[slices_x], label_image[slices_y]) - + return Er + def __check_label_image(label_image): """Check the label image for consistent labelling starting from 1.""" encountered_indices = scipy.unique(label_image) expected_indices = scipy.arange(1, label_image.max() + 1) - if not encountered_indices.size == expected_indices.size or \ - not (encountered_indices == expected_indices).all(): - raise AttributeError('The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1.') + if ( + not encountered_indices.size == expected_indices.size + or not (encountered_indices == expected_indices).all() + ): + raise AttributeError( + "The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1." + ) diff --git a/medpy/graphcut/energy_voxel.py b/medpy/graphcut/energy_voxel.py index 54d70426..3aab2c2a 100644 --- a/medpy/graphcut/energy_voxel.py +++ b/medpy/graphcut/energy_voxel.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
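A sketch of how the label-based boundary terms above plug into medpy's graph construction; the arrays are invented and the compiled maxflow module must be available:

import numpy
from medpy.graphcut.energy_label import boundary_difference_of_means
from medpy.graphcut.generate import graph_from_labels

label_image = numpy.asarray([[1, 1, 2],
                             [1, 2, 2],
                             [3, 3, 2]])   # consecutive region ids from 1
original = numpy.asarray([[1.0, 1.0, 9.0],
                          [1.0, 9.0, 9.0],
                          [5.0, 5.0, 9.0]])
fg_markers = numpy.zeros_like(label_image, dtype=bool)
fg_markers[0, 0] = True                    # seed region 1 as object
bg_markers = numpy.zeros_like(label_image, dtype=bool)
bg_markers[0, 2] = True                    # seed region 2 as background

graph = graph_from_labels(label_image, fg_markers, bg_markers,
                          boundary_term=boundary_difference_of_means,
                          boundary_term_args=original)
graph.maxflow()  # runs the min-cut; membership queried via what_segment()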
# @@ -18,29 +18,31 @@ # since 2012-03-23 # status Release +import math + # build-in modules import sys # third-party modules import numpy import scipy -import math # own modules + # code def regional_probability_map(graph, xxx_todo_changeme): r""" Regional term based on a probability atlas. - + An implementation of a regional term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + Takes an image/graph/map as input where each entry contains a probability value for the corresponding GC graph node to belong to the foreground object. The probabilities must be in the range :math:`[0, 1]`. The reverse weights are assigned to the sink (which corresponds to the background). - + Parameters ---------- graph : GCGraph @@ -49,7 +51,7 @@ def regional_probability_map(graph, xxx_todo_changeme): The label image. alpha : float The energy terms alpha value, balancing between boundary and regional term. - + Notes ----- This function requires a probability atlas image of the same shape as the original image @@ -58,20 +60,22 @@ def regional_probability_map(graph, xxx_todo_changeme): """ (probability_map, alpha) = xxx_todo_changeme probability_map = scipy.asarray(probability_map) - probabilities = numpy.vstack([(probability_map * alpha).flat, - ((1 - probability_map) * alpha).flat]).T + probabilities = numpy.vstack( + [(probability_map * alpha).flat, ((1 - probability_map) * alpha).flat] + ).T graph.set_tweights_all(probabilities) + def boundary_maximum_linear(graph, xxx_todo_changeme1): r""" - Boundary term processing adjacent voxels maximum value using a linear relationship. - + Boundary term processing adjacent voxels maximum value using a linear relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + The same as `boundary_difference_linear`, but working on the gradient image instead of the original. See there for details. - + Parameters ---------- graph : GCGraph @@ -82,7 +86,7 @@ def boundary_maximum_linear(graph, xxx_todo_changeme1): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the gradient image to be passed along. That means that @@ -91,57 +95,62 @@ def boundary_maximum_linear(graph, xxx_todo_changeme1): """ (gradient_image, spacing) = xxx_todo_changeme1 gradient_image = scipy.asarray(gradient_image) - + # compute maximum intensity to encounter max_intensity = float(numpy.abs(gradient_image).max()) - + def boundary_term_linear(intensities): """ Implementation of a linear boundary term computation over an array. """ # normalize the intensity distances to the interval (0, 1] intensities /= max_intensity - #difference_to_neighbour[difference_to_neighbour > 1] = 1 # this line should not be required, but might be due to rounding errors - intensities = (1. - intensities) # reverse weights such that high intensity difference lead to small weights and hence more likely to a cut at this edge - intensities[intensities == 0.] 
= sys.float_info.min # required to avoid zero values + # difference_to_neighbour[difference_to_neighbour > 1] = 1 # this line should not be required, but might be due to rounding errors + intensities = ( + 1.0 - intensities + ) # reverse weights such that high intensity difference lead to small weights and hence more likely to a cut at this edge + intensities[ + intensities == 0.0 + ] = sys.float_info.min # required to avoid zero values return intensities - + __skeleton_maximum(graph, gradient_image, boundary_term_linear, spacing) + def boundary_difference_linear(graph, xxx_todo_changeme2): r""" - Boundary term processing adjacent voxels difference value using a linear relationship. - + Boundary term processing adjacent voxels difference value using a linear relationship. + An implementation of a regional term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + Finds all edges between all neighbours of the image and uses their normalized difference in intensity values as edge weight. - + The weights are linearly normalized using the maximum possible intensity difference of the image. Formally, this value is computed as: - + .. math:: - + \sigma = |max I - \min I| - + , where :math:`\min I` constitutes the lowest intensity value in the image, while :math:`\max I` constitutes the highest. - + The weights between two neighbouring voxels :math:`(p, q)` is then computed as: - + .. math:: - + w(p,q) = 1 - \frac{|I_p - I_q|}{\sigma} + \epsilon - + , where :math:`\epsilon` is a infinitively small number and for which :math:`w(p, q) \in (0, 1]` holds true. - + When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice - thickness. Set this parameter to `False` for equally weighted edges. - + thickness. Set this parameter to `False` for equally weighted edges. + Parameters ---------- graph : GCGraph @@ -152,7 +161,7 @@ def boundary_difference_linear(graph, xxx_todo_changeme2): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the original image to be passed along. That means that @@ -161,33 +170,38 @@ def boundary_difference_linear(graph, xxx_todo_changeme2): """ (original_image, spacing) = xxx_todo_changeme2 original_image = scipy.asarray(original_image) - + # compute maximum (possible) intensity difference max_intensity_difference = float(abs(original_image.max() - original_image.min())) - + def boundary_term_linear(intensities): """ Implementation of a linear boundary term computation over an array. """ # normalize the intensity distances to the interval (0, 1] intensities /= max_intensity_difference - #difference_to_neighbour[difference_to_neighbour > 1] = 1 # this line should not be required, but might be due to rounding errors - intensities = (1. - intensities) # reverse weights such that high intensity difference lead to small weights and hence more likely to a cut at this edge - intensities[intensities == 0.] 
= sys.float_info.min # required to avoid zero values + # difference_to_neighbour[difference_to_neighbour > 1] = 1 # this line should not be required, but might be due to rounding errors + intensities = ( + 1.0 - intensities + ) # reverse weights such that high intensity difference lead to small weights and hence more likely to a cut at this edge + intensities[ + intensities == 0.0 + ] = sys.float_info.min # required to avoid zero values return intensities - + __skeleton_difference(graph, original_image, boundary_term_linear, spacing) + def boundary_maximum_exponential(graph, xxx_todo_changeme3): r""" - Boundary term processing adjacent voxels maximum value using an exponential relationship. - + Boundary term processing adjacent voxels maximum value using an exponential relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + The same as `boundary_difference_exponential`, but working on the gradient image instead of the original. See there for details. - + Parameters ---------- graph : GCGraph @@ -200,7 +214,7 @@ def boundary_maximum_exponential(graph, xxx_todo_changeme3): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the gradient image to be passed along. That means that @@ -209,7 +223,7 @@ def boundary_maximum_exponential(graph, xxx_todo_changeme3): """ (gradient_image, sigma, spacing) = xxx_todo_changeme3 gradient_image = scipy.asarray(gradient_image) - + def boundary_term_exponential(intensities): """ Implementation of a exponential boundary term computation over an array. @@ -221,36 +235,37 @@ def boundary_term_exponential(intensities): intensities = scipy.exp(intensities) intensities[intensities <= 0] = sys.float_info.min return intensities - - __skeleton_maximum(graph, gradient_image, boundary_term_exponential, spacing) + + __skeleton_maximum(graph, gradient_image, boundary_term_exponential, spacing) + def boundary_difference_exponential(graph, xxx_todo_changeme4): r""" Boundary term processing adjacent voxels difference value using an exponential relationship. - + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + Finds all edges between all neighbours of the image and uses their difference in intensity values as edge weight. - + The weights are normalized using an exponential function and a smoothing factor :math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its ideal settings differ greatly from application to application. - + The weights between two neighbouring voxels :math:`(p, q)` is then computed as - + .. math:: - + w(p,q) = \exp^{-\frac{|I_p - I_q|^2}{\sigma^2}} - + , for which :math:`w(p, q) \in (0, 1]` holds true. - + When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice - thickness. Set this parameter to `False` for equally weighted edges. - + thickness. Set this parameter to `False` for equally weighted edges. 
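A quick numeric check of the exponential weighting just defined, with invented values:

import numpy

sigma = 10.0
diffs = numpy.asarray([0.0, 5.0, 20.0])
print(numpy.exp(-((diffs / sigma) ** 2)))  # [1.0, ~0.78, ~0.018]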
+ Parameters ---------- graph : GCGraph @@ -263,7 +278,7 @@ def boundary_difference_exponential(graph, xxx_todo_changeme4): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the original image to be passed along. That means that @@ -272,7 +287,7 @@ def boundary_difference_exponential(graph, xxx_todo_changeme4): """ (original_image, sigma, spacing) = xxx_todo_changeme4 original_image = scipy.asarray(original_image) - + def boundary_term_exponential(intensities): """ Implementation of a exponential boundary term computation over an array. @@ -284,19 +299,20 @@ def boundary_term_exponential(intensities): intensities = scipy.exp(intensities) intensities[intensities <= 0] = sys.float_info.min return intensities - + __skeleton_difference(graph, original_image, boundary_term_exponential, spacing) - + + def boundary_maximum_division(graph, xxx_todo_changeme5): r""" - Boundary term processing adjacent voxels maximum value using a division relationship. - + Boundary term processing adjacent voxels maximum value using a division relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + The same as `boundary_difference_division`, but working on the gradient image instead of the original. See there for details. - + Parameters ---------- graph : GCGraph @@ -309,7 +325,7 @@ def boundary_maximum_division(graph, xxx_todo_changeme5): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the gradient image to be passed along. That means that @@ -318,46 +334,47 @@ def boundary_maximum_division(graph, xxx_todo_changeme5): """ (gradient_image, sigma, spacing) = xxx_todo_changeme5 gradient_image = scipy.asarray(gradient_image) - + def boundary_term_division(intensities): """ Implementation of a exponential boundary term computation over an array. """ # apply 1 / (1 + x/sigma) intensities /= sigma - intensities = 1. / (intensities + 1) + intensities = 1.0 / (intensities + 1) intensities[intensities <= 0] = sys.float_info.min return intensities - + __skeleton_difference(graph, gradient_image, boundary_term_division, spacing) - + + def boundary_difference_division(graph, xxx_todo_changeme6): r""" - Boundary term processing adjacent voxels difference value using a division relationship. - + Boundary term processing adjacent voxels difference value using a division relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + Finds all edges between all neighbours of the image and uses their difference in intensity values as edge weight. - + The weights are normalized using an division function and a smoothing factor :math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its ideal settings differ greatly from application to application. - + The weights between two neighbouring voxels :math:`(p, q)` is then computed as - + .. math:: - + w(p,q) = \frac{1}{1 + \frac{|I_p - I_q|}{\sigma}} - + , for which :math:`w(p, q) \in (0, 1]` holds true. 
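The same invented differences pushed through the division weighting above; note the gentler falloff compared to the exponential variant:

import numpy

sigma = 10.0
diffs = numpy.asarray([0.0, 5.0, 20.0])
print(1.0 / (1.0 + diffs / sigma))  # [1.0, ~0.67, ~0.33]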
- + When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice - thickness. Set this parameter to `False` for equally weighted edges. - + thickness. Set this parameter to `False` for equally weighted edges. + Parameters ---------- graph : GCGraph @@ -370,7 +387,7 @@ def boundary_difference_division(graph, xxx_todo_changeme6): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the original image to be passed along. That means that @@ -379,29 +396,30 @@ def boundary_difference_division(graph, xxx_todo_changeme6): """ (original_image, sigma, spacing) = xxx_todo_changeme6 original_image = scipy.asarray(original_image) - + def boundary_term_division(intensities): """ Implementation of a division boundary term computation over an array. """ # apply 1 / (1 + x/sigma) intensities /= sigma - intensities = 1. / (intensities + 1) + intensities = 1.0 / (intensities + 1) intensities[intensities <= 0] = sys.float_info.min return intensities - + __skeleton_difference(graph, original_image, boundary_term_division, spacing) - + + def boundary_maximum_power(graph, xxx_todo_changeme7): """ - Boundary term processing adjacent voxels maximum value using a power relationship. - + Boundary term processing adjacent voxels maximum value using a power relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + The same as `boundary_difference_power`, but working on the gradient image instead of the original. See there for details. - + Parameters ---------- graph : GCGraph @@ -414,56 +432,56 @@ def boundary_maximum_power(graph, xxx_todo_changeme7): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the gradient image to be passed along. That means that `~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the - gradient image. + gradient image. """ (gradient_image, sigma, spacing) = xxx_todo_changeme7 gradient_image = scipy.asarray(gradient_image) - + def boundary_term_power(intensities): """ Implementation of a power boundary term computation over an array. """ # apply (1 / (1 + x))^sigma - intensities = 1. / (intensities + 1) + intensities = 1.0 / (intensities + 1) intensities = scipy.power(intensities, sigma) intensities[intensities <= 0] = sys.float_info.min return intensities - - __skeleton_maximum(graph, gradient_image, boundary_term_power, spacing) - - + + __skeleton_maximum(graph, gradient_image, boundary_term_power, spacing) + + def boundary_difference_power(graph, xxx_todo_changeme8): r""" - Boundary term processing adjacent voxels difference value using a power relationship. - + Boundary term processing adjacent voxels difference value using a power relationship. + An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. - + Finds all edges between all neighbours of the image and uses their difference in intensity values as edge weight. 
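A sketch of wiring one of the maximum-based terms above into the voxel-level graph construction, obtaining the gradient magnitude with the scipy.ndimage recipe the Notes sections recommend; all arrays are invented:

import numpy
from scipy.ndimage import generic_gradient_magnitude, prewitt

from medpy.graphcut.energy_voxel import boundary_maximum_power
from medpy.graphcut.generate import graph_from_voxels

image = numpy.asarray([[1.0, 1.0, 9.0],
                       [1.0, 9.0, 9.0],
                       [1.0, 9.0, 9.0]])
gradient = generic_gradient_magnitude(image, prewitt)

fg_markers = numpy.zeros_like(image, dtype=bool)
fg_markers[0, 0] = True
bg_markers = numpy.zeros_like(image, dtype=bool)
bg_markers[2, 2] = True

graph = graph_from_voxels(fg_markers, bg_markers,
                          boundary_term=boundary_maximum_power,
                          boundary_term_args=(gradient, 2.0, False))
graph.maxflow()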
- + The weights are normalized using a power function and a smoothing factor :math:`\sigma`. The :math:`\sigma` value has to be supplied manually, since its ideal settings differ greatly from application to application. - + The weight between two neighbouring voxels :math:`(p, q)` is then computed as - + .. math:: - + w(p,q) = \left(\frac{1}{1 + |I_p - I_q|}\right)^\sigma - + , for which :math:`w(p, q) \in (0, 1]` holds true. - + When the created edge weights should be weighted according to the slice distance, provide the list of slice thicknesses via the ``spacing`` parameter. Then all weights computed for the corresponding direction are divided by the respective slice - thickness. Set this parameter to `False` for equally weighted edges. - + Parameters ---------- graph : GCGraph @@ -476,7 +494,7 @@ def boundary_difference_power(graph, xxx_todo_changeme8): A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. - + Notes ----- This function requires the original image to be passed along. That means that @@ -485,36 +503,37 @@ def boundary_difference_power(graph, xxx_todo_changeme8): """ (original_image, sigma, spacing) = xxx_todo_changeme8 original_image = scipy.asarray(original_image) - + def boundary_term_power(intensities): """ Implementation of a power boundary term computation over an array. """ # apply (1 / (1 + x))^sigma - intensities = 1. / (intensities + 1) + intensities = 1.0 / (intensities + 1) intensities = scipy.power(intensities, sigma) intensities[intensities <= 0] = sys.float_info.min return intensities - - __skeleton_difference(graph, original_image, boundary_term_power, spacing) + + __skeleton_difference(graph, original_image, boundary_term_power, spacing) + def __skeleton_maximum(graph, image, boundary_term, spacing): """ A skeleton for the calculation of maximum intensity based boundary terms. - + This function is equivalent to energy_voxel.__skeleton_difference(), but uses the maximum intensity rather than the intensity difference of neighbouring voxels. It is therefore suitable to be used with the gradient image, rather than the original image. - + The computation of the edge weights follows - + .. math:: - + w(p,q) = g(max(I_p, I_q)) - + ,where :math:`g(\cdot)` is the supplied boundary term function. - + @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @@ -525,47 +544,48 @@ def __skeleton_maximum(graph, image, boundary_term, spacing): @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. - @param spacing sequence | False - + @param spacing sequence | False + @see energy_voxel.__skeleton_difference() for more details. """ + def intensity_maximum(neighbour_one, neighbour_two): """ Takes two voxel arrays constituting neighbours and computes the maximum between their intensities. """ return scipy.maximum(neighbour_one, neighbour_two) - + __skeleton_base(graph, numpy.abs(image), boundary_term, intensity_maximum, spacing) - + def __skeleton_difference(graph, image, boundary_term, spacing): """ A skeleton for the calculation of intensity difference based boundary terms.
- + Iterates over the image's dimensions and generates for each an array of absolute neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p - I_q|`. These are then passed to the supplied function :math:`g(\cdot)` for boundary term computation. Finally the returned edge weights are added to the graph. - + Formally for each edge :math:`(p, q)` of the image, its edge weight is computed as - + .. math:: - + w(p,q) = g(|I_p - I_q|) - + ,where :math:`g(\cdot)` is the supplied boundary term function. - + The boundary term function has to take an array of intensity differences as its only parameter and return an array of the same shape containing the edge weights. For the implemented function the condition :math:`g(\cdot)\in(0, 1]` must hold true, i.e., it has to be strictly positive with :math:`1` as the upper limit. - - @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc. - + + @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc. + @note This function is able to work with images of arbitrary dimensions, but was only tested for 2D and 3D cases. - + @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @@ -576,23 +596,25 @@ def __skeleton_difference(graph, image, boundary_term, spacing): @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. - @param spacing sequence | False + @param spacing sequence | False """ + def intensity_difference(neighbour_one, neighbour_two): """ Takes two voxel arrays constituting neighbours and computes the absolute intensity differences. """ return scipy.absolute(neighbour_one - neighbour_two) - + __skeleton_base(graph, image, boundary_term, intensity_difference, spacing) + def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing): """ Base of the skeleton for voxel based boundary term calculation. - + This function holds the low level procedures shared by nearly all boundary terms.
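The offset-slicing these skeletons rely on can be restated in a few self-contained lines; a sketch under the same conventions (hypothetical helper name; slicer lists wrapped in tuples as current numpy requires):

    import numpy

    def neighbour_differences(image, dim):
        # offset the array against itself by one voxel along `dim` to obtain
        # |I_p - I_q| for every neighbour pair in that direction
        exclude_last = [slice(None)] * image.ndim
        exclude_last[dim] = slice(None, -1)
        exclude_first = [slice(None)] * image.ndim
        exclude_first[dim] = slice(1, None)
        return numpy.absolute(image[tuple(exclude_last)] - image[tuple(exclude_first)])

    img = numpy.arange(12.0).reshape(3, 4)
    print(neighbour_differences(img, 0).shape)  # (2, 4): 8 edges along axis 0
    print(neighbour_differences(img, 1).shape)  # (3, 3): 9 edges along axis 1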
- + @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image containing the voxel intensity values @@ -620,7 +642,9 @@ def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing slices_exclude_first = [slice(None)] * image.ndim slices_exclude_first[dim] = slice(1, None) # compute difference between all layers in the current dimensions direction - neighbourhood_intensity_term = neighbourhood_function(image[slices_exclude_last], image[slices_exclude_first]) + neighbourhood_intensity_term = neighbourhood_function( + image[slices_exclude_last], image[slices_exclude_first] + ) # apply boundary term neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term) # compute key offset for relative key difference @@ -629,16 +653,18 @@ def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing # generate index offset function for index dependent offset idx_offset_divider = (image.shape[dim] - 1) * offset idx_offset = lambda x: int(x / idx_offset_divider) * offset - + # weight the computed distanced in dimension dim by the corresponding slice spacing provided - if spacing: neighbourhood_intensity_term /= spacing[dim] - + if spacing: + neighbourhood_intensity_term /= spacing[dim] + for key, value in enumerate(neighbourhood_intensity_term.ravel()): # apply index dependent offset - key += idx_offset(key) + key += idx_offset(key) # add edges and set the weight - graph.set_nweight(key, key + offset, value, value) - + graph.set_nweight(key, key + offset, value, value) + + def __flatten_index(pos, shape): """ Takes a three dimensional index (x,y,z) and computes the index required to access the @@ -650,4 +676,3 @@ def __flatten_index(pos, shape): res += pi * acc acc *= si return res - \ No newline at end of file diff --git a/medpy/graphcut/generate.py b/medpy/graphcut/generate.py index 77a7bf4f..b1c6d31a 100644 --- a/medpy/graphcut/generate.py +++ b/medpy/graphcut/generate.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -24,29 +24,33 @@ # third-party modules import scipy +from medpy.graphcut.energy_label import __check_label_image + # own modules from ..core import Logger from .graph import GCGraph -from medpy.graphcut.energy_label import __check_label_image -def graph_from_voxels(fg_markers, - bg_markers, - regional_term = False, - boundary_term = False, - regional_term_args = False, - boundary_term_args = False): + +def graph_from_voxels( + fg_markers, + bg_markers, + regional_term=False, + boundary_term=False, + regional_term_args=False, + boundary_term_args=False, +): """ Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood. - + Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a :math:`ndim * 2` neighbourhood. - + Every voxel of the image is regarded as a node. They are connected to their immediate neighbours via arcs. 
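A minimal usage sketch of graph_from_voxels, assuming the usual package-level export and an arbitrarily chosen sigma; shapes, seeds and values are illustrative only:

    import numpy
    from medpy.graphcut import graph_from_voxels  # assumed package-level export
    from medpy.graphcut.energy_voxel import boundary_difference_exponential

    image = numpy.random.rand(16, 16, 8)                     # hypothetical volume
    fg = numpy.zeros(image.shape, bool); fg[8, 8, 4] = True  # foreground seed
    bg = numpy.zeros(image.shape, bool); bg[0, 0, 0] = True  # background seed

    graph = graph_from_voxels(
        fg, bg,
        boundary_term=boundary_difference_exponential,
        boundary_term_args=(image, 15.0, False),  # (original image, sigma, no spacing)
    )
    graph.maxflow()  # executes the cut
    result = numpy.asarray(
        [graph.termtype.SINK != graph.what_segment(int(i)) for i in range(image.size)]
    ).reshape(image.shape)  # True = source side = foreground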
If to voxels are neighbours is determined using :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arcs weights (n-weights) are computed using the supplied ``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). - + Implicitly the graph holds two additional nodes: the source and the sink, so called terminal nodes. These are connected with all other nodes through arcs of an initial weight (t-weight) of zero. @@ -54,10 +58,10 @@ def graph_from_voxels(fg_markers, to the source: The t-weight of the arc from source to these nodes is set to a maximum value. The same goes for the background markers: The covered voxels receive a maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink. - + All other t-weights are set using the supplied ``regional_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). - + Parameters ---------- fg_markers : ndarray @@ -83,105 +87,126 @@ def graph_from_voxels(fg_markers, the function via the ``boundary_term_args`` parameter. regional_term_args : tuple Use this to pass some additional parameters to the ``regional_term`` function. - boundary_term_args : tuple + boundary_term_args : tuple Use this to pass some additional parameters to the ``boundary_term`` function. - + Returns ------- graph : `~medpy.graphcut.maxflow.GraphDouble` The created graph, ready to execute the graph-cut. - + Raises ------ AttributeError If an argument is malformed. FunctionError If one of the supplied functions returns unexpected results. - + Notes ----- If a voxel is marked as both, foreground and background, the background marker is given higher priority. - + All arcs whose weight is not explicitly set are assumed to carry a weight of zero. """ # prepare logger logger = Logger.getInstance() - + # prepare result graph - logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape)) + logger.debug( + "Assuming {} nodes and {} edges for image of shape {}".format( + fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape + ) + ) graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape)) - - logger.info('Performing attribute tests...') - + + logger.info("Performing attribute tests...") + # check, set and convert all supplied parameters fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_) bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_) - + # set dummy functions if not supplied - if not regional_term: regional_term = __regional_term_voxel - if not boundary_term: boundary_term = __boundary_term_voxel - + if not regional_term: + regional_term = __regional_term_voxel + if not boundary_term: + boundary_term = __boundary_term_voxel + # check supplied functions and their signature - if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]): - raise AttributeError('regional_term has to be a callable object which takes two parameter.') - if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]): - raise AttributeError('boundary_term has to be a callable object which takes two parameters.') - - logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size, - len(fg_markers.ravel().nonzero()[0]), - len(bg_markers.ravel().nonzero()[0]))) - + if not hasattr(regional_term, "__call__") or not 2 == len( + inspect.getargspec(regional_term)[0] + ): + raise AttributeError( + 
"regional_term has to be a callable object which takes two parameter." + ) + if not hasattr(boundary_term, "__call__") or not 2 == len( + inspect.getargspec(boundary_term)[0] + ): + raise AttributeError( + "boundary_term has to be a callable object which takes two parameters." + ) + + logger.debug( + "#nodes={}, #hardwired-nodes source/sink={}/{}".format( + fg_markers.size, + len(fg_markers.ravel().nonzero()[0]), + len(bg_markers.ravel().nonzero()[0]), + ) + ) + # compute the weights of all edges from the source and to the sink i.e. # compute the weights of the t_edges Wt - logger.info('Computing and adding terminal edge weights...') + logger.info("Computing and adding terminal edge weights...") regional_term(graph, regional_term_args) # compute the weights of the edges between the neighbouring nodes i.e. # compute the weights of the n_edges Wr - logger.info('Computing and adding inter-node edge weights...') + logger.info("Computing and adding inter-node edge weights...") boundary_term(graph, boundary_term_args) - + # collect all voxels that are under the foreground resp. background markers i.e. # collect all nodes that are connected to the source resp. sink - logger.info('Setting terminal weights for the markers...') + logger.info("Setting terminal weights for the markers...") if not 0 == scipy.count_nonzero(fg_markers): graph.set_source_nodes(fg_markers.ravel().nonzero()[0]) if not 0 == scipy.count_nonzero(bg_markers): - graph.set_sink_nodes(bg_markers.ravel().nonzero()[0]) - + graph.set_sink_nodes(bg_markers.ravel().nonzero()[0]) + return graph.get_graph() -def graph_from_labels(label_image, - fg_markers, - bg_markers, - regional_term = False, - boundary_term = False, - regional_term_args = False, - boundary_term_args = False): + +def graph_from_labels( + label_image, + fg_markers, + bg_markers, + regional_term=False, + boundary_term=False, + regional_term_args=False, + boundary_term_args=False, +): """ Create a graph-cut ready graph to segment a nD image using the region neighbourhood. - + Create a `~medpy.graphcut.maxflow.GraphDouble` object for all regions of a nD label image. - + Every region of the label image is regarded as a node. They are connected to their immediate neighbours by arcs. If to regions are neighbours is determined using :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arcs weights (n-weights) are computed using the supplied ``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). - + Implicitly the graph holds two additional nodes: the source and the sink, so called terminal nodes. These are connected with all other nodes through arcs of an initial weight (t-weight) of zero. All regions that are under the foreground markers are considered to be tightly bound - to the source: The t-weight of the arc from source to these nodes is set to a maximum + to the source: The t-weight of the arc from source to these nodes is set to a maximum value. The same goes for the background markers: The covered regions receive a maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink. - + All other t-weights are set using the supplied ``regional_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection). - + Parameters ---------- label_image: ndarray @@ -211,121 +236,149 @@ def graph_from_labels(label_image, can be passed to the function via the ``boundary_term_args`` parameter. regional_term_args : tuple Use this to pass some additional parameters to the ``regional_term`` function. 
- boundary_term_args : tuple + boundary_term_args : tuple Use this to pass some additional parameters to the ``boundary_term`` function. Returns ------- graph : `~medpy.graphcut.maxflow.GraphDouble` The created graph, ready to execute the graph-cut. - + Raises ------ AttributeError If an argument is malformed. FunctionError If one of the supplied functions returns unexpected results. - + Notes ----- If a voxel is marked as both, foreground and background, the background marker is given higher priority. - - All arcs whose weight is not explicitly set are assumed to carry a weight of zero. - """ + + All arcs whose weight is not explicitly set are assumed to carry a weight of zero. + """ # prepare logger logger = Logger.getInstance() - - logger.info('Performing attribute tests...') - + + logger.info("Performing attribute tests...") + # check, set and convert all supplied parameters label_image = scipy.asarray(label_image) fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_) bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_) - + __check_label_image(label_image) - + # set dummy functions if not supplied - if not regional_term: regional_term = __regional_term_label - if not boundary_term: boundary_term = __boundary_term_label - + if not regional_term: + regional_term = __regional_term_label + if not boundary_term: + boundary_term = __boundary_term_label + # check supplied functions and their signature - if not hasattr(regional_term, '__call__') or not 3 == len(inspect.getargspec(regional_term)[0]): - raise AttributeError('regional_term has to be a callable object which takes three parameters.') - if not hasattr(boundary_term, '__call__') or not 3 == len(inspect.getargspec(boundary_term)[0]): - raise AttributeError('boundary_term has to be a callable object which takes three parameters.') - - logger.info('Determining number of nodes and edges.') - + if not hasattr(regional_term, "__call__") or not 3 == len( + inspect.getargspec(regional_term)[0] + ): + raise AttributeError( + "regional_term has to be a callable object which takes three parameters." + ) + if not hasattr(boundary_term, "__call__") or not 3 == len( + inspect.getargspec(boundary_term)[0] + ): + raise AttributeError( + "boundary_term has to be a callable object which takes three parameters." + ) + + logger.info("Determining number of nodes and edges.") + # compute number of nodes and edges nodes = len(scipy.unique(label_image)) # POSSIBILITY 1: guess the number of edges (in the best situation is faster but requires a little bit more memory. In the worst is slower.) 
edges = 10 * nodes - logger.debug('guessed: #nodes={} nodes / #edges={}'.format(nodes, edges)) + logger.debug("guessed: #nodes={} nodes / #edges={}".format(nodes, edges)) # POSSIBILITY 2: compute the edges (slow) - #edges = len(__compute_edges(label_image)) - #logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges)) - + # edges = len(__compute_edges(label_image)) + # logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges)) + # prepare result graph graph = GCGraph(nodes, edges) - - logger.debug('#hardwired-nodes source/sink={}/{}'.format(len(scipy.unique(label_image[fg_markers])), - len(scipy.unique(label_image[bg_markers])))) - - #logger.info('Extracting the regions bounding boxes...') + + logger.debug( + "#hardwired-nodes source/sink={}/{}".format( + len(scipy.unique(label_image[fg_markers])), + len(scipy.unique(label_image[bg_markers])), + ) + ) + + # logger.info('Extracting the regions bounding boxes...') # extract the bounding boxes - #bounding_boxes = find_objects(label_image) - + # bounding_boxes = find_objects(label_image) + # compute the weights of all edges from the source and to the sink i.e. # compute the weights of the t_edges Wt - logger.info('Computing and adding terminal edge weights...') - #regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes()) - regional_term(graph, label_image, regional_term_args) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args) + logger.info("Computing and adding terminal edge weights...") + # regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes()) + regional_term( + graph, label_image, regional_term_args + ) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args) # compute the weights of the edges between the neighbouring nodes i.e. # compute the weights of the n_edges Wr - logger.info('Computing and adding inter-node edge weights...') + logger.info("Computing and adding inter-node edge weights...") boundary_term(graph, label_image, boundary_term_args) - + # collect all regions that are under the foreground resp. background markers i.e. # collect all nodes that are connected to the source resp. sink - logger.info('Setting terminal weights for the markers...') - graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) # requires -1 to adapt to node id system + logger.info("Setting terminal weights for the markers...") + graph.set_source_nodes( + scipy.unique(label_image[fg_markers] - 1) + ) # requires -1 to adapt to node id system graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1)) - + return graph.get_graph() + def __regional_term_voxel(graph, regional_term_args): """Fake regional_term function with the appropriate signature.""" return {} + def __regional_term_label(graph, label_image, regional_term_args): """Fake regional_term function with the appropriate signature.""" return {} + def __boundary_term_voxel(graph, boundary_term_args): """Fake regional_term function with the appropriate signature.""" # supplying no boundary term contradicts the whole graph cut idea. return {} + def __boundary_term_label(graph, label_image, boundary_term_args): """Fake regional_term function with the appropriate signature.""" # supplying no boundary term contradicts the whole graph cut idea. 
return {} - + + def __voxel_4conectedness(shape): """ Returns the number of edges for the supplied image shape assuming 4-connectedness. - + The name of the function has historical reasons. Essentially it returns the number of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness, etc. - + @param shape the shape of the image @type shape sequence @return the number of edges @rtype int """ shape = list(shape) - while 1 in shape: shape.remove(1) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array) - return int(round(sum([(dim - 1)/float(dim) for dim in shape]) * scipy.prod(shape))) + while 1 in shape: + shape.remove( + 1 + ) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array) + return int( + round(sum([(dim - 1) / float(dim) for dim in shape]) * scipy.prod(shape)) + ) diff --git a/medpy/graphcut/graph.py b/medpy/graphcut/graph.py index 8e3851f6..3031a828 100644 --- a/medpy/graphcut/graph.py +++ b/medpy/graphcut/graph.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -23,59 +23,60 @@ # third-party modules # own modules -from .maxflow import GraphDouble, GraphFloat +from .maxflow import GraphDouble + # code class Graph(object): r""" Represents a graph suitable for further processing with the graphcut package. - + The graph contains nodes, edges (directed) between the nodes (n-edges), edges between two terminals (called source and sink) and the nodes (t-edges), and a - weight for each edge. - + weight for each edge. + Notes ----- The node-ids used by the graph are assumed to start with 1 and be continuous. This is not actually checked, except when calling the inconsistent() method, so be careful. """ - + # @var __INT_16_BIT The maximum value of signed int 16bit. __INT_16_BIT = 32767 # @var __UINT_16_BIT: The maximum value of unsigned int 16bit. __UINT_16_BIT = 65535 # @var MAX The maximum value a weight can take. MAX = __UINT_16_BIT - + def __init__(self): self.__nodes = 0 self.__snodes = [] self.__tnodes = [] self.__nweights = {} self.__tweights = {} - + def set_nodes(self, nodes): r""" Set the number of graph nodes (starting from node-id = 1), excluding sink and source. - + Parameters ---------- nodes : int Number of nodes """ self.__nodes = int(nodes) - + def set_source_nodes(self, source_nodes): r""" Set the source nodes and compute their t-weights. - + Parameters ---------- source_nodes : sequence of integers Declare the source nodes via their ids. - + Notes ----- It does not get checked if one of the supplied source-nodes already has @@ -85,20 +86,20 @@ def set_source_nodes(self, source_nodes): the graph-cut result. 
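The edge-count estimate that __voxel_4conectedness above evaluates has a simple closed form; a hand-checkable sketch (hypothetical helper name):

    def grid_edge_count(shape):
        # n-edges of a ndim*2-connected grid: each axis contributes
        # (length - 1) * (product of the remaining axis lengths) neighbour pairs
        total = 1
        for s in shape:
            total *= s
        return sum((s - 1) * (total // s) for s in shape)

    print(grid_edge_count((3, 4)))     # 17 = 9 edges along one axis + 8 along the other
    print(grid_edge_count((3, 4, 5)))  # 133 for the 6-connected 3D case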
""" self.__snodes = list(source_nodes) - + # set the source-to-node weights (t-weights) for snode in self.__snodes: - self.__tweights[snode] = (self.MAX, 0) # (weight-to-source, weight-to-sink) - + self.__tweights[snode] = (self.MAX, 0) # (weight-to-source, weight-to-sink) + def set_sink_nodes(self, sink_nodes): r""" Set the sink nodes and compute their t-weights. - + Parameters ---------- sink_nodes : sequence of integers Declare the sink nodes via their ids. - + Notes ----- It does not get checked if one of the supplied sink-nodes already has @@ -108,130 +109,130 @@ def set_sink_nodes(self, sink_nodes): the graph-cut result. """ self.__tnodes = list(sink_nodes) - + # set the source-to-node weights (t-weights) for tnode in self.__tnodes: - self.__tweights[tnode] = (0, self.MAX) # (weight-to-source, weight-to-sink) - + self.__tweights[tnode] = (0, self.MAX) # (weight-to-source, weight-to-sink) + def set_nweights(self, nweights): r""" Sets all n-weights. - + Parameters ---------- nweights : dict A dictionary with (node-id, node-id) tuples as keys and (weight-a-to-b, weight-b-to-a) as values. """ self.__nweights = nweights - + def add_tweights(self, tweights): r""" Adds t-weights to the current collection of t-weights, overwriting already existing ones. - + Parameters ---------- tweights : dict A dictionary with node_ids as keys and (weight-to-source, weight-to-sink) tuples as values. - + Notes ----- The weights for nodes directly connected to either the source or the sink are best set using `set_source_nodes` or `set_sink_nodes` to ensure consistency of their maximum values. """ - self.__tweights.update(tweights) - + self.__tweights.update(tweights) + def get_node_count(self): r""" Get the number of nodes. - + Returns ------- node_count : int The number of nodes (excluding sink and source). """ return self.__nodes - + def get_nodes(self): r""" Get the nodes. - + Returns ------- nodes : list All nodes as an ordered list. """ return list(range(1, self.__nodes + 1)) - + def get_source_nodes(self): r""" Get the source nodes. - + Returns ------- source_nodes : list All nodes that are connected with the source as an unordered list (excluding sink and source). """ return self.__snodes - + def get_sink_nodes(self): r""" Get the sink nodes. - + Returns ------- sink_nodes : list All nodes that are connected with the sink as an unordered list (excluding sink and source). """ return self.__tnodes - + def get_edges(self): r""" Get the edges. - + Returns ------- edges : list All edges as ordered list of tuples (i.e. [(node_id1, node_id2), (..), ...]. """ return list(self.__nweights.keys()) - + def get_nweights(self): r""" Get the nweights. - + Returns ------- nweights : dict All n-weights (inter-node weights) as {edge-tuple: (weight, weight_reverersed)...} dict. """ return self.__nweights - + def get_tweights(self): r""" Get the tweights. - + Returns ------- tweights : dict All t-weights (terminal-node weights) as {node_id: (weight-source-node, weight-node-sink), ...} dict. - + Notes ----- Returns only the t-weights that have been set so far. For nodes with unset t-weight, no entry is returned. """ return self.__tweights - + def inconsistent(self): r""" Perform some consistency tests on the graph represented by this object - + Returns ------- consistent : bool or list False if consistent, else a list of inconsistency messages. 
- + Notes ----- This check is very time intensive and should not be executed on huge @@ -239,35 +240,45 @@ def inconsistent(self): """ messages = [] for node in list(self.__tweights.keys()): - if not node <= self.__nodes: messages.append("Node {} in t-weights but not in nodes.".format(node)) + if not node <= self.__nodes: + messages.append("Node {} in t-weights but not in nodes.".format(node)) for node in self.__snodes: - if not node <= self.__nodes: messages.append("Node {} in s-nodes but not in nodes.".format(node)) + if not node <= self.__nodes: + messages.append("Node {} in s-nodes but not in nodes.".format(node)) for node in self.__tnodes: - if not node <= self.__nodes: messages.append("Node {} in t-nodes but not in nodes.".format(node)) + if not node <= self.__nodes: + messages.append("Node {} in t-nodes but not in nodes.".format(node)) for e in list(self.__nweights.keys()): - if not e[0] <= self.__nodes: messages.append("Node {} in edge {} but not in nodes.".format(e[0], e)) - if not e[1] <= self.__nodes: messages.append("Node {} in edge {} but not in nodes.".format(e[1], e)) - if (e[1], e[0]) in iter(list(self.__nweights.keys())): messages.append("The reversed edges of {} is also in the n-weights.".format(e)) - - - if 0 == len(messages): return False - else: return messages - + if not e[0] <= self.__nodes: + messages.append("Node {} in edge {} but not in nodes.".format(e[0], e)) + if not e[1] <= self.__nodes: + messages.append("Node {} in edge {} but not in nodes.".format(e[1], e)) + if (e[1], e[0]) in iter(list(self.__nweights.keys())): + messages.append( + "The reversed edges of {} is also in the n-weights.".format(e) + ) + + if 0 == len(messages): + return False + else: + return messages + + class GCGraph: r""" A graph representation that works directly with the maxflow.GraphDouble graph as base. It is therefore less flexible as graph.Graph, but leads to lower memory requirements. - + The graph contains nodes, edges (directed) between the nodes (n-edges), edges between two terminals (called source and sink) and the nodes (t-edges), and a - weight for each edge. - + weight for each edge. + Notes ----- The node-ids used by the graph are assumed to start with 0 and be continuous. This is not actually checked, so be careful. - + This wrapper tries to catch the most usual exception that can occur in the underlying C++ implementation and to convert them into catchable and meaningful error messages. @@ -276,14 +287,14 @@ class GCGraph: __INT_16_BIT = 32767 # @var __UINT_16_BIT: The maximum value of unsigned int 16bit. __UINT_16_BIT = 65535 - + MAX = __UINT_16_BIT """The maximum value a terminal weight can take.""" - + def __init__(self, nodes, edges): r""" Initialize. - + Parameters ---------- nodes : int @@ -295,23 +306,23 @@ def __init__(self, nodes, edges): self.__graph.add_node(nodes) self.__nodes = nodes self.__edges = edges - + def set_source_nodes(self, source_nodes): r""" Set multiple source nodes and compute their t-weights. - + Parameters ---------- source_nodes : sequence of integers Declare the source nodes via their ids. - + Raises ------ - ValueError + ValueError If a passed node id does not refer to any node of the graph (i.e. it is either higher than the initially set number of nodes or lower than zero). - + Notes ----- It does not get checked if one of the supplied source-nodes already has @@ -321,27 +332,33 @@ def set_source_nodes(self, source_nodes): the graph-cut result. 
""" if max(source_nodes) >= self.__nodes or min(source_nodes) < 0: - raise ValueError('Invalid node id of {} or {}. Valid values are 0 to {}.'.format(max(source_nodes), min(source_nodes), self.__nodes - 1)) + raise ValueError( + "Invalid node id of {} or {}. Valid values are 0 to {}.".format( + max(source_nodes), min(source_nodes), self.__nodes - 1 + ) + ) # set the source-to-node weights (t-weights) for snode in source_nodes: - self.__graph.add_tweights(int(snode), self.MAX, 0) # (weight-to-source, weight-to-sink) - + self.__graph.add_tweights( + int(snode), self.MAX, 0 + ) # (weight-to-source, weight-to-sink) + def set_sink_nodes(self, sink_nodes): r""" Set multiple sink nodes and compute their t-weights. - + Parameters ---------- sink_nodes : sequence of integers Declare the sink nodes via their ids. - + Raises ------ - ValueError + ValueError If a passed node id does not refer to any node of the graph (i.e. it is either higher than the initially set number of - nodes or lower than zero). - + nodes or lower than zero). + Notes ----- It does not get checked if one of the supplied sink-nodes already has @@ -351,15 +368,21 @@ def set_sink_nodes(self, sink_nodes): the graph-cut result. """ if max(sink_nodes) >= self.__nodes or min(sink_nodes) < 0: - raise ValueError('Invalid node id of {} or {}. Valid values are 0 to {}.'.format(max(sink_nodes), min(sink_nodes), self.__nodes - 1)) + raise ValueError( + "Invalid node id of {} or {}. Valid values are 0 to {}.".format( + max(sink_nodes), min(sink_nodes), self.__nodes - 1 + ) + ) # set the node-to-sink weights (t-weights) for snode in sink_nodes: - self.__graph.add_tweights(int(snode), 0, self.MAX) # (weight-to-source, weight-to-sink) - + self.__graph.add_tweights( + int(snode), 0, self.MAX + ) # (weight-to-source, weight-to-sink) + def set_nweight(self, node_from, node_to, weight_there, weight_back): r""" Set a single n-weight / edge-weight. - + Parameters ---------- node_from : int @@ -367,10 +390,10 @@ def set_nweight(self, node_from, node_to, weight_there, weight_back): node_to : int Node-id from the second node of the edge. weight_there : float - Weight from first to second node (>0). + Weight from first to second node (>0). weight_back : float Weight from second to first node (>0). - + Raises ------ ValueError @@ -382,50 +405,64 @@ def set_nweight(self, node_from, node_to, weight_there, weight_back): not allow self-edges). ValueError If one of the passed weights is <= 0. - + Notes ----- The object does not check if the number of supplied edges in total exceeds the number passed to the init-method. If this is the case, the underlying C++ implementation will double the memory, which is very unefficient. - + The underlying C++ implementation allows zero weights, but these are highly undesirable for inter-node weights and therefore raise an error. """ if node_from >= self.__nodes or node_from < 0: - raise ValueError('Invalid node id (node_from) of {}. Valid values are 0 to {}.'.format(node_from, self.__nodes - 1)) + raise ValueError( + "Invalid node id (node_from) of {}. Valid values are 0 to {}.".format( + node_from, self.__nodes - 1 + ) + ) elif node_to >= self.__nodes or node_to < 0: - raise ValueError('Invalid node id (node_to) of {}. Valid values are 0 to {}.'.format(node_to, self.__nodes - 1)) + raise ValueError( + "Invalid node id (node_to) of {}. 
Valid values are 0 to {}.".format( + node_to, self.__nodes - 1 + ) + ) elif node_from == node_to: - raise ValueError('The node_from ({}) can not be equal to the node_to ({}) (self-connections are forbidden in graph cuts).'.format(node_from, node_to)) + raise ValueError( + "The node_from ({}) can not be equal to the node_to ({}) (self-connections are forbidden in graph cuts).".format( + node_from, node_to + ) + ) elif weight_there <= 0 or weight_back <= 0: - raise ValueError('Negative or zero weights are not allowed.') - self.__graph.sum_edge(int(node_from), int(node_to), float(weight_there), float(weight_back)) - + raise ValueError("Negative or zero weights are not allowed.") + self.__graph.sum_edge( + int(node_from), int(node_to), float(weight_there), float(weight_back) + ) + def set_nweights(self, nweights): r""" Set multiple n-weights / edge-weights. - + Parameters ---------- nweights : dict A dictionary with (node-id, node-id) tuples as keys and (weight-a-to-b, weight-b-to-a) as values. - + Notes ----- The object does not check if the number of supplied edges in total exceeds the number passed to the init-method. If this is the case, the underlying C++ implementation will double the memory, which is very inefficient. - + See `set_nweight` for raised errors. """ for edge, weight in list(nweights.items()): self.set_nweight(edge[0], edge[1], weight[0], weight[1]) - + def set_tweight(self, node, weight_source, weight_sink): r""" Set a single t-weight / terminal-weight. - + Parameters ---------- node : int @@ -434,115 +471,123 @@ def set_tweight(self, node, weight_source, weight_sink): Weight to source terminal. weight_sink : float Weight to sink terminal. - + Raises ------ ValueError If a passed node id does not refer to any node of the graph (i.e. it is either higher than the initially set number of nodes or lower than zero). - + Notes - ----- + ----- The object does not check if the number of supplied edges in total exceeds the number passed to the init-method. If this is the case, the underlying C++ implementation will double the memory, which is very inefficient. - + Terminal weights can be zero or negative. """ if node >= self.__nodes or node < 0: - raise ValueError('Invalid node id of {}. Valid values are 0 to {}.'.format(node, self.__nodes - 1)) - self.__graph.add_tweights(int(node), float(weight_source), float(weight_sink)) # (weight-to-source, weight-to-sink) - + raise ValueError( + "Invalid node id of {}. Valid values are 0 to {}.".format( + node, self.__nodes - 1 + ) + ) + self.__graph.add_tweights( + int(node), float(weight_source), float(weight_sink) + ) # (weight-to-source, weight-to-sink) + def set_tweights(self, tweights): r""" Set multiple t-weights to the current collection of t-weights, overwriting already existing ones. - + Parameters ---------- tweights : dict A dictionary with node_ids as keys and (weight-to-source, weight-to-sink) tuples as values. - + Raises ------ ValueError If a passed node id does not refer to any node of the graph (i.e. it is either higher than the initially set number of - nodes or lower than zero). - + nodes or lower than zero). + Notes ----- Since this method overrides already existing t-weights, it is strongly recommended to run `set_source_nodes` and `set_sink_nodes` after the last call to this method. - + The weights for nodes directly connected to either the source or the sink are best set using `set_source_nodes` or `set_sink_nodes` to ensure consistency of their maximum values. 
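A minimal end-to-end sketch of these weight-setting rules on a GCGraph, assuming the module path from the diff header; the chain and its capacities are illustrative only:

    from medpy.graphcut.graph import GCGraph

    graph = GCGraph(4, 3)               # 4 nodes (ids 0..3), 3 expected n-edges
    graph.set_source_nodes([0])
    graph.set_sink_nodes([3])
    graph.set_nweight(0, 1, 2.5, 2.5)   # weights must be > 0, self-edges forbidden
    graph.set_nweight(1, 2, 1.0, 1.0)
    graph.set_nweight(2, 3, 2.5, 2.5)
    print(graph.get_graph().maxflow())  # 1.0: the weakest edge (1, 2) is cut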
- """ + """ for node, weight in list(tweights.items()): - self.set_tweight(node, weight[0], weight[1]) # (weight-to-source, weight-to-sink) - + self.set_tweight( + node, weight[0], weight[1] + ) # (weight-to-source, weight-to-sink) + def set_tweights_all(self, tweights): r""" Set all t-weights at once. - + Parameters ---------- tweights : iterable Containing a pair of numeric values for each of the graphs nodes. - + Notes ----- Since this method overrides already existing t-weights, it is strongly recommended to run `set_source_nodes` and `set_sink_nodes` after the last call to this method. - + The weights for nodes directly connected to either the source or the sink are best set using `set_source_nodes` or `set_sink_nodes` to ensure consistency of their maximum values. """ for node, (twsource, twsink) in enumerate(tweights): - self.set_tweight(node, twsource, twsink) # source = FG, sink = BG - + self.set_tweight(node, twsource, twsink) # source = FG, sink = BG + def get_graph(self): r""" Get the C++ graph. - + Returns ------- graph : maxflow.GraphDouble The underlying maxflow.GraphDouble C++ implementation of the graph. """ return self.__graph - + def get_node_count(self): r""" Get the number of nodes. - + Returns ------- node_count : int The number of nodes (excluding sink and source). """ return self.__nodes - + def get_nodes(self): r""" Get the nodes. - + Returns ------- nodes : list All nodes as an ordered list (starting from 0). """ return list(range(0, self.__nodes)) - + def get_edge_count(self): r""" Get the number of edges. - + Returns ------- edge_count : int diff --git a/medpy/graphcut/wrapper.py b/medpy/graphcut/wrapper.py index c8194bca..aa31d8ff 100644 --- a/medpy/graphcut/wrapper.py +++ b/medpy/graphcut/wrapper.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -19,34 +19,38 @@ # status Release -# build-in modules -import multiprocessing import itertools import math +# build-in modules +import multiprocessing + # third-party modules import scipy -# own modules -from .energy_label import boundary_stawiaski -from .generate import graph_from_labels from ..core.exceptions import ArgumentError from ..core.logger import Logger from ..filter import relabel, relabel_map + +# own modules +from .energy_label import boundary_stawiaski +from .generate import graph_from_labels + try: from functools import reduce except ImportError: pass + # code -def split_marker(marker, fg_id = 1, bg_id = 2): +def split_marker(marker, fg_id=1, bg_id=2): """ Splits an integer marker image into two binary image containing the foreground and background markers respectively. All encountered 1's are hereby treated as foreground, all 2's as background, all 0's as neutral marker and all others are ignored. This behaviour can be changed by supplying the fg_id and/or bg_id parameters. 
- + Parameters ---------- marker : ndarray @@ -55,31 +59,41 @@ def split_marker(marker, fg_id = 1, bg_id = 2): The value that should be treated as foreground. bg_id : integer The value that should be treated as background. - + Returns ------- fgmarkers, bgmarkers : nadarray The fore- and background markers as boolean images. """ img_marker = scipy.asarray(marker) - + img_fgmarker = scipy.zeros(img_marker.shape, scipy.bool_) img_fgmarker[img_marker == fg_id] = True - + img_bgmarker = scipy.zeros(img_marker.shape, scipy.bool_) img_bgmarker[img_marker == bg_id] = True - + return img_fgmarker, img_bgmarker -def graphcut_split(graphcut_function, regions, gradient, foreground, background, minimal_edge_length = 100, overlap = 10, processes = None): + +def graphcut_split( + graphcut_function, + regions, + gradient, + foreground, + background, + minimal_edge_length=100, + overlap=10, + processes=None, +): """ Executes a graph cut by splitting the original volume into a number of sub-volumes of a minimal edge length. These are then processed in subprocesses. - + This can be significantly faster than the traditional graph cuts, but should be used with, as it can lead to different results. To minimize this effect, the overlap parameter allows control over how much the respective sub-volumes should overlap. - + Parameters ---------- graphcut_function : function @@ -99,7 +113,7 @@ def graphcut_split(graphcut_function, regions, gradient, foreground, background, processes : integer or None The number of processes to run simultaneously, if not supplied, will be the same as the number of processors. - + Returns ------- segmentation : ndarray @@ -107,68 +121,93 @@ def graphcut_split(graphcut_function, regions, gradient, foreground, background, """ # initialize logger logger = Logger.getInstance() - + # ensure that input images are scipy arrays img_region = scipy.asarray(regions) img_gradient = scipy.asarray(gradient) img_fg = scipy.asarray(foreground, dtype=scipy.bool_) img_bg = scipy.asarray(background, dtype=scipy.bool_) - + # ensure correctness of supplied images - if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.') - + if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): + raise ArgumentError("All supplied images must be of the same shape.") + # check and eventually enhance input parameters - if minimal_edge_length < 10: raise ArgumentError('A minimal edge length smaller than 10 is not supported.') - if overlap < 0: raise ArgumentError('A negative overlap is not supported.') - if overlap >= minimal_edge_length: raise ArgumentError('The overlap is not allowed to exceed the minimal edge length.') - + if minimal_edge_length < 10: + raise ArgumentError("A minimal edge length smaller than 10 is not supported.") + if overlap < 0: + raise ArgumentError("A negative overlap is not supported.") + if overlap >= minimal_edge_length: + raise ArgumentError( + "The overlap is not allowed to exceed the minimal edge length." + ) + # compute how to split the volumes into sub-volumes i.e. 
determine step-size for each image dimension shape = list(img_region.shape) steps = [x // minimal_edge_length for x in shape] - steps = [1 if 0 == x else x for x in steps] # replace zeros by ones + steps = [1 if 0 == x else x for x in steps] # replace zeros by ones stepsizes = [math.ceil(x / y) for x, y in zip(shape, steps)] - logger.debug('Using a minimal edge length of {}, a sub-volume size of {} was determined from the shape {}, which means {} sub-volumes.'.format(minimal_edge_length, stepsizes, shape, reduce(lambda x, y: x*y, steps))) - + logger.debug( + "Using a minimal edge length of {}, a sub-volume size of {} was determined from the shape {}, which means {} sub-volumes.".format( + minimal_edge_length, stepsizes, shape, reduce(lambda x, y: x * y, steps) + ) + ) + # control step-sizes to definitely cover the whole image covered_shape = [x * y for x, y in zip(steps, stepsizes)] for c, o in zip(covered_shape, shape): - if c < o: raise Exception("The computed sub-volumes do not cover the complete image!") - + if c < o: + raise Exception("The computed sub-volumes do not cover the complete image!") + # iterate over the steps and extract subvolumes according to the stepsizes - slicer_steps = [list(range(0, int(step * stepsize), int(stepsize))) for step, stepsize in zip(steps, stepsizes)] - slicers = [[slice(_from, _from + _offset + overlap) for _from, _offset in zip(slicer_step, stepsizes)] for slicer_step in itertools.product(*slicer_steps)] - subvolumes_input = [(img_region[slicer], - img_gradient[slicer], - img_fg[slicer], - img_bg[slicer]) for slicer in slicers] - + slicer_steps = [ + list(range(0, int(step * stepsize), int(stepsize))) + for step, stepsize in zip(steps, stepsizes) + ] + slicers = [ + [ + slice(_from, _from + _offset + overlap) + for _from, _offset in zip(slicer_step, stepsizes) + ] + for slicer_step in itertools.product(*slicer_steps) + ] + subvolumes_input = [ + (img_region[slicer], img_gradient[slicer], img_fg[slicer], img_bg[slicer]) + for slicer in slicers + ] + # execute the graph cuts and collect results - subvolumes_output = graphcut_subprocesses(graphcut_function, subvolumes_input, processes) - + subvolumes_output = graphcut_subprocesses( + graphcut_function, subvolumes_input, processes + ) + # put back data together img_result = scipy.zeros(img_region.shape, dtype=scipy.bool_) for slicer, subvolume in zip(slicers, subvolumes_output): sslicer_antioverlap = [slice(None)] * img_result.ndim - + # treat overlap area using logical-and (&) for dim in range(img_result.ndim): - if 0 == slicer[dim].start: continue + if 0 == slicer[dim].start: + continue sslicer_antioverlap[dim] = slice(overlap, None) sslicer_overlap = [slice(None)] * img_result.ndim sslicer_overlap[dim] = slice(0, overlap) - img_result[slicer][sslicer_overlap] = scipy.logical_and(img_result[slicer][sslicer_overlap], subvolume[sslicer_overlap]) - + img_result[slicer][sslicer_overlap] = scipy.logical_and( + img_result[slicer][sslicer_overlap], subvolume[sslicer_overlap] + ) + # treat remainder through assignment img_result[slicer][sslicer_antioverlap] = subvolume[sslicer_antioverlap] - + return img_result.astype(scipy.bool_) - -def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = None): + +def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes=None): """ Executes multiple graph cuts in parallel. This can result in a significant speed-up. 
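A minimal sketch of graphcut_split on synthetic data, assuming the module path from the diff header; shapes, seeds and parameters are illustrative only:

    import numpy
    from medpy.graphcut.wrapper import graphcut_split, graphcut_stawiaski

    shape = (40, 40, 40)
    regions = (numpy.arange(numpy.prod(shape)) + 1).reshape(shape)  # toy label image
    gradient = numpy.random.rand(*shape)
    fg = numpy.zeros(shape, bool); fg[20, 20, 20] = True
    bg = numpy.zeros(shape, bool); bg[0, 0, 0] = True

    segmentation = graphcut_split(
        graphcut_stawiaski, regions, gradient, fg, bg,
        minimal_edge_length=20, overlap=5, processes=2,
    )  # 2**3 = 8 sub-volumes of edge length 20 (+5 overlap), cut in parallel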
- + Parameters ---------- graphcut_function : function @@ -178,7 +217,7 @@ def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = Non processes : integer or None The number of processes to run simultaneously, if not supplied, will be the same as the number of processors. - + Returns ------- segmentations : tuple of ndarray @@ -186,24 +225,28 @@ def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = Non """ # initialize logger logger = Logger.getInstance() - + # check and eventually enhance input parameters - if not processes: processes = multiprocessing.cpu_count() - if not int == type(processes) or processes <= 0: raise ArgumentError('The number processes can not be zero or negative.') - - logger.debug('Executing graph cuts in {} subprocesses.'.format(multiprocessing.cpu_count())) - + if not processes: + processes = multiprocessing.cpu_count() + if not int == type(processes) or processes <= 0: + raise ArgumentError("The number processes can not be zero or negative.") + + logger.debug( + "Executing graph cuts in {} subprocesses.".format(multiprocessing.cpu_count()) + ) + # creates subprocess pool and execute pool = multiprocessing.Pool(processes) results = pool.map(graphcut_function, graphcut_arguments) - + return results -def graphcut_stawiaski(regions, gradient = False, foreground = False, background = False): +def graphcut_stawiaski(regions, gradient=False, foreground=False, background=False): """ Executes a Stawiaski label graph cut. - + Parameters ---------- regions : ndarray @@ -214,12 +257,12 @@ def graphcut_stawiaski(regions, gradient = False, foreground = False, background The foreground markers. background : ndarray The background markers. - + Returns ------- segmentation : ndarray The graph-cut segmentation result as boolean array. 
- + Raises ------ ArgumentError @@ -227,36 +270,50 @@ def graphcut_stawiaski(regions, gradient = False, foreground = False, background """ # initialize logger logger = Logger.getInstance() - + # unpack images if required # !TODO: This is an ugly hack, especially since it can be seen inside the function definition # How to overcome this, since I can not use a wrapper function as the whole thing must be pickable - if not gradient and not foreground and not background: + if not gradient and not foreground and not background: regions, gradient, foreground, background = regions - + # ensure that input images are scipy arrays img_region = scipy.asarray(regions) img_gradient = scipy.asarray(gradient) img_fg = scipy.asarray(foreground, dtype=scipy.bool_) img_bg = scipy.asarray(background, dtype=scipy.bool_) - + # ensure correctness of supplied images - if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.') + if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): + raise ArgumentError("All supplied images must be of the same shape.") # recompute the label ids to start from id = 1 img_region = relabel(img_region) - + # generate graph - gcgraph = graph_from_labels(img_region, img_fg, img_bg, boundary_term = boundary_stawiaski, boundary_term_args = (img_gradient)) - + gcgraph = graph_from_labels( + img_region, + img_fg, + img_bg, + boundary_term=boundary_stawiaski, + boundary_term_args=(img_gradient), + ) + # execute min-cut - maxflow = gcgraph.maxflow() # executes the cut and returns the maxflow value - - logger.debug('Graph-cut terminated successfully with maxflow of {}.'.format(maxflow)) - + maxflow = gcgraph.maxflow() # executes the cut and returns the maxflow value + + logger.debug( + "Graph-cut terminated successfully with maxflow of {}.".format(maxflow) + ) + # apply results to the region image - mapping = [0] # no regions with id 1 exists in mapping, entry used as padding - mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(img_region)]) + mapping = [0] # no regions with id 1 exists in mapping, entry used as padding + mapping.extend( + [ + 0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 + for x in scipy.unique(img_region) + ] + ) img_results = relabel_map(img_region, mapping) - + return img_results.astype(scipy.bool_) diff --git a/medpy/graphcut/write.py b/medpy/graphcut/write.py index b80d5bc0..3658ab68 100644 --- a/medpy/graphcut/write.py +++ b/medpy/graphcut/write.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -24,11 +24,12 @@ # own modules + # code def graph_to_dimacs(g, f): """ Persists the supplied graph in valid dimacs format into the file. - + Parameters ---------- g : `~medpy.graphcut.graph.Graph` @@ -37,37 +38,39 @@ def graph_to_dimacs(g, f): A file-like object. 
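A minimal sketch of graph_to_dimacs writing a two-node graph to an in-memory file, assuming the module paths from the diff headers:

    from io import StringIO
    from medpy.graphcut.graph import Graph
    from medpy.graphcut.write import graph_to_dimacs

    g = Graph()
    g.set_nodes(2)
    g.set_source_nodes([1])
    g.set_sink_nodes([2])
    g.set_nweights({(1, 2): (3.0, 3.0)})

    f = StringIO()
    graph_to_dimacs(g, f)
    print(f.getvalue())  # 'p max 4 1' problem line, then one arc line per non-zero weight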
""" # write comments - f.write('c Created by medpy\n') - f.write('c Oskar Maier, oskar.maier@googlemail.com\n') - f.write('c\n') - + f.write("c Created by medpy\n") + f.write("c Oskar Maier, oskar.maier@googlemail.com\n") + f.write("c\n") + # write problem - f.write('c problem line\n') - f.write('p max {} {}\n'.format(g.get_node_count() + 2, len(g.get_edges()))) # +2 as terminal nodes also count in dimacs format # no-nodes / no-edges - + f.write("c problem line\n") + f.write( + "p max {} {}\n".format(g.get_node_count() + 2, len(g.get_edges())) + ) # +2 as terminal nodes also count in dimacs format # no-nodes / no-edges + # denote source and sink - f.write('c source descriptor\n') - f.write('n 1 s\n') - f.write('c sink descriptor\n') - f.write('n 2 t\n') - + f.write("c source descriptor\n") + f.write("n 1 s\n") + f.write("c sink descriptor\n") + f.write("n 2 t\n") + # write terminal arcs (t-weights) - f.write('c terminal arcs (t-weights)\n') + f.write("c terminal arcs (t-weights)\n") for node, weight in list(g.get_tweights().items()): # Note: the nodes ids of the graph start from 1, but 1 and 2 are reserved for source and sink respectively, therefore add 2 - if not 0 == weight[0]: # 0 weights are implicit - f.write('a 1 {} {}\n'.format(node + 2, weight[0])) - if not 0 == weight[1]: # 0 weights are implicit - f.write('a {} 2 {}\n'.format(node + 2, weight[1])) - + if not 0 == weight[0]: # 0 weights are implicit + f.write("a 1 {} {}\n".format(node + 2, weight[0])) + if not 0 == weight[1]: # 0 weights are implicit + f.write("a {} 2 {}\n".format(node + 2, weight[1])) + # write inter-node arcs (n-weights) - f.write('c inter-node arcs (n-weights)\n') + f.write("c inter-node arcs (n-weights)\n") for edge, weight in list(g.get_nweights().items()): - if not 0 == weight[0]: # 0 weights are implicit - f.write('a {} {} {}\n'.format(edge[0] + 2, edge[1] + 2, weight[0])) + if not 0 == weight[0]: # 0 weights are implicit + f.write("a {} {} {}\n".format(edge[0] + 2, edge[1] + 2, weight[0])) # reversed weights have to follow directly in the next line - if not 0 == weight[1]: # 0 weights are implicit - f.write('a {} {} {}\n'.format(edge[1] + 2, edge[0] + 2, weight[1])) - + if not 0 == weight[1]: # 0 weights are implicit + f.write("a {} {} {}\n".format(edge[1] + 2, edge[0] + 2, weight[1])) + # end comment - f.write('c end-of-file') + f.write("c end-of-file") diff --git a/medpy/io/__init__.py b/medpy/io/__init__.py index cf3af731..43d6460f 100644 --- a/medpy/io/__init__.py +++ b/medpy/io/__init__.py @@ -13,7 +13,7 @@ .. module:: medpy.io.load .. autosummary:: :toctree: generated/ - + load Saving an image @@ -22,43 +22,37 @@ .. module:: medpy.io.save .. autosummary:: :toctree: generated/ - + save - + Reading / writing metadata (:mod:`medpy.io.header`) =================================================== - + .. module:: medpy.io.header .. autosummary:: :toctree: generated/ - + Header """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. 
If not, see <http://www.gnu.org/licenses/>.
+
# import all functions/methods/classes into the module
-from .load import load
-from .save import save
-from .header import \
-    Header, \
-    get_voxel_spacing, get_pixel_spacing, get_offset, \
-    set_voxel_spacing, set_pixel_spacing, set_offset, \
-    copy_meta_data
# import all sub-modules in the __all__ variable
-__all__ = [s for s in dir() if not s.startswith('_')]
+__all__ = [s for s in dir() if not s.startswith("_")]
diff --git a/medpy/io/header.py b/medpy/io/header.py
index c8905c14..d0e45d39 100644
--- a/medpy/io/header.py
+++ b/medpy/io/header.py
@@ -1,15 +1,15 @@
# Copyright (C) 2013 Oskar Maier
-#
+#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-#
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
-#
+#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
@@ -27,6 +27,7 @@
# own modules
from ..core import Logger

+
# code
def get_voxel_spacing(hdr):
    r"""
@@ -36,12 +37,12 @@ def get_voxel_spacing(hdr):
    -----
    It is recommended to call `hdr.get_voxel_spacing()` instead
    of this function.
-
+
    Parameters
    ----------
    hdr : medpy.io.Header
        An image header as returned by `load`.
-
+
    Returns
    -------
    pixel_spacing : tuple of floats
@@ -49,11 +50,16 @@
    """
    return hdr.get_voxel_spacing()

+
def get_pixel_spacing(hdr):
    r"""Deprecated synonym of `~medpy.io.header.get_voxel_spacing`."""
-    warnings.warn('get_pixel_spacing() is depreciated, use set_voxel_spacing() instead', category=DeprecationWarning)
+    warnings.warn(
+        "get_pixel_spacing() is deprecated, use get_voxel_spacing() instead",
+        category=DeprecationWarning,
+    )
    return get_voxel_spacing(hdr)

+
def get_offset(hdr):
    r"""
    Extracts the image offset (aka origin) from an image header.
@@ -66,12 +72,12 @@
    the first pixel, which SimpleITK promises independent of the file format.
    Some formats do not specify a header field for the offset, thus zeros
    are returned.
-
+
    Parameters
    ----------
    hdr : medpy.io.Header
        An image header as returned by `load`.
-
+
    Returns
    -------
    offset : tuple of floats
@@ -79,14 +85,15 @@
    """
    return hdr.get_offset()

+
def set_voxel_spacing(hdr, spacing):
    r"""
    Sets the voxel spacing in an image header.
-
+
    Notes
    -----
    It is recommended to call `hdr.set_voxel_spacing()` instead
-    of this function. 
+    of this function.

    Parameters
    ----------
@@ -97,19 +104,24 @@
    """
    hdr.set_voxel_spacing(spacing)

+
def set_pixel_spacing(hdr, spacing):
    r"""Deprecated synonym of `~medpy.io.header.set_voxel_spacing`."""
-    warnings.warn('get_pixel_spacing() is depreciated, use set_voxel_spacing() instead', category=DeprecationWarning)
+    warnings.warn(
+        "set_pixel_spacing() is deprecated, use set_voxel_spacing() instead",
+        category=DeprecationWarning,
+    )
    set_voxel_spacing(hdr, spacing)
-
+
+
def set_offset(hdr, offset):
    r"""
    Sets the offset (aka origin) in the image header.
-
+
    Notes
    -----
    It is recommended to call `hdr.set_offset()` instead
-    of this function. 
+    of this function.

    The offset is based on the center of the first voxel.
    See also `get_offset` for more details.
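# For orientation, a hedged sketch of how these accessors are typically
# combined with load()/save() (placeholder file names; the spacing values
# are illustrative only):
from medpy.io.load import load
from medpy.io.save import save

data, header = load("image.nii.gz")
print(header.get_voxel_spacing())  # e.g. (0.5, 0.5, 3.0)
header.set_voxel_spacing((1.0, 1.0, 3.0))
save(data, "image_respaced.nii.gz", header)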
@@ -126,7 +138,7 @@ def set_offset(hdr, offset): def copy_meta_data(hdr_to, hdr_from): r""" Copy image meta data (voxel spacing and offset) from one header to another. - + Parameters ---------- hdr_to : object @@ -134,16 +146,23 @@ def copy_meta_data(hdr_to, hdr_from): hdr_from : object An image header as returned by `load`. """ - warnings.warn('copy_meta_data() is depreciated and may be removed in future versions', category=DeprecationWarning) + warnings.warn( + "copy_meta_data() is depreciated and may be removed in future versions", + category=DeprecationWarning, + ) logger = Logger.getInstance() try: set_pixel_spacing(hdr_to, get_pixel_spacing(hdr_from)) except AttributeError as e: - logger.warning('The voxel spacing could not be set correctly. Signaled error: {}'.format(e)) + logger.warning( + "The voxel spacing could not be set correctly. Signaled error: {}".format(e) + ) try: set_offset(hdr_to, get_offset(hdr_from)) except AttributeError as e: - logger.warning('The image offset could not be set correctly. Signaled error: {}'.format(e)) + logger.warning( + "The image offset could not be set correctly. Signaled error: {}".format(e) + ) class Header: @@ -152,7 +171,7 @@ class Header: Stores spacing, offset/origin, direction, and possibly further meta information. Provide at least one of the parameters. Missing information is extracted from - the ``sitkimage`` or, if not supplied, set to a default value. + the ``sitkimage`` or, if not supplied, set to a default value. Parameters ---------- @@ -171,11 +190,12 @@ class Header: """ def __init__(self, spacing=None, offset=None, direction=None, sitkimage=None): - assert \ - sitkimage is not None or \ - spacing is not None or \ - offset is not None or \ - direction is not None + assert ( + sitkimage is not None + or spacing is not None + or offset is not None + or direction is not None + ) # determin the image's ndim and default data types if direction is not None: @@ -189,15 +209,19 @@ def __init__(self, spacing=None, offset=None, direction=None, sitkimage=None): ndim = len(spacing) else: ndim = len(sitkimage.GetSpacing()) - + # set missing information to extracted or default values if spacing is None: - spacing = sitkimage.GetSpacing() if sitkimage is not None else (1.0, ) * ndim + spacing = sitkimage.GetSpacing() if sitkimage is not None else (1.0,) * ndim if offset is None: - offset = sitkimage.GetOrigin() if sitkimage is not None else (0.0, ) * ndim + offset = sitkimage.GetOrigin() if sitkimage is not None else (0.0,) * ndim if direction is None: - direction = np.asarray(sitkimage.GetDirection()).reshape(ndim, ndim) if sitkimage is not None else np.identity(ndim) - + direction = ( + np.asarray(sitkimage.GetDirection()).reshape(ndim, ndim) + if sitkimage is not None + else np.identity(ndim) + ) + # assert consistency assert len(spacing) == len(offset) assert direction.ndim == 2 @@ -234,13 +258,13 @@ def copy_to(self, sitkimage): ndim = len(sitkimage.GetSize()) spacing, offset, direction = self.get_info_consistent(ndim) - + sitkimage.SetSpacing(spacing) sitkimage.SetOrigin(offset) sitkimage.SetDirection(tuple(direction.flatten())) - + return sitkimage - + def get_info_consistent(self, ndim): """ Returns the main meta-data information adapted to the supplied @@ -253,7 +277,7 @@ def get_info_consistent(self, ndim): ---------- ndim : int image's dimensionality - + Returns ------- spacing : tuple of floats @@ -261,21 +285,23 @@ def get_info_consistent(self, ndim): direction : ndarray """ if ndim > len(self.spacing): - spacing = self.spacing + 
(1.0, ) * (ndim - len(self.spacing)) + spacing = self.spacing + (1.0,) * (ndim - len(self.spacing)) else: spacing = self.spacing[:ndim] if ndim > len(self.offset): - offset = self.offset + (0.0, ) * (ndim - len(self.offset)) + offset = self.offset + (0.0,) * (ndim - len(self.offset)) else: offset = self.offset[:ndim] if ndim > self.direction.shape[0]: direction = np.identity(ndim) - direction[:self.direction.shape[0], :self.direction.shape[0]] = self.direction + direction[ + : self.direction.shape[0], : self.direction.shape[0] + ] = self.direction else: direction = self.direction[:ndim, :ndim] - + return spacing, offset, direction def set_voxel_spacing(self, spacing): @@ -287,9 +313,9 @@ def set_voxel_spacing(self, spacing): spacing : tuple of floats the new image voxel spacing take care that image and spacing dimensionalities match - """ + """ self.spacing = tuple(spacing) - + def set_offset(self, offset): """ Set image's offset. @@ -314,18 +340,18 @@ def set_direction(self, direction): default to the identity matrix """ self.direction = np.asarray(direction) - + def get_voxel_spacing(self): """ Get image's spacing. - + Returns ------- spacing : tuple of floats the image's spacing """ return self.spacing - + def get_offset(self): """ Get image's offset. @@ -347,12 +373,12 @@ def get_direction(self): the image's direction / affine transformation matrix of square shape """ - return self.direction + return self.direction def get_sitkimage(self): """ Get underlying sitk Image object. - + Returns ------- image-object : sitk.Image or None diff --git a/medpy/io/load.py b/medpy/io/load.py index 78105b5a..38c71a4f 100644 --- a/medpy/io/load.py +++ b/medpy/io/load.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -25,24 +25,25 @@ import numpy as np import SimpleITK as sitk +from ..core import ImageLoadingError, Logger + # own modules from .header import Header -from ..core import Logger -from ..core import ImageLoadingError + # code def load(image): r""" Loads the ``image`` and returns a ndarray with the image's pixel content as well as a header object. - + The header can, with restrictions, be used to extract additional meta-information about the image (e.g. using the methods in `~medpy.io.Header`). Additionally it serves as meta-data container that can be passes to `~medpy.io.save.save` when the altered image is saved to the hard drive again. Note that the transfer of meta-data is only possible, and even then not guaranteed, when the source and target image formats are the same. - + MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving. The supported image file formats should include at least the following. 
@@ -53,7 +54,7 @@ def load(image): - Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz) - Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom) - Digital Imaging and Communications in Medicine (DICOM) series (/) - - Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) + - Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) - Medical Imaging NetCDF (MINC) (.mnc, .MNC) - Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz) @@ -69,49 +70,53 @@ def load(image): - VTK images (.vtk) Other formats: - + - Portable Network Graphics (PNG) (.png, .PNG) - Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG) - Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF) - Windows bitmap (.bmp, .BMP) - Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5) - MSX-DOS Screen-x (.ge4, .ge5) - + For informations about which image formats, dimensionalities and pixel data types your current configuration supports, run `python3 tests/support.py > myformats.log`. Further information see https://simpleitk.readthedocs.io . - + Parameters ---------- image : string Path to the image to load. - + Returns ------- image_data : ndarray The image data as numpy array with order `x,y,z,c`. image_header : Header The image metadata as :mod:`medpy.io.Header`. - + Raises ------ ImageLoadingError If the image could not be loaded due to some reason. """ logger = Logger.getInstance() - logger.info('Loading image {}...'.format(image)) + logger.info("Loading image {}...".format(image)) if not os.path.exists(image): - raise ImageLoadingError('The supplied image {} does not exist.'.format(image)) + raise ImageLoadingError("The supplied image {} does not exist.".format(image)) if os.path.isdir(image): # !TODO: this does not load the meta-data, find a way to load it from a series, too - logger.info('Loading image as DICOM series. If more than one found in folder {} defaulting to first.'.format(image)) + logger.info( + "Loading image as DICOM series. If more than one found in folder {} defaulting to first.".format( + image + ) + ) sitkimage = sitk.ReadImage(sitk.ImageSeriesReader_GetGDCMSeriesFileNames(image)) else: sitkimage = sitk.ReadImage(image) - + # Make image array data and header header = Header(sitkimage=sitkimage) image = sitk.GetArrayFromImage(sitkimage) diff --git a/medpy/io/save.py b/medpy/io/save.py index 922c2a8f..a6827cc6 100644 --- a/medpy/io/save.py +++ b/medpy/io/save.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -26,33 +26,33 @@ import SimpleITK as sitk # own modules -from ..core import Logger -from ..core import ImageSavingError +from ..core import ImageSavingError, Logger + # code -def save(arr, filename, hdr = False, force = True, use_compression = False): +def save(arr, filename, hdr=False, force=True, use_compression=False): r""" Save the image ``arr`` as filename using information encoded in ``hdr``. The target image format is determined by the ``filename`` suffix. 
If the ``force`` parameter is set to true, an already existing image is
overwritten silently. Otherwise an error is thrown.
-
+
The header (``hdr``) object is the one returned by `~medpy.io.load.load`
and is used opportunistically, possibly losing some meta-information.
-
+
Generally this function does not guarantee that metadata other than the
image shape and pixel data type are kept.
-
+
MedPy relies on SimpleITK, which enables the power of ITK for image loading
and saving. The supported image file formats should include at least the
following.

Medical formats:
-
+
- ITK MetaImage (.mha/.raw, .mhd)
- Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz)
- Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
- Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
- Digital Imaging and Communications in Medicine (DICOM) series (<directory>/)
-- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr) 
+- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
- Medical Imaging NetCDF (MINC) (.mnc, .MNC)
- Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
@@ -75,12 +75,12 @@ def save(arr, filename, hdr = False, force = True, use_compression = False):
- Windows bitmap (.bmp, .BMP)
- Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5)
- MSX-DOS Screen-x (.ge4, .ge5)
-
+
For information about which image formats, dimensionalities and pixel data
types your current configuration supports, run
`python3 tests/support.py > myformats.log`.
-
+
For further information see https://simpleitk.readthedocs.io .
-
+
Parameters
----------
arr : array_like
@@ -93,28 +93,28 @@
    Set to True to overwrite an already existing image silently.
use_compression : bool
    Use data compression if the target format supports it.
-
+
Raises
------
ImageSavingError
    If the image could not be saved due to various reasons
"""
logger = Logger.getInstance()
-    logger.info('Saving image as {}...'.format(filename))
-
+    logger.info("Saving image as {}...".format(filename))
+
# Check image file existence
if not force and os.path.exists(filename):
-        raise ImageSavingError('The target file {} already exists.'.format(filename))
-
+        raise ImageSavingError("The target file {} already exists.".format(filename))
+
# Roll axes from x,y,z,c to z,y,x,c
if arr.ndim == 4:
    arr = np.moveaxis(arr, -1, 0)
arr = arr.T
sitkimage = sitk.GetImageFromArray(arr)
-
+
# Copy meta-data as far as possible
if hdr:
    hdr.copy_to(sitkimage)
-
+
sitk.WriteImage(sitkimage, filename, use_compression)
diff --git a/medpy/iterators/__init__.py b/medpy/iterators/__init__.py
index 34a8daa3..44743294 100644
--- a/medpy/iterators/__init__.py
+++ b/medpy/iterators/__init__.py
@@ -5,7 +5,7 @@
.. currentmodule:: medpy.iterators

This package contains iterators for images.
-
+
Patch-wise :mod:`medpy.iterators.patchwise`
===========================================
Iterators to extract patches from images.
@@ -13,33 +13,30 @@
.. module:: medpy.iterators.patchwise
.. autosummary::
    :toctree: generated/
-
+
    SlidingWindowIterator
    CentredPatchIterator
    CentredPatchIteratorOverlapping
-
-
+
+
"""
# Copyright (C) 2013 Oskar Maier
-#
+#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
-# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # import all functions/methods/classes into the module -from .patchwise import CentredPatchIterator, CentredPatchIteratorOverlapping, SlidingWindowIterator # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] - - +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/iterators/patchwise.py b/medpy/iterators/patchwise.py index 33b6d9f5..78cb1f52 100644 --- a/medpy/iterators/patchwise.py +++ b/medpy/iterators/patchwise.py @@ -20,18 +20,19 @@ # build-in modules from itertools import product +from operator import itemgetter # third-party modules import numpy -from operator import itemgetter from scipy.ndimage import find_objects # own modules # constants + # code -class SlidingWindowIterator(): +class SlidingWindowIterator: r""" Moves a sliding window over the array, where the first patch is places centered on the top-left voxel and outside-of-image values filled with `cval`. The returned @@ -56,7 +57,7 @@ class SlidingWindowIterator(): Value to fill undefined positions. """ - def __init__(self, array, psize, cval = 0): + def __init__(self, array, psize, cval=0): # process arguments self.array = numpy.asarray(array) if is_integer(psize): @@ -67,18 +68,24 @@ def __init__(self, array, psize, cval = 0): # validate if numpy.any([x <= 0 for x in self.psize]): - raise ValueError('The patch size must be at least 1 in any dimension.') + raise ValueError("The patch size must be at least 1 in any dimension.") elif len(self.psize) != self.array.ndim: - raise ValueError('The patch dimensionality must equal the array dimensionality.') + raise ValueError( + "The patch dimensionality must equal the array dimensionality." + ) # compute required padding as pairs - self.padding = [(p / 2, p / 2 - (p-1) % 2) for p in self.psize] + self.padding = [(p / 2, p / 2 - (p - 1) % 2) for p in self.psize] # pad array - self.array = numpy.pad(self.array, self.padding, mode='constant', constant_values=self.cval) + self.array = numpy.pad( + self.array, self.padding, mode="constant", constant_values=self.cval + ) # initialize slicers - slicepoints = [list(range(0, s - p + 1)) for s, p in zip(self.array.shape, self.psize)] + slicepoints = [ + list(range(0, s - p + 1)) for s, p in zip(self.array.shape, self.psize) + ] self.__slicepointiter = product(*slicepoints) def __iter__(self): @@ -98,14 +105,18 @@ def __next__(self): List of slicers to apply the same operation to another array (using applyslicer()). 
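        Examples
        --------
        A sketch of the intended loop, assuming the iterator yields
        ``(patch, pmask, slicer)`` triples in line with the return values
        listed above (``arr`` is any ndarray, ``process`` a placeholder):

        >>> for patch, pmask, slicer in SlidingWindowIterator(arr, 3):
        ...     process(patch)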
""" # trigger internal iterators - spointset = next(self.__slicepointiter) # will raise StopIteration when empty + spointset = next(self.__slicepointiter) # will raise StopIteration when empty # compute slicer object slicer = [] padder = [] for dim, sp in enumerate(spointset): - slicer.append( slice(sp, sp + self.psize[dim]) ) - padder.append( (max(0, -1 * (sp - self.padding[dim][0])), - max(0, (sp + self.psize[dim]) - (self.array.shape[dim] -1) )) ) + slicer.append(slice(sp, sp + self.psize[dim])) + padder.append( + ( + max(0, -1 * (sp - self.padding[dim][0])), + max(0, (sp + self.psize[dim]) - (self.array.shape[dim] - 1)), + ) + ) # create patch and patch mask def_slicer = [slice(x, None if 0 == y else -1 * y) for x, y in padder] @@ -118,7 +129,7 @@ def __next__(self): next = __next__ - def applyslicer(self, array, slicer, cval = None): + def applyslicer(self, array, slicer, cval=None): r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. @@ -145,12 +156,12 @@ def applyslicer(self, array, slicer, cval = None): if cval is None: cval = self.cval _padding = self.padding + [(0, 0)] * (array.ndim - len(self.padding)) - array = numpy.pad(array, _padding, mode='constant', constant_values=cval) - _psize = self.psize + list(array.shape[len(self.psize):]) + array = numpy.pad(array, _padding, mode="constant", constant_values=cval) + _psize = self.psize + list(array.shape[len(self.psize) :]) return array[slicer].reshape(_psize) -class CentredPatchIterator(): +class CentredPatchIterator: r""" Iterated patch-wise over the array, where the central patch is centred on the image centre. @@ -248,7 +259,7 @@ class CentredPatchIterator(): """ - def __init__(self, array, psize, cval = 0): + def __init__(self, array, psize, cval=0): # process arguments self.array = numpy.asarray(array) if is_integer(psize): @@ -259,22 +270,37 @@ def __init__(self, array, psize, cval = 0): # validate if numpy.any([x <= 0 for x in self.psize]): - raise ValueError('The patch size must be at least 1 in any dimension.') + raise ValueError("The patch size must be at least 1 in any dimension.") elif len(self.psize) != self.array.ndim: - raise ValueError('The patch dimensionality must equal the array dimensionality.') + raise ValueError( + "The patch dimensionality must equal the array dimensionality." + ) elif numpy.any([x > y for x, y in zip(self.psize, self.array.shape)]): - raise ValueError('The patch is not allowed to be larger than the array in any dimension.') + raise ValueError( + "The patch is not allowed to be larger than the array in any dimension." 
+ ) # compute required padding - even_even_correction = [(1 - s%2) * (1 - ps%2) for s, ps in zip(self.array.shape, self.psize)] - array_centre = [s/2 - (1 - s%2) for s in self.array.shape] - remainder = [(c - ps/2 + ee, - s - c - (ps+1)/2 - ee) for c, s, ps, ee in zip(array_centre, self.array.shape, self.psize, even_even_correction)] - padding = [((ps - l % ps) % ps, - (ps - r % ps) % ps) for (l, r), ps in zip(remainder, self.psize)] + even_even_correction = [ + (1 - s % 2) * (1 - ps % 2) for s, ps in zip(self.array.shape, self.psize) + ] + array_centre = [s / 2 - (1 - s % 2) for s in self.array.shape] + remainder = [ + (c - ps / 2 + ee, s - c - (ps + 1) / 2 - ee) + for c, s, ps, ee in zip( + array_centre, self.array.shape, self.psize, even_even_correction + ) + ] + padding = [ + ((ps - l % ps) % ps, (ps - r % ps) % ps) + for (l, r), ps in zip(remainder, self.psize) + ] # determine slice-points for each dimension and initialize internal slice-point iterator - slicepoints = [list(range(-l, s + r, ps)) for s, ps, (l, r) in zip(self.array.shape, self.psize, padding)] + slicepoints = [ + list(range(-l, s + r, ps)) + for s, ps, (l, r) in zip(self.array.shape, self.psize, padding) + ] self.__slicepointiter = product(*slicepoints) # initialize internal grid-id iterator @@ -299,25 +325,35 @@ def __next__(self): A list of `slice()` instances definind the patch. """ # trigger internal iterators - spointset = next(self.__slicepointiter) # will raise StopIteration when empty + spointset = next(self.__slicepointiter) # will raise StopIteration when empty gridid = next(self.__grididiter) # compute slicer object and padder tuples slicer = [] padder = [] for dim, sp in enumerate(spointset): - slicer.append( slice(max(0, sp), - min(sp + self.psize[dim], self.array.shape[dim])) ) - padder.append( (max(0, -1 * sp), max(0, sp + self.psize[dim] - self.array.shape[dim])) ) + slicer.append( + slice(max(0, sp), min(sp + self.psize[dim], self.array.shape[dim])) + ) + padder.append( + (max(0, -1 * sp), max(0, sp + self.psize[dim] - self.array.shape[dim])) + ) # create patch and patch mask - patch = numpy.pad(self.array[slicer], padder, mode='constant', constant_values=self.cval) - pmask = numpy.pad(numpy.ones(self.array[slicer].shape, dtype=numpy.bool_), padder, mode='constant', constant_values=0) + patch = numpy.pad( + self.array[slicer], padder, mode="constant", constant_values=self.cval + ) + pmask = numpy.pad( + numpy.ones(self.array[slicer].shape, dtype=numpy.bool_), + padder, + mode="constant", + constant_values=0, + ) return patch, pmask, gridid, slicer next = __next__ @staticmethod - def applyslicer(array, slicer, pmask, cval = 0): + def applyslicer(array, slicer, pmask, cval=0): r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. 
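        For orientation, a hedged sketch of the enclosing iterator's round
        trip, combining the collected patches with `assembleimage` (``arr``
        is any 2D ndarray; the patch size of 50 is illustrative):

        >>> patches, pmasks, gridids = [], [], []
        >>> for patch, pmask, gridid, slicer in CentredPatchIterator(arr, 50):
        ...     patches.append(patch)
        ...     pmasks.append(pmask)
        ...     gridids.append(gridid)
        >>> restored = CentredPatchIterator.assembleimage(patches, pmasks, gridids)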
@@ -352,9 +388,12 @@ def applyslicer(array, slicer, pmask, cval = 0): """ l = len(slicer) patch = numpy.zeros(list(pmask.shape[:l]) + list(array.shape[l:]), array.dtype) - if not 0 == cval: patch.fill(cval) + if not 0 == cval: + patch.fill(cval) sliced = array[slicer] - patch[pmask] = sliced.reshape([numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:])) + patch[pmask] = sliced.reshape( + [numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:]) + ) return patch @staticmethod @@ -411,15 +450,26 @@ def assembleimage(patches, pmasks, gridids): gridids = [] pmasks = [] for groupid, group in list(groups.items()): - patches.append(numpy.concatenate([p for p, _, _ in sorted(group, key=itemgetter(2))], d)) - pmasks.append(numpy.concatenate([m for _, m, _ in sorted(group, key=itemgetter(2))], d)) + patches.append( + numpy.concatenate( + [p for p, _, _ in sorted(group, key=itemgetter(2))], d + ) + ) + pmasks.append( + numpy.concatenate( + [m for _, m, _ in sorted(group, key=itemgetter(2))], d + ) + ) gridids.append(groupid) objs = find_objects(pmasks[0]) if not 1 == len(objs): - raise ValueError('The assembled patch masks contain more than one binary object.') + raise ValueError( + "The assembled patch masks contain more than one binary object." + ) return patches[0][objs[0]] -class CentredPatchIteratorOverlapping(): + +class CentredPatchIteratorOverlapping: r""" Iterated patch-wise over the array, where the central patch is centred on the image centre. @@ -519,7 +569,7 @@ class CentredPatchIteratorOverlapping(): """ - def __init__(self, array, psize, offset=None, cval = 0): + def __init__(self, array, psize, offset=None, cval=0): # process arguments self.array = numpy.asarray(array) if is_integer(psize): @@ -536,22 +586,37 @@ def __init__(self, array, psize, offset=None, cval = 0): # validate if numpy.any([x <= 0 for x in self.psize]): - raise ValueError('The patch size must be at least 1 in any dimension.') + raise ValueError("The patch size must be at least 1 in any dimension.") elif len(self.psize) != self.array.ndim: - raise ValueError('The patch dimensionality must equal the array dimensionality.') + raise ValueError( + "The patch dimensionality must equal the array dimensionality." + ) elif numpy.any([x > y for x, y in zip(self.psize, self.array.shape)]): - raise ValueError('The patch is not allowed to be larger than the array in any dimension.') + raise ValueError( + "The patch is not allowed to be larger than the array in any dimension." 
+ ) # compute required padding - even_even_correction = [(1 - s%2) * (1 - ps%2) for s, ps in zip(self.array.shape, self.psize)] - array_centre = [s/2 - (1 - s%2) for s in self.array.shape] - remainder = [(c - ps/2 + ee, - s - c - (ps+1)/2 - ee) for c, s, ps, ee in zip(array_centre, self.array.shape, self.psize, even_even_correction)] - padding = [((ps - l % ps) % ps, - (ps - r % ps) % ps) for (l, r), ps in zip(remainder, self.psize)] + even_even_correction = [ + (1 - s % 2) * (1 - ps % 2) for s, ps in zip(self.array.shape, self.psize) + ] + array_centre = [s / 2 - (1 - s % 2) for s in self.array.shape] + remainder = [ + (c - ps / 2 + ee, s - c - (ps + 1) / 2 - ee) + for c, s, ps, ee in zip( + array_centre, self.array.shape, self.psize, even_even_correction + ) + ] + padding = [ + ((ps - l % ps) % ps, (ps - r % ps) % ps) + for (l, r), ps in zip(remainder, self.psize) + ] # determine slice-points for each dimension and initialize internal slice-point iterator - slicepoints = [list(range(-l, s + r, os)) for s, os, (l, r) in zip(self.array.shape, offset, padding)] + slicepoints = [ + list(range(-l, s + r, os)) + for s, os, (l, r) in zip(self.array.shape, offset, padding) + ] self.__slicepointiter = product(*slicepoints) # initialize internal grid-id iterator @@ -576,25 +641,35 @@ def __next__(self): A list of `slice()` instances definind the patch. """ # trigger internal iterators - spointset = next(self.__slicepointiter) # will raise StopIteration when empty + spointset = next(self.__slicepointiter) # will raise StopIteration when empty gridid = next(self.__grididiter) # compute slicer object and padder tuples slicer = [] padder = [] for dim, sp in enumerate(spointset): - slicer.append( slice(max(0, sp), - min(sp + self.psize[dim], self.array.shape[dim])) ) - padder.append( (max(0, -1 * sp), max(0, sp + self.psize[dim] - self.array.shape[dim])) ) + slicer.append( + slice(max(0, sp), min(sp + self.psize[dim], self.array.shape[dim])) + ) + padder.append( + (max(0, -1 * sp), max(0, sp + self.psize[dim] - self.array.shape[dim])) + ) # create patch and patch mask - patch = numpy.pad(self.array[slicer], padder, mode='constant', constant_values=self.cval) - pmask = numpy.pad(numpy.ones(self.array[slicer].shape, dtype=numpy.bool_), padder, mode='constant', constant_values=0) + patch = numpy.pad( + self.array[slicer], padder, mode="constant", constant_values=self.cval + ) + pmask = numpy.pad( + numpy.ones(self.array[slicer].shape, dtype=numpy.bool_), + padder, + mode="constant", + constant_values=0, + ) return patch, pmask, gridid, slicer next = __next__ @staticmethod - def applyslicer(array, slicer, pmask, cval = 0): + def applyslicer(array, slicer, pmask, cval=0): r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. 
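        For orientation, a hedged sketch of the enclosing overlapping
        iterator: 20-voxel patches advanced by 10 voxels per dimension,
        i.e. 50% overlap (``arr`` is a 2D ndarray; all values are
        illustrative):

        >>> it = CentredPatchIteratorOverlapping(arr, 20, offset=(10, 10))
        >>> for patch, pmask, gridid, slicer in it:
        ...     pass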
@@ -629,9 +704,12 @@ def applyslicer(array, slicer, pmask, cval = 0): """ l = len(slicer) patch = numpy.zeros(list(pmask.shape[:l]) + list(array.shape[l:]), array.dtype) - if not 0 == cval: patch.fill(cval) + if not 0 == cval: + patch.fill(cval) sliced = array[slicer] - patch[pmask] = sliced.reshape([numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:])) + patch[pmask] = sliced.reshape( + [numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:]) + ) return patch @staticmethod @@ -692,12 +770,22 @@ def assembleimage(patches, pmasks, gridids): gridids = [] pmasks = [] for groupid, group in list(groups.items()): - patches.append(numpy.concatenate([p for p, _, _ in sorted(group, key=itemgetter(2))], d)) - pmasks.append(numpy.concatenate([m for _, m, _ in sorted(group, key=itemgetter(2))], d)) + patches.append( + numpy.concatenate( + [p for p, _, _ in sorted(group, key=itemgetter(2))], d + ) + ) + pmasks.append( + numpy.concatenate( + [m for _, m, _ in sorted(group, key=itemgetter(2))], d + ) + ) gridids.append(groupid) objs = find_objects(pmasks[0]) if not 1 == len(objs): - raise ValueError('The assembled patch masks contain more than one binary object.') + raise ValueError( + "The assembled patch masks contain more than one binary object." + ) return patches[0][objs[0]] diff --git a/medpy/metric/__init__.py b/medpy/metric/__init__.py index 451b0adb..f1b62921 100644 --- a/medpy/metric/__init__.py +++ b/medpy/metric/__init__.py @@ -14,12 +14,12 @@ Compare two binary objects ************************** - + .. module:: medpy.metric.binary .. autosummary:: :toctree: generated/ - + dc jc hd @@ -33,45 +33,45 @@ true_negative_rate positive_predictive_value ravd - + Compare two sets of binary objects ********************************** .. autosummary:: :toctree: generated/ - + obj_tpr obj_fpr obj_asd obj_assd - + Compare to sequences of binary objects ************************************** .. autosummary:: :toctree: generated/ - + volume_correlation volume_change_correlation - + Image metrics (:mod:`medpy.metric.image`) ========================================= Some more image metrics (e.g. `~medpy.filter.image.sls` and `~medpy.filter.image.ssd`) -can be found in :mod:`medpy.filter.image`. +can be found in :mod:`medpy.filter.image`. .. module:: medpy.metric.image .. autosummary:: :toctree: generated/ - + mutual_information - + Histogram metrics (:mod:`medpy.metric.histogram`) ================================================= .. module:: medpy.metric.histogram .. autosummary:: :toctree: generated/ - + chebyshev chebyshev_neg chi_square @@ -101,29 +101,21 @@ """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
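# A hedged sketch of the binary metrics listed above (``r`` and ``gt`` are
# same-shape binary ndarrays; the voxel spacing is illustrative). Since the
# re-exports below are removed by this patch, the sketch imports straight
# from the submodule:
from medpy.metric.binary import assd, dc, hd

dice = dc(r, gt)  # Dice coefficient in [0, 1], 1 meaning perfect overlap
hausdorff = hd(r, gt, voxelspacing=(1.0, 1.0, 2.5))
avg_surface = assd(r, gt, voxelspacing=(1.0, 1.0, 2.5))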
# import all functions/methods/classes into the module -from .binary import asd, assd, dc, hd, jc, positive_predictive_value, precision, ravd, recall, sensitivity, specificity, true_negative_rate, true_positive_rate, hd95 -from .binary import obj_asd, obj_assd, obj_fpr, obj_tpr -from .binary import volume_change_correlation, volume_correlation -from .histogram import chebyshev, chebyshev_neg, chi_square, correlate, correlate_1, cosine,\ - cosine_1, cosine_2, cosine_alt, euclidean, fidelity_based, histogram_intersection,\ - histogram_intersection_1, jensen_shannon, kullback_leibler, manhattan, minowski, noelle_1,\ - noelle_2, noelle_3, noelle_4, noelle_5, quadratic_forms, relative_bin_deviation, relative_deviation -from .image import mutual_information # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/metric/binary.py b/medpy/metric/binary.py index 9906d70a..fbab502d 100644 --- a/medpy/metric/binary.py +++ b/medpy/metric/binary.py @@ -22,14 +22,19 @@ # third-party modules import numpy -from scipy.ndimage import _ni_support -from scipy.ndimage import distance_transform_edt, binary_erosion,\ - generate_binary_structure -from scipy.ndimage import label, find_objects +from scipy.ndimage import ( + _ni_support, + binary_erosion, + distance_transform_edt, + find_objects, + generate_binary_structure, + label, +) from scipy.stats import pearsonr # own modules + # code def dc(result, reference): r""" @@ -74,12 +79,13 @@ def dc(result, reference): size_i2 = numpy.count_nonzero(reference) try: - dc = 2. * intersection / float(size_i1 + size_i2) + dc = 2.0 * intersection / float(size_i1 + size_i2) except ZeroDivisionError: dc = 1.0 return dc + def jc(result, reference): """ Jaccard coefficient @@ -118,6 +124,7 @@ def jc(result, reference): return jc + def precision(result, reference): """ Precison. @@ -165,6 +172,7 @@ def precision(result, reference): return precision + def recall(result, reference): """ Recall. @@ -212,6 +220,7 @@ def recall(result, reference): return recall + def sensitivity(result, reference): """ Sensitivity. @@ -223,6 +232,7 @@ def sensitivity(result, reference): """ return recall(result, reference) + def specificity(result, reference): """ Specificity. @@ -270,6 +280,7 @@ def specificity(result, reference): return specificity + def true_negative_rate(result, reference): """ True negative rate. @@ -282,6 +293,7 @@ def true_negative_rate(result, reference): """ return specificity(result, reference) + def true_positive_rate(result, reference): """ True positive rate. @@ -294,6 +306,7 @@ def true_positive_rate(result, reference): """ return recall(result, reference) + def positive_predictive_value(result, reference): """ Positive predictive value. @@ -306,6 +319,7 @@ def positive_predictive_value(result, reference): """ return precision(result, reference) + def hd(result, reference, voxelspacing=None, connectivity=1): """ Hausdorff Distance. @@ -453,9 +467,15 @@ def assd(result, reference, voxelspacing=None, connectivity=1): and then averaging the two lists. The binary images can therefore be supplied in any order. 
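    Examples
    --------
    A hedged call sketch (``r`` and ``gt`` are same-shape binary arrays;
    the anisotropic voxel spacing is illustrative):

    >>> assd(r, gt, voxelspacing=(1.0, 1.0, 2.5))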
""" - assd = numpy.mean( (__surface_distances(result, reference, voxelspacing, connectivity), __surface_distances(reference, result, voxelspacing, connectivity)) ) + assd = numpy.mean( + ( + __surface_distances(result, reference, voxelspacing, connectivity), + __surface_distances(reference, result, voxelspacing, connectivity), + ) + ) return assd + def asd(result, reference, voxelspacing=None, connectivity=1): """ Average surface distance metric. @@ -565,6 +585,7 @@ def asd(result, reference, voxelspacing=None, connectivity=1): asd = sds.mean() return asd + def ravd(result, reference): """ Relative absolute volume difference. @@ -648,10 +669,13 @@ def ravd(result, reference): vol2 = numpy.count_nonzero(reference) if 0 == vol2: - raise RuntimeError('The second supplied array does not contain any binary object.') + raise RuntimeError( + "The second supplied array does not contain any binary object." + ) return (vol1 - vol2) / float(vol2) + def volume_correlation(results, references): r""" Volume correlation. @@ -684,7 +708,8 @@ def volume_correlation(results, references): results_volumes = [numpy.count_nonzero(r) for r in results] references_volumes = [numpy.count_nonzero(r) for r in references] - return pearsonr(results_volumes, references_volumes) # returns (Pearson' + return pearsonr(results_volumes, references_volumes) # returns (Pearson' + def volume_change_correlation(results, references): r""" @@ -721,7 +746,10 @@ def volume_change_correlation(results, references): results_volumes_changes = results_volumes[1:] - results_volumes[:-1] references_volumes_changes = references_volumes[1:] - references_volumes[:-1] - return pearsonr(results_volumes_changes, references_volumes_changes) # returns (Pearson's correlation coefficient, 2-tailed p-value) + return pearsonr( + results_volumes_changes, references_volumes_changes + ) # returns (Pearson's correlation coefficient, 2-tailed p-value) + def obj_assd(result, reference, voxelspacing=None, connectivity=1): """ @@ -774,9 +802,15 @@ def obj_assd(result, reference, voxelspacing=None, connectivity=1): and then averaging the two lists. The binary images can therefore be supplied in any order. """ - assd = numpy.mean( (__obj_surface_distances(result, reference, voxelspacing, connectivity), __obj_surface_distances(reference, result, voxelspacing, connectivity)) ) + assd = numpy.mean( + ( + __obj_surface_distances(result, reference, voxelspacing, connectivity), + __obj_surface_distances(reference, result, voxelspacing, connectivity), + ) + ) return assd + def obj_asd(result, reference, voxelspacing=None, connectivity=1): """ Average surface distance between objects. @@ -911,6 +945,7 @@ def obj_asd(result, reference, voxelspacing=None, connectivity=1): asd = numpy.mean(sds) return asd + def obj_fpr(result, reference, connectivity=1): """ The false positive rate of distinct binary object detection. @@ -1019,9 +1054,12 @@ def obj_fpr(result, reference, connectivity=1): >>> obj_fpr(arr2, arr1) 0.2 """ - _, _, _, n_obj_reference, mapping = __distinct_binary_object_correspondences(reference, result, connectivity) + _, _, _, n_obj_reference, mapping = __distinct_binary_object_correspondences( + reference, result, connectivity + ) return (n_obj_reference - len(mapping)) / float(n_obj_reference) + def obj_tpr(result, reference, connectivity=1): """ The true positive rate of distinct binary object detection. 
@@ -1129,9 +1167,12 @@ def obj_tpr(result, reference, connectivity=1): >>> obj_tpr(arr2, arr1) 1.0 """ - _, _, n_obj_result, _, mapping = __distinct_binary_object_correspondences(reference, result, connectivity) + _, _, n_obj_result, _, mapping = __distinct_binary_object_correspondences( + reference, result, connectivity + ) return len(mapping) / float(n_obj_result) + def __distinct_binary_object_correspondences(reference, result, connectivity=1): """ Determines all distinct (where connectivity is defined by the connectivity parameter @@ -1155,37 +1196,54 @@ def __distinct_binary_object_correspondences(reference, result, connectivity=1): labelmap2, n_obj_reference = label(reference, footprint) # find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-two-many for later processing - slicers = find_objects(labelmap2) # get windows of labelled objects - mapping = dict() # mappings from labels in labelmap2 to corresponding object labels in labelmap1 - used_labels = set() # set to collect all already used labels from labelmap2 - one_to_many = list() # list to collect all one-to-many mappings - for l1id, slicer in enumerate(slicers): # iterate over object in labelmap2 and their windows - l1id += 1 # labelled objects have ids sarting from 1 - bobj = (l1id) == labelmap2[slicer] # find binary object corresponding to the label1 id in the segmentation - l2ids = numpy.unique(labelmap1[slicer][bobj]) # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping) - l2ids = l2ids[0 != l2ids] # remove background identifiers (=0) - if 1 == len(l2ids): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used + slicers = find_objects(labelmap2) # get windows of labelled objects + mapping = ( + dict() + ) # mappings from labels in labelmap2 to corresponding object labels in labelmap1 + used_labels = set() # set to collect all already used labels from labelmap2 + one_to_many = list() # list to collect all one-to-many mappings + for l1id, slicer in enumerate( + slicers + ): # iterate over object in labelmap2 and their windows + l1id += 1 # labelled objects have ids sarting from 1 + bobj = (l1id) == labelmap2[ + slicer + ] # find binary object corresponding to the label1 id in the segmentation + l2ids = numpy.unique( + labelmap1[slicer][bobj] + ) # extract all unique object identifiers at the corresponding positions in the reference (i.e. 
the mapping) + l2ids = l2ids[0 != l2ids] # remove background identifiers (=0) + if 1 == len( + l2ids + ): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used l2id = l2ids[0] if not l2id in used_labels: mapping[l1id] = l2id used_labels.add(l2id) - elif 1 < len(l2ids): # one-to-many mapping: store relationship for later processing + elif 1 < len( + l2ids + ): # one-to-many mapping: store relationship for later processing one_to_many.append((l1id, set(l2ids))) # process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first while True: - one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in one_to_many] # remove already used ids from all sets - one_to_many = [x for x in one_to_many if x[1]] # remove empty sets - one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length + one_to_many = [ + (l1id, l2ids - used_labels) for l1id, l2ids in one_to_many + ] # remove already used ids from all sets + one_to_many = [x for x in one_to_many if x[1]] # remove empty sets + one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length if 0 == len(one_to_many): break - l2id = one_to_many[0][1].pop() # select an arbitrary target label id from the shortest set - mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings - used_labels.add(l2id) # mark target label as used - one_to_many = one_to_many[1:] # delete the processed set from all sets + l2id = one_to_many[0][ + 1 + ].pop() # select an arbitrary target label id from the shortest set + mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings + used_labels.add(l2id) # mark target label as used + one_to_many = one_to_many[1:] # delete the processed set from all sets return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping + def __surface_distances(result, reference, voxelspacing=None, connectivity=1): """ The distances between the surface voxel of binary objects in result and their @@ -1204,13 +1262,19 @@ def __surface_distances(result, reference, voxelspacing=None, connectivity=1): # test for emptiness if 0 == numpy.count_nonzero(result): - raise RuntimeError('The first supplied array does not contain any binary object.') + raise RuntimeError( + "The first supplied array does not contain any binary object." + ) if 0 == numpy.count_nonzero(reference): - raise RuntimeError('The second supplied array does not contain any binary object.') + raise RuntimeError( + "The second supplied array does not contain any binary object." + ) # extract only 1-pixel border line of objects result_border = result ^ binary_erosion(result, structure=footprint, iterations=1) - reference_border = reference ^ binary_erosion(reference, structure=footprint, iterations=1) + reference_border = reference ^ binary_erosion( + reference, structure=footprint, iterations=1 + ) # compute average surface distance # Note: scipys distance transform is calculated only inside the borders of the @@ -1220,13 +1284,16 @@ def __surface_distances(result, reference, voxelspacing=None, connectivity=1): return sds + def __obj_surface_distances(result, reference, voxelspacing=None, connectivity=1): """ The distances between the surface voxel between all corresponding binary objects in result and reference. Correspondence is defined as unique and at least one voxel overlap. 
""" sds = list() - labelmap1, labelmap2, _a, _b, mapping = __distinct_binary_object_correspondences(result, reference, connectivity) + labelmap1, labelmap2, _a, _b, mapping = __distinct_binary_object_correspondences( + result, reference, connectivity + ) slicers1 = find_objects(labelmap1) slicers2 = find_objects(labelmap2) for lid2, lid1 in list(mapping.items()): @@ -1236,6 +1303,7 @@ def __obj_surface_distances(result, reference, voxelspacing=None, connectivity=1 sds.extend(__surface_distances(object1, object2, voxelspacing, connectivity)) return sds + def __combine_windows(w1, w2): """ Joins two windows (defined by tuple of slices) such that their maximum diff --git a/medpy/metric/histogram.py b/medpy/metric/histogram.py index 5fa6fe37..a244cf7b 100644 --- a/medpy/metric/histogram.py +++ b/medpy/metric/histogram.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # @@ -31,41 +31,44 @@ # Bin-by-bin comparison measures # # ////////////////////////////// # -def minowski(h1, h2, p = 2): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins + +def minowski( + h1, h2, p=2 +): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins r""" Minowski distance. - + With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance, and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`. - + The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - - d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p + + d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p \right)^{\frac{1}{p}} *Attributes:* - + - a real metric - + *Attributes for normalized histograms:* - + - :math:`d(H, H')\in[0, \sqrt[p]{2}]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - + - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - + - not applicable - + Parameters ---------- h1 : sequence @@ -74,25 +77,31 @@ def minowski(h1, h2, p = 2): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24 The second histogram. p : float The :math:`p` value in the Minowksi distance formula. - + Returns ------- minowski : float Minowski distance. - + Raises ------ ValueError If ``p`` is zero. 
""" h1, h2 = __prepare_histogram(h1, h2) - if 0 == p: raise ValueError('p can not be zero') + if 0 == p: + raise ValueError("p can not be zero") elif int == type(p): - if p > 0 and p < 25: return __minowski_low_positive_integer_p(h1, h2, p) - elif p < 0 and p > -25: return __minowski_low_negative_integer_p(h1, h2, p) - return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1./p) + if p > 0 and p < 25: + return __minowski_low_positive_integer_p(h1, h2, p) + elif p < 0 and p > -25: + return __minowski_low_negative_integer_p(h1, h2, p) + return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1.0 / p) -def __minowski_low_positive_integer_p(h1, h2, p = 2): # 11..43 us for p = 1..24 \w 100 bins + +def __minowski_low_positive_integer_p( + h1, h2, p=2 +): # 11..43 us for p = 1..24 \w 100 bins """ A faster implementation of the Minowski distance for positive integer < 25. @note do not use this function directly, but the general @link minowski() method. @@ -100,10 +109,14 @@ def __minowski_low_positive_integer_p(h1, h2, p = 2): # 11..43 us for p = 1..24 """ mult = scipy.absolute(h1 - h2) dif = mult - for _ in range(p - 1): dif = scipy.multiply(dif, mult) - return math.pow(scipy.sum(dif), 1./p) + for _ in range(p - 1): + dif = scipy.multiply(dif, mult) + return math.pow(scipy.sum(dif), 1.0 / p) + -def __minowski_low_negative_integer_p(h1, h2, p = 2): # 14..46 us for p = -1..-24 \w 100 bins +def __minowski_low_negative_integer_p( + h1, h2, p=2 +): # 14..46 us for p = -1..-24 \w 100 bins """ A faster implementation of the Minowski distance for negative integer > -25. @note do not use this function directly, but the general @link minowski() method. @@ -111,13 +124,15 @@ def __minowski_low_negative_integer_p(h1, h2, p = 2): # 14..46 us for p = -1..-2 """ mult = scipy.absolute(h1 - h2) dif = mult - for _ in range(-p + 1): dif = scipy.multiply(dif, mult) - return math.pow(scipy.sum(1./dif), 1./p) + for _ in range(-p + 1): + dif = scipy.multiply(dif, mult) + return math.pow(scipy.sum(1.0 / dif), 1.0 / p) + -def manhattan(h1, h2): # # 7 us @array, 31 us @list \w 100 bins +def manhattan(h1, h2): # # 7 us @array, 31 us @list \w 100 bins r""" Equal to Minowski distance with :math:`p=1`. - + See also -------- minowski @@ -125,10 +140,11 @@ def manhattan(h1, h2): # # 7 us @array, 31 us @list \w 100 bins h1, h2 = __prepare_histogram(h1, h2) return scipy.sum(scipy.absolute(h1 - h2)) -def euclidean(h1, h2): # 9 us @array, 33 us @list \w 100 bins + +def euclidean(h1, h2): # 9 us @array, 33 us @list \w 100 bins r""" Equal to Minowski distance with :math:`p=2`. - + See also -------- minowski @@ -136,52 +152,53 @@ def euclidean(h1, h2): # 9 us @array, 33 us @list \w 100 bins h1, h2 = __prepare_histogram(h1, h2) return math.sqrt(scipy.sum(scipy.square(scipy.absolute(h1 - h2)))) -def chebyshev(h1, h2): # 12 us @array, 36 us @list \w 100 bins + +def chebyshev(h1, h2): # 12 us @array, 36 us @list \w 100 bins r""" Chebyshev distance. - + Also Tchebychev distance, Maximum or :math:`L_{\infty}` metric; equal to Minowski distance with :math:`p=+\infty`. For the case of :math:`p=-\infty`, use `chebyshev_neg`. - + The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{\infty}(H, H') = \max_{m=1}^M|H_m-H'_m| - + *Attributes:* - + - semimetric (triangle equation satisfied?) 
- + *Attributes for normalized histograms:* - + - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - + - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - + - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. - + Returns ------- chebyshev : float Chebyshev distance. - + See also -------- minowski, chebyshev_neg @@ -189,52 +206,53 @@ def chebyshev(h1, h2): # 12 us @array, 36 us @list \w 100 bins h1, h2 = __prepare_histogram(h1, h2) return max(scipy.absolute(h1 - h2)) -def chebyshev_neg(h1, h2): # 12 us @array, 36 us @list \w 100 bins + +def chebyshev_neg(h1, h2): # 12 us @array, 36 us @list \w 100 bins r""" Chebyshev negative distance. - + Also Tchebychev distance, Minimum or :math:`L_{-\infty}` metric; equal to Minowski distance with :math:`p=-\infty`. For the case of :math:`p=+\infty`, use `chebyshev`. - + The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{-\infty}(H, H') = \min_{m=1}^M|H_m-H'_m| - + *Attributes:* - semimetric (triangle equation satisfied?) - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. - + Returns ------- chebyshev_neg : float Chebyshev negative distance. - + See also -------- minowski, chebyshev @@ -242,42 +260,43 @@ def chebyshev_neg(h1, h2): # 12 us @array, 36 us @list \w 100 bins h1, h2 = __prepare_histogram(h1, h2) return min(scipy.absolute(h1 - h2)) -def histogram_intersection(h1, h2): # 6 us @array, 30 us @list \w 100 bins + +def histogram_intersection(h1, h2): # 6 us @array, 30 us @list \w 100 bins r""" Calculate the common part of two histograms. - + The histogram intersection between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{\cap}(H, H') = \sum_{m=1}^M\min(H_m, H'_m) - + *Attributes:* - a real metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- histogram_intersection : float @@ -286,58 +305,60 @@ def histogram_intersection(h1, h2): # 6 us @array, 30 us @list \w 100 bins h1, h2 = __prepare_histogram(h1, h2) return scipy.sum(scipy.minimum(h1, h2)) -def histogram_intersection_1(h1, h2): # 7 us @array, 31 us @list \w 100 bins + +def histogram_intersection_1(h1, h2): # 7 us @array, 31 us @list \w 100 bins r""" Turns the histogram intersection similarity into a distance measure for normalized, positive histograms. - + .. math:: - + d_{\bar{\cos}}(H, H') = 1 - d_{\cap}(H, H') - + See `histogram_intersection` for the definition of :math:`d_{\cap}(H, H')`. 
- + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- histogram_intersection : float Intersection between the two histograms. """ - return 1. - histogram_intersection(h1, h2) + return 1.0 - histogram_intersection(h1, h2) -def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins + +def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins r""" Calculate the deviation between two histograms. - + The relative deviation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{rd}(H, H') = \frac{ \sqrt{\sum_{m=1}^M(H_m - H'_m)^2} @@ -348,34 +369,34 @@ def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins \sqrt{\sum_{m=1}^M {H'}_m^2} \right) } - + *Attributes:* - semimetric (triangle equation satisfied?) - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \sqrt{2}]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, 2]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - - not applicable - + - not applicable + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- relative_deviation : float @@ -383,18 +404,21 @@ def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins """ h1, h2 = __prepare_histogram(h1, h2) numerator = math.sqrt(scipy.sum(scipy.square(h1 - h2))) - denominator = (math.sqrt(scipy.sum(scipy.square(h1))) + math.sqrt(scipy.sum(scipy.square(h2)))) / 2. + denominator = ( + math.sqrt(scipy.sum(scipy.square(h1))) + math.sqrt(scipy.sum(scipy.square(h2))) + ) / 2.0 return numerator / denominator -def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins + +def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins r""" Calculate the bin-wise deviation between two histograms. - + The relative bin deviation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{rbd}(H, H') = \sum_{m=1}^M \frac{ \sqrt{(H_m - H'_m)^2} @@ -405,34 +429,34 @@ def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins \sqrt{{H'}_m^2} \right) } - + *Attributes:* - a real metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - - not applicable - + - not applicable + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- relative_bin_deviation : float @@ -440,126 +464,138 @@ def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins """ h1, h2 = __prepare_histogram(h1, h2) numerator = scipy.sqrt(scipy.square(h1 - h2)) - denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2. 
- old_err_state = scipy.seterr(invalid='ignore') # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0 + denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2.0 + old_err_state = scipy.seterr( + invalid="ignore" + ) # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0 result = numerator / denominator scipy.seterr(**old_err_state) - result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also + result[ + scipy.isnan(result) + ] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also return scipy.sum(result) -def chi_square(h1, h2): # 23 us @array, 49 us @list \w 100 + +def chi_square(h1, h2): # 23 us @array, 49 us @list \w 100 r""" Chi-square distance. - + Measure how unlikely it is that one distribution (histogram) was drawn from the other. The Chi-square distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{\chi^2}(H, H') = \sum_{m=1}^M \frac{ (H_m - H'_m)^2 }{ H_m + H'_m } - + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 2]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. - + Returns ------- chi_square : float Chi-square distance. """ h1, h2 = __prepare_histogram(h1, h2) - old_err_state = scipy.seterr(invalid='ignore') # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0 + old_err_state = scipy.seterr( + invalid="ignore" + ) # division by zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0 result = scipy.square(h1 - h2) / (h1 + h2) scipy.seterr(**old_err_state) - result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also + result[ + scipy.isnan(result) + ] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also return scipy.sum(result) - -def kullback_leibler(h1, h2): # 83 us @array, 109 us @list \w 100 bins + +def kullback_leibler(h1, h2): # 83 us @array, 109 us @list \w 100 bins r""" Kullback-Leibler divergence. - + Compute how inefficient it would be to code one histogram into another. Actually computes :math:`\frac{d_{KL}(h1, h2) + d_{KL}(h2, h1)}{2}` to achieve symmetry. - + The Kullback-Leibler divergence between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{KL}(H, H') = \sum_{m=1}^M H_m\log\frac{H_m}{H'_m} - + *Attributes:* - quasimetric (but made symmetric) - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, where h1[i] > 0 for any i such that h2[i] > 0, normalized. h2 : sequence The second histogram, where h2[i] > 0 for any i such that h1[i] > 0, normalized, same bins as ``h1``.
- + Returns ------- kullback_leibler : float Kullback-Leibler divergence. """ - old_err_state = scipy.seterr(divide='raise') + old_err_state = scipy.seterr(divide="raise") try: h1, h2 = __prepare_histogram(h1, h2) - result = (__kullback_leibler(h1, h2) + __kullback_leibler(h2, h1)) / 2. + result = (__kullback_leibler(h1, h2) + __kullback_leibler(h2, h1)) / 2.0 scipy.seterr(**old_err_state) return result except FloatingPointError: scipy.seterr(**old_err_state) - raise ValueError('h1 can only contain zero values where h2 also contains zero values and vice-versa') - -def __kullback_leibler(h1, h2): # 36.3 us + raise ValueError( + "h1 can only contain zero values where h2 also contains zero values and vice-versa" + ) + + +def __kullback_leibler(h1, h2): # 36.3 us """ The actual KL implementation. @see kullback_leibler() for details. Expects the histograms to be of type scipy.ndarray. @@ -568,105 +604,107 @@ def __kullback_leibler(h1, h2): # 36.3 us mask = h1 != 0 result[mask] = scipy.multiply(h1[mask], scipy.log(h1[mask] / h2[mask])) return scipy.sum(result) - -def jensen_shannon(h1, h2): # 85 us @array, 110 us @list \w 100 bins + + +def jensen_shannon(h1, h2): # 85 us @array, 110 us @list \w 100 bins r""" Jensen-Shannon divergence. - + A symmetric and numerically more stable empirical extension of the Kullback-Leibler divergence. - + The Jensen Shannon divergence between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{JSD}(H, H') = \frac{1}{2} d_{KL}(H, H^*) + \frac{1}{2} d_{KL}(H', H^*) - + with :math:`H^*=\frac{1}{2}(H + H')`. - + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- jensen_shannon : float - Jensen-Shannon divergence. + Jensen-Shannon divergence. """ h1, h2 = __prepare_histogram(h1, h2) - s = (h1 + h2) / 2. - return __kullback_leibler(h1, s) / 2. + __kullback_leibler(h2, s) / 2. - -def fidelity_based(h1, h2): # 25 us @array, 51 us @list \w 100 bins + s = (h1 + h2) / 2.0 + return __kullback_leibler(h1, s) / 2.0 + __kullback_leibler(h2, s) / 2.0 + + +def fidelity_based(h1, h2): # 25 us @array, 51 us @list \w 100 bins r""" Fidelity based distance. - + Also Bhattacharyya distance; see also the extensions `noelle_1` to `noelle_5`. - + The metric between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. math:: - + d_{F}(H, H') = \sum_{m=1}^M\sqrt{H_m * H'_m} - - + + *Attributes:* - not a metric, a similarity - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. 
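A short usage sketch for the two divergence measures defined above, `kullback_leibler` and `jensen_shannon` (toy normalized histograms assumed for illustration)::

    import scipy

    from medpy.metric.histogram import jensen_shannon, kullback_leibler

    h1 = scipy.asarray([0.25, 0.25, 0.25, 0.25])
    h2 = scipy.asarray([0.4, 0.3, 0.2, 0.1])

    # symmetrized KL; raises ValueError when the zero bins of h1 and h2 differ
    print(kullback_leibler(h1, h2))
    # Jensen-Shannon is the numerically more stable choice for histograms with zeros
    print(jensen_shannon(h1, h2))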
- + Notes ----- The fidelity between two histograms :math:`H` and :math:`H'` is the same as the @@ -674,234 +712,239 @@ def fidelity_based(h1, h2): # 25 us @array, 51 us @list \w 100 bins """ h1, h2 = __prepare_histogram(h1, h2) result = scipy.sum(scipy.sqrt(h1 * h2)) - result = 0 if 0 > result else result # for rounding errors - result = 1 if 1 < result else result # for rounding errors + result = 0 if 0 > result else result # for rounding errors + result = 1 if 1 < result else result # for rounding errors return result -def noelle_1(h1, h2): # 26 us @array, 52 us @list \w 100 bins + +def noelle_1(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. - + .. math:: - + d_{\bar{F}}(H, H') = 1 - d_{F}(H, H') - + See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. - + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. - + References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 """ - return 1. - fidelity_based(h1, h2) + return 1.0 - fidelity_based(h1, h2) -def noelle_2(h1, h2): # 26 us @array, 52 us @list \w 100 bins + +def noelle_2(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. - + .. math:: - + d_{\sqrt{1-F}}(H, H') = \sqrt{1 - d_{F}(H, H')} - + See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. - + *Attributes:* - metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. - + References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 """ - return math.sqrt(1. - fidelity_based(h1, h2)) + return math.sqrt(1.0 - fidelity_based(h1, h2)) -def noelle_3(h1, h2): # 26 us @array, 52 us @list \w 100 bins + +def noelle_3(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. - + .. math:: - + d_{\log(2-F)}(H, H') = \log(2 - d_{F}(H, H')) - + See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. - + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, log(2)]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. - + References ---------- .. [1] M. 
Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 """ return math.log(2 - fidelity_based(h1, h2)) -def noelle_4(h1, h2): # 26 us @array, 52 us @list \w 100 bins + +def noelle_4(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. - + .. math:: - + d_{\arccos F}(H, H') = \frac{2}{\pi} \arccos d_{F}(H, H') - + See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. - + *Attributes:* - metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. - + References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 """ - return 2. / math.pi * math.acos(fidelity_based(h1, h2)) + return 2.0 / math.pi * math.acos(fidelity_based(h1, h2)) + -def noelle_5(h1, h2): # 26 us @array, 52 us @list \w 100 bins +def noelle_5(h1, h2): # 26 us @array, 52 us @list \w 100 bins r""" Extension of `fidelity_based` proposed by [1]_. - + .. math:: - + d_{\sin F}(H, H') = \sqrt{1 -d_{F}^2(H, H')} - + See `fidelity_based` for the definition of :math:`d_{F}(H, H')`. - + *Attributes:* - metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- fidelity_based : float Fidelity based distance. - + References ---------- .. [1] M. Noelle "Distribution Distance Measures Applied to 3-D Object Recognition", 2003 @@ -909,62 +952,67 @@ def noelle_5(h1, h2): # 26 us @array, 52 us @list \w 100 bins return math.sqrt(1 - math.pow(fidelity_based(h1, h2), 2)) -def cosine_alt(h1, h2): # 17 us @array, 42 us @list \w 100 bins +def cosine_alt(h1, h2): # 17 us @array, 42 us @list \w 100 bins r""" Alternative implementation of the `cosine` distance measure. - + Notes ----- Under development. """ h1, h2 = __prepare_histogram(h1, h2) - return -1 * float(scipy.sum(h1 * h2)) / (scipy.sum(scipy.power(h1, 2)) * scipy.sum(scipy.power(h2, 2))) + return ( + -1 + * float(scipy.sum(h1 * h2)) + / (scipy.sum(scipy.power(h1, 2)) * scipy.sum(scipy.power(h2, 2))) + ) -def cosine(h1, h2): # 17 us @array, 42 us @list \w 100 bins + +def cosine(h1, h2): # 17 us @array, 42 us @list \w 100 bins r""" Cosine simmilarity. - + Compute the angle between the two histograms in vector space irrespective of their length. The cosine similarity between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. 
math:: - + d_{\cos}(H, H') = \cos\alpha = \frac{H * H'}{\|H\| \|H'\|} = \frac{\sum_{m=1}^M H_m*H'_m}{\sqrt{\sum_{m=1}^M H_m^2} * \sqrt{\sum_{m=1}^M {H'}_m^2}} - - + + *Attributes:* - not a metric, a similarity - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[-1, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - - not applicable - + - not applicable + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- cosine : float Cosine simmilarity. - + Notes ----- The resulting similarity ranges from -1 meaning exactly opposite, to 1 meaning @@ -972,152 +1020,157 @@ def cosine(h1, h2): # 17 us @array, 42 us @list \w 100 bins indicating intermediate similarity or dissimilarity. """ h1, h2 = __prepare_histogram(h1, h2) - return scipy.sum(h1 * h2) / math.sqrt(scipy.sum(scipy.square(h1)) * scipy.sum(scipy.square(h2))) + return scipy.sum(h1 * h2) / math.sqrt( + scipy.sum(scipy.square(h1)) * scipy.sum(scipy.square(h2)) + ) -def cosine_1(h1, h2): # 18 us @array, 43 us @list \w 100 bins + +def cosine_1(h1, h2): # 18 us @array, 43 us @list \w 100 bins r""" Cosine simmilarity. - + Turns the cosine similarity into a distance measure for normalized, positive histograms. - + .. math:: - + d_{\bar{\cos}}(H, H') = 1 - d_{\cos}(H, H') - + See `cosine` for the definition of :math:`d_{\cos}(H, H')`. - + *Attributes:* - metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- cosine : float Cosine distance. """ - return 1. - cosine(h1, h2) + return 1.0 - cosine(h1, h2) -def cosine_2(h1, h2): # 19 us @array, 44 us @list \w 100 bins + +def cosine_2(h1, h2): # 19 us @array, 44 us @list \w 100 bins r""" Cosine simmilarity. - + Turns the cosine similarity into a distance measure for normalized, positive histograms. - + .. math:: - + d_{\bar{\cos}}(H, H') = 1 - \frac{2*\arccos d_{\cos}(H, H')}{pi} - + See `cosine` for the definition of :math:`d_{\cos}(H, H')`. - + *Attributes:* - metric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - not applicable - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. - + Returns ------- cosine : float - Cosine distance. + Cosine distance. """ - return 1. - (2 * cosine(h1, h2)) / math.pi + return 1.0 - (2 * cosine(h1, h2)) / math.pi + -def correlate(h1, h2): # 31 us @array, 55 us @list \w 100 bins +def correlate(h1, h2): # 31 us @array, 55 us @list \w 100 bins r""" Correlation between two histograms. - + The histogram correlation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: - + .. 
math:: - - d_{corr}(H, H') = + + d_{corr}(H, H') = \frac{ \sum_{m=1}^M (H_m-\bar{H}) \cdot (H'_m-\bar{H'}) }{ \sqrt{\sum_{m=1}^M (H_m-\bar{H})^2 \cdot \sum_{m=1}^M (H'_m-\bar{H'})^2} } - + with :math:`\bar{H}` and :math:`\bar{H'}` being the mean values of :math:`H` and :math:`H'`, respectively - + *Attributes:* - not a metric, a similarity - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[-1, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[-1, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- correlate : float Correlation between the histograms. - + Notes ----- Returns 0 if one of h1 or h2 contains only zeros. - + """ h1, h2 = __prepare_histogram(h1, h2) h1m = h1 - scipy.sum(h1) / float(h1.size) @@ -1126,74 +1179,76 @@ def correlate(h1, h2): # 31 us @array, 55 us @list \w 100 bins b = math.sqrt(scipy.sum(scipy.square(h1m)) * scipy.sum(scipy.square(h2m))) return 0 if 0 == b else a / b -def correlate_1(h1, h2): # 32 us @array, 56 us @list \w 100 bins + +def correlate_1(h1, h2): # 32 us @array, 56 us @list \w 100 bins r""" Correlation distance. - + Turns the histogram correlation into a distance measure for normalized, positive histograms. - + .. math:: - + d_{\bar{corr}}(H, H') = \frac{1-d_{corr}(H, H')}{2}. - + See `correlate` for the definition of :math:`d_{corr}(H, H')`. - + *Attributes:* - semimetric - + *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` - + *Attributes for not-equal histograms:* - not applicable - + Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. - + Returns ------- correlate : float Correlation distance between the histograms. - + Notes ----- Returns 0.5 if one of h1 or h2 contains only zeros. """ - return (1. - correlate(h1, h2))/2. + return (1.0 - correlate(h1, h2)) / 2.0 # ///////////////////////////// # # Cross-bin comparison measures # # ///////////////////////////// # + def quadratic_forms(h1, h2): r""" Quadratic forms metric. - + Notes ----- UNDER DEVELOPMENT - + This distance measure shows very strange behaviour. The expression transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the square root. Some examples:: - + h1 h2 transpose(h1-h2) * A * (h1-h2) [1, 0] to [0.0, 1.0] : -2.0 [1, 0] to [0.5, 0.5] : 0.0 @@ -1206,36 +1261,39 @@ def quadratic_forms(h1, h2): [1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716 [1, 0] to [0.9, 0.1] : 0.0177777777778 [1, 0] to [1, 0]: 0.0 - + It is clearly undesirable to receive negative values and even worse to get a value of zero for other cases than the same histograms. """ h1, h2 = __prepare_histogram(h1, h2) A = __quadratic_forms_matrix_euclidean(h1, h2) - return math.sqrt((h1-h2).dot(A.dot(h1-h2))) # transpose(h1-h2) * A * (h1-h2) - + return math.sqrt((h1 - h2).dot(A.dot(h1 - h2)))  # transpose(h1-h2) * A * (h1-h2) + + def __quadratic_forms_matrix_euclidean(h1, h2): r""" Compute the bin-similarity matrix for the quadratic form distance measure.
The matrix :math:`A` for two histograms :math:`H` and :math:`H'` of size :math:`m` and :math:`n` respectively is defined as - + .. math:: - + A_{m,n} = 1 - \frac{d_2(H_m, {H'}_n)}{d_{max}} - + with - + .. math:: - + d_{max} = \max_{m,n}d_2(H_m, {H'}_n) - + See also -------- quadratic_forms """ - A = scipy.repeat(h2[:,scipy.newaxis], h1.size, 1) # repeat second array to form a matrix - A = scipy.absolute(A - h1) # euclidean distances + A = scipy.repeat( + h2[:, scipy.newaxis], h1.size, 1 + ) # repeat second array to form a matrix + A = scipy.absolute(A - h1) # euclidean distances return 1 - (A / float(A.max())) @@ -1243,10 +1301,11 @@ def __quadratic_forms_matrix_euclidean(h1, h2): # Helper functions # # //////////////// # + def __prepare_histogram(h1, h2): """Convert the histograms to scipy.ndarrays if required.""" h1 = h1 if scipy.ndarray == type(h1) else scipy.asarray(h1) h2 = h2 if scipy.ndarray == type(h2) else scipy.asarray(h2) if h1.shape != h2.shape or h1.size != h2.size: - raise ValueError('h1 and h2 must be of same shape and size') + raise ValueError("h1 and h2 must be of same shape and size") return h1, h2 diff --git a/medpy/metric/image.py b/medpy/metric/image.py index bc995e05..187a4598 100644 --- a/medpy/metric/image.py +++ b/medpy/metric/image.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# + # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# + # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# + # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # @@ -26,6 +26,7 @@ # own modules from ..core import ArgumentError + # code def mutual_information(i1, i2, bins=256): r""" @@ -33,29 +34,29 @@ MI is not a real metric, but a symmetric and nonnegative similarity measure that takes high values for similar images. Negative values are also possible. - + Intuitively, mutual information measures the information that ``i1`` and ``i2`` share: it measures how much knowing one of these variables reduces uncertainty about the other. - + The entropy is defined as: - + .. math:: - + H(X) = - \sum_i p(g_i) * ln(p(g_i)) with :math:`p(g_i)` being the intensity probability of the image's grey value :math:`g_i`. - + Assuming two images :math:`R` and :math:`T`, the mutual information is then computed by comparing the images' entropy values (i.e. a measure of how well-structured the common histogram is). The distance metric is then calculated as follows: - + .. math:: - + MI(R,T) = H(R) + H(T) - H(R,T) = H(R) - H(R|T) = H(T) - H(T|R) - + A maximization of the mutual information is equal to a minimization of the joint entropy. - + Parameters ---------- i1 : array_like The first image. i2 : array_like The second image. bins : integer The number of histogram bins (squared for the joined histogram). - + Returns ------- mutual_information : float The mutual information distance value between the supplied images.
- + Raises ------ ArgumentError @@ -78,43 +79,48 @@ def mutual_information(i1, i2, bins=256): # pre-process function arguments i1 = numpy.asarray(i1) i2 = numpy.asarray(i2) - + # validate function arguments if not i1.shape == i2.shape: - raise ArgumentError('the two supplied array-like sequences i1 and i2 must be of the same shape') - + raise ArgumentError( + "the two supplied array-like sequences i1 and i2 must be of the same shape" + ) + # compute i1 and i2 histogram range i1_range = __range(i1, bins) i2_range = __range(i2, bins) - + # compute joined and separated normed histograms - i1i2_hist, _, _ = numpy.histogram2d(i1.flatten(), i2.flatten(), bins=bins, range=[i1_range, i2_range]) # Note: histogram2d does not flatten array on its own + i1i2_hist, _, _ = numpy.histogram2d( + i1.flatten(), i2.flatten(), bins=bins, range=[i1_range, i2_range] + ) # Note: histogram2d does not flatten array on its own i1_hist, _ = numpy.histogram(i1, bins=bins, range=i1_range) i2_hist, _ = numpy.histogram(i2, bins=bins, range=i2_range) - + # compute joined and separated entropy i1i2_entropy = __entropy(i1i2_hist) i1_entropy = __entropy(i1_hist) i2_entropy = __entropy(i2_hist) - + # compute and return the mutual information distance return i1_entropy + i2_entropy - i1i2_entropy + def __range(a, bins): - '''Compute the histogram range of the values in the array a according to - scipy.stats.histogram.''' + """Compute the histogram range of the values in the array a according to + scipy.stats.histogram.""" a = numpy.asarray(a) a_max = a.max() a_min = a.min() s = 0.5 * (a_max - a_min) / float(bins - 1) return (a_min - s, a_max + s) - + + def __entropy(data): - '''Compute entropy of the flattened data set (e.g. a density distribution).''' + """Compute entropy of the flattened data set (e.g. a density distribution).""" # normalize and convert to float - data = data/float(numpy.sum(data)) + data = data / float(numpy.sum(data)) # for each grey-value g with a probability p(g) = 0, the entropy is defined as 0, therefore we remove these values and also flatten the histogram data = data[numpy.nonzero(data)] # compute entropy - return -1. * numpy.sum(data * numpy.log2(data)) - \ No newline at end of file + return -1.0 * numpy.sum(data * numpy.log2(data)) diff --git a/medpy/neighbours/__init__.py b/medpy/neighbours/__init__.py index c5afe810..9d30aac8 100644 --- a/medpy/neighbours/__init__.py +++ b/medpy/neighbours/__init__.py @@ -5,7 +5,7 @@ .. currentmodule:: medpy.neighbours This package contains nearest neighbour methods. - + Patch-wise :mod:`medpy.neighbours.knn` =========================================== K-nearest-neighbours based methods. The interfaces are loosely based on the @@ -15,31 +15,28 @@ .. module:: medpy.neighbours.knn .. autosummary:: :toctree: generated/ - + mkneighbors_graph pdist - + """ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
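Returning to `medpy.metric.image` above, a minimal sketch of how `mutual_information` is typically called (toy random images assumed)::

    import numpy

    from medpy.metric.image import mutual_information

    i1 = numpy.random.rand(32, 32)              # toy image
    i2 = i1 + 0.05 * numpy.random.rand(32, 32)  # noisy copy of it

    print(mutual_information(i1, i1, bins=64))  # MI of an image with itself
    print(mutual_information(i1, i2, bins=64))  # lower for the noisy copy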
# import all functions/methods/classes into the module -from .knn import mkneighbors_graph, pdist # import all sub-modules in the __all__ variable -__all__ = [s for s in dir() if not s.startswith('_')] - - +__all__ = [s for s in dir() if not s.startswith("_")] diff --git a/medpy/neighbours/knn.py b/medpy/neighbours/knn.py index 7ccf9c3e..51027e57 100644 --- a/medpy/neighbours/knn.py +++ b/medpy/neighbours/knn.py @@ -18,9 +18,10 @@ # since 2014-10-15 # status Release +import warnings + # build-in modules from itertools import combinations -import warnings # third-party modules import numpy @@ -30,8 +31,11 @@ # constants + # code -def mkneighbors_graph(observations, n_neighbours, metric, mode='connectivity', metric_params = None): +def mkneighbors_graph( + observations, n_neighbours, metric, mode="connectivity", metric_params=None +): """ Computes the (weighted) graph of mutual k-Neighbors for observations. @@ -68,7 +72,7 @@ def mkneighbors_graph(observations, n_neighbours, metric, mode='connectivity', m pdists = pdist(observations, metric) # get the k nearest neighbours for each patch - k_nearest_nbhs = numpy.argsort(pdists)[:,:n_neighbours] + k_nearest_nbhs = numpy.argsort(pdists)[:, :n_neighbours] # create a mask denoting the k nearest neighbours in image_pdist k_nearest_mutual_nbhs_mask = numpy.zeros(pdists.shape, numpy.bool_) @@ -85,14 +89,15 @@ def mkneighbors_graph(observations, n_neighbours, metric, mode='connectivity', m if numpy.any(pdists[k_nearest_mutual_nbhs_mask] == 0): warnings.warn('The graph contains at least one edge with a weight of "0".') - if 'connectivity' == mode: + if "connectivity" == mode: return csr_matrix(k_nearest_mutual_nbhs_mask) - elif 'distance' == mode: + elif "distance" == mode: return csr_matrix(pdists) else: return csr_matrix(k_nearest_mutual_nbhs_mask), csr_matrix(pdists) -def pdist(objects, dmeasure, diagval = numpy.inf): + +def pdist(objects, dmeasure, diagval=numpy.inf): """ Compute the pair-wise distances between arbitrary objects. diff --git a/medpy/utilities/__init__.py b/medpy/utilities/__init__.py index 92d4061b..e0e935a7 100644 --- a/medpy/utilities/__init__.py +++ b/medpy/utilities/__init__.py @@ -15,7 +15,7 @@ .. module:: medpy.utilities.argparseu .. autosummary:: :toctree: generated/ - + sequenceOfIntegers sequenceOfIntegersGt sequenceOfIntegersGe @@ -29,4 +29,6 @@ sequenceOfFloatsLe """ -from . import argparseu \ No newline at end of file +from . import argparseu as argparseu # nopycln: import + +_all__ = ["argparseu"] diff --git a/medpy/utilities/argparseu.py b/medpy/utilities/argparseu.py index 4715125e..8226911e 100644 --- a/medpy/utilities/argparseu.py +++ b/medpy/utilities/argparseu.py @@ -1,15 +1,15 @@ # Copyright (C) 2013 Oskar Maier -# +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
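For the mutual k-neighbours code reworked above, a usage sketch (toy observations and a simple callable metric assumed)::

    import numpy

    from medpy.neighbours import mkneighbors_graph

    observations = numpy.random.rand(5, 3)  # five 3-dimensional toy observations

    def euclidean(x, y):  # any two-argument distance callable works here
        return numpy.linalg.norm(x - y)

    graph = mkneighbors_graph(observations, 2, euclidean, mode="connectivity")
    print(graph.toarray())  # boolean adjacency of mutual 2-neighbours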
# @@ -20,46 +20,48 @@ # build-in modules import argparse -import itertools import os # third-party modules # own modules + # code def existingDirectory(string): """ A custom type for the argparse commandline parser. Check whether the supplied string points to a valid directory. - + Examples -------- - - >>> parser.add_argument('argname', type=existingDirectory, help='help') + + >>> parser.add_argument('argname', type=existingDirectory, help='help') """ if not os.path.isdir(string): - argparse.ArgumentTypeError('{} is not a valid directory.'.format(string)) + raise argparse.ArgumentTypeError("{} is not a valid directory.".format(string)) return string + def sequenceOfStrings(string): """ A custom type for the argparse commandline parser. Accepts comma-separated lists of strings. - + Examples -------- - + >>> parser.add_argument('argname', type=sequenceOfStrings, help='help') """ - return string.split(',') + return string.split(",") + def sequenceOfIntegersGeAscendingStrict(string): """ A custom type for the argparse commandline parser. Accepts only comma-separated lists of valid integer values that are greater than or equal to 0 and in ascending order. - + Examples -------- @@ -67,6 +69,7 @@ def sequenceOfIntegersGeAscendingStrict(string): """ return __sequenceAscendingStrict(__sequenceGe(sequenceOfIntegers(string))) + def sequenceOfIntegers(string): """ A custom type for the argparse commandline parser. @@ -78,9 +81,10 @@ def sequenceOfIntegers(string): >>> parser.add_argument('argname', type=sequenceOfIntegers, help='help') """ - value = list(map(int, string.split(','))) + value = list(map(int, string.split(","))) return value + def sequenceOfIntegersGt(string): """ A custom type for the argparse commandline parser. @@ -95,6 +99,7 @@ def sequenceOfIntegersGt(string): value = sequenceOfIntegers(string) return __sequenceGt(value) + def sequenceOfIntegersGe(string): """ A custom type for the argparse commandline parser. @@ -110,6 +115,7 @@ def sequenceOfIntegersGe(string): value = sequenceOfIntegers(string) return __sequenceGe(value) + def sequenceOfIntegersLt(string): """ A custom type for the argparse commandline parser. @@ -124,6 +130,7 @@ def sequenceOfIntegersLt(string): value = sequenceOfIntegers(string) return __sequenceLt(value) + def sequenceOfIntegersLe(string): """ A custom type for the argparse commandline parser. @@ -139,6 +146,7 @@ def sequenceOfIntegersLe(string): value = sequenceOfIntegers(string) return __sequenceLe(value) + def sequenceOfFloats(string): """ A custom type for the argparse commandline parser. @@ -150,9 +158,10 @@ def sequenceOfFloats(string): >>> parser.add_argument('argname', type=sequenceOfFloats, help='help') """ - value = list(map(float, string.split(','))) + value = list(map(float, string.split(","))) return value + def sequenceOfFloatsGt(string): """ A custom type for the argparse commandline parser. @@ -167,6 +176,7 @@ def sequenceOfFloatsGt(string): value = sequenceOfFloats(string) return __sequenceGt(value) + def sequenceOfFloatsGe(string): """ A custom type for the argparse commandline parser. @@ -182,6 +192,7 @@ def sequenceOfFloatsGe(string): value = sequenceOfFloats(string) return __sequenceGe(value) + def sequenceOfFloatsLt(string): """ A custom type for the argparse commandline parser. @@ -196,6 +207,7 @@ def sequenceOfFloatsLt(string): value = sequenceOfFloats(string) return __sequenceLt(value) + def sequenceOfFloatsLe(string): """ A custom type for the argparse commandline parser.
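Pulling these custom types together, a hedged sketch of how they wire into a parser, in the doctest style the file itself uses (argument names invented for illustration)::

    >>> import argparse
    >>> from medpy.utilities.argparseu import sequenceOfFloatsGt, sequenceOfIntegers
    >>> parser = argparse.ArgumentParser()
    >>> _ = parser.add_argument('--bins', type=sequenceOfIntegers, help='e.g. 1,2,3')
    >>> _ = parser.add_argument('--weights', type=sequenceOfFloatsGt, help='e.g. 0.5,1.0')
    >>> args = parser.parse_args(['--bins', '1,2,3', '--weights', '0.5,1.0'])
    >>> args.bins
    [1, 2, 3]
    >>> args.weights
    [0.5, 1.0]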
@@ -211,42 +223,60 @@ def sequenceOfFloatsLe(string): value = sequenceOfFloats(string) return __sequenceLe(value) + def __sequenceGt(l): "Test a sequences values for being greater than 0." for e in l: - if 0 >= e: raise argparse.ArgumentTypeError('All values have to be greater than 0.') + if 0 >= e: + raise argparse.ArgumentTypeError("All values have to be greater than 0.") return l + def __sequenceGe(l): "Test a sequences values for being greater than or equal to 0." for e in l: - if 0 > e: raise argparse.ArgumentTypeError('All values have to be greater than or equal to 0.') + if 0 > e: + raise argparse.ArgumentTypeError( + "All values have to be greater than or equal to 0." + ) return l + def __sequenceLt(l): "Test a sequences values for being less than 0." for e in l: - if 0 <= e: raise argparse.ArgumentTypeError('All values have to be less than 0.') + if 0 <= e: + raise argparse.ArgumentTypeError("All values have to be less than 0.") return l + def __sequenceLe(l): "Test a sequences values for being less than or equal to 0." for e in l: - if 0 < e: raise argparse.ArgumentTypeError('All values have to be less than or equal to 0.') + if 0 < e: + raise argparse.ArgumentTypeError( + "All values have to be less than or equal to 0." + ) return l + def __sequenceAscendingStrict(l): "Test a sequences values to be in strictly ascending order." it = iter(l) next(it) if not all(b > a for a, b in zip(l, it)): - raise argparse.ArgumentTypeError('All values must be given in strictly ascending order.') + raise argparse.ArgumentTypeError( + "All values must be given in strictly ascending order." + ) return l + def __sequenceDescendingStrict(l): "Test a sequences values to be in strictly descending order." it = iter(l) next(it) if not all(b < a for a, b in zip(l, it)): - raise argparse.ArgumentTypeError('All values must be given in strictly descending order.') - return l \ No newline at end of file + raise argparse.ArgumentTypeError( + "All values must be given in strictly descending order." + ) + return l diff --git a/setup.py b/setup.py index 60ff667b..0cb48a2b 100755 --- a/setup.py +++ b/setup.py @@ -9,59 +9,77 @@ from ctypes.util import find_library from distutils.command.build_ext import build_ext from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError -from setuptools import setup, Extension, Command + +from setuptools import Command, Extension, setup # CONSTANTS -IS_PYPY = hasattr(sys, 'pypy_translation_info') # why this? -PACKAGES= [ - 'medpy', - 'medpy.core', - 'medpy.features', - 'medpy.filter', - 'medpy.graphcut', - 'medpy.io', - 'medpy.metric', - 'medpy.utilities' +IS_PYPY = hasattr(sys, "pypy_translation_info") # why this? 
+PACKAGES = [ + "medpy", + "medpy.core", + "medpy.features", + "medpy.filter", + "medpy.graphcut", + "medpy.io", + "medpy.metric", + "medpy.utilities", ] + #### FUNCTIONS def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() + ### PREDEFINED MODULES # The maxflow graphcut wrapper using boost.python # Special handling for homebrew Boost Python library if sys.platform == "darwin": if sys.version_info.major > 2: - boost_python_library = 'boost_python' + str(sys.version_info.major) + boost_python_library = "boost_python" + str(sys.version_info.major) else: - boost_python_library = 'boost_python' + boost_python_library = "boost_python" else: - boost_python_library = 'boost_python-py' + str(sys.version_info.major) + str(sys.version_info.minor) + boost_python_library = ( + "boost_python-py" + str(sys.version_info.major) + str(sys.version_info.minor) + ) if not find_library(boost_python_library): # exact version not find, trying with major fit only as fallback - boost_python_library = 'boost_python' + str(sys.version_info.major) - -maxflow = Extension('medpy.graphcut.maxflow', - define_macros = [('MAJOR_VERSION', '0'), - ('MINOR_VERSION', '1')], - sources = ['lib/maxflow/src/maxflow.cpp', 'lib/maxflow/src/wrapper.cpp', 'lib/maxflow/src/graph.cpp'], - libraries = [boost_python_library], - extra_compile_args = ['-O0']) + boost_python_library = "boost_python" + str(sys.version_info.major) + +maxflow = Extension( + "medpy.graphcut.maxflow", + define_macros=[("MAJOR_VERSION", "0"), ("MINOR_VERSION", "1")], + sources=[ + "lib/maxflow/src/maxflow.cpp", + "lib/maxflow/src/wrapper.cpp", + "lib/maxflow/src/graph.cpp", + ], + libraries=[boost_python_library], + extra_compile_args=["-O0"], +) ### FUNCTIONALITY FOR CONDITIONAL C++ BUILD -if sys.platform == 'win32' and sys.version_info > (2, 6): +if sys.platform == "win32" and sys.version_info > (2, 6): # 2.6's distutils.msvc9compiler can raise an IOError when failing to # find the compiler # It can also raise ValueError http://bugs.python.org/issue7511 - ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, ValueError) + ext_errors = ( + CCompilerError, + DistutilsExecError, + DistutilsPlatformError, + IOError, + ValueError, + ) else: ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) + class BuildFailed(Exception): pass + class TestCommand(Command): user_options = [] @@ -74,6 +92,7 @@ def finalize_options(self): def run(self): raise SystemExit(1) + class ve_build_ext(build_ext): # This class allows C++ extension building to fail. 
def run(self): @@ -88,115 +107,109 @@ def build_extension(self, ext): except ext_errors: raise BuildFailed() + ### MAIN SETUP FUNCTION def run_setup(with_compilation): cmdclass = dict(test=TestCommand) if with_compilation: - kw = dict(ext_modules = [maxflow], - cmdclass=dict(cmdclass, build_ext=ve_build_ext)) - ap = ['medpy.graphcut'] + kw = dict( + ext_modules=[maxflow], cmdclass=dict(cmdclass, build_ext=ve_build_ext) + ) + ap = ["medpy.graphcut"] else: kw = dict(cmdclass=cmdclass) ap = [] setup( - name='MedPy', - version='0.4.0', # major.minor.micro - description='Medical image processing in Python', - author='Oskar Maier', - author_email='oskar.maier@gmail.com', - url='https://github.com/loli/medpy', - license='LICENSE.txt', - keywords='medical image processing dicom itk insight tool kit MRI CT US graph cut max-flow min-cut', - long_description=read('README_PYPI.md'), - long_description_content_type='text/markdown', - - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Environment :: Other Environment', - 'Intended Audience :: End Users/Desktop', - 'Intended Audience :: Developers', - 'Intended Audience :: Healthcare Industry', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: GNU General Public License (GPL)', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Operating System :: Unix', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: C++', - 'Topic :: Scientific/Engineering :: Medical Science Apps.', - 'Topic :: Scientific/Engineering :: Image Recognition' - ], - - install_requires=[ - "scipy >= 1.1.0", - "numpy >= 1.11.0", - "SimpleITK >= 1.1.0" - ], - - packages = PACKAGES + ap, - - scripts=[ - 'bin/medpy_anisotropic_diffusion.py', - 'bin/medpy_apparent_diffusion_coefficient.py', - 'bin/medpy_binary_resampling.py', - 'bin/medpy_convert.py', - 'bin/medpy_create_empty_volume_by_example.py', - 'bin/medpy_dicom_slices_to_volume.py', - 'bin/medpy_dicom_to_4D.py', - 'bin/medpy_diff.py', - 'bin/medpy_extract_contour.py', - 'bin/medpy_extract_min_max.py', - 'bin/medpy_extract_sub_volume_auto.py', - 'bin/medpy_extract_sub_volume_by_example.py', - 'bin/medpy_extract_sub_volume.py', - 'bin/medpy_fit_into_shape.py', - 'bin/medpy_gradient.py', - 'bin/medpy_graphcut_label_bgreduced.py', - 'bin/medpy_graphcut_label_w_regional.py', - 'bin/medpy_graphcut_label_wsplit.py', - 'bin/medpy_graphcut_label.py', - 'bin/medpy_graphcut_voxel.py', - 'bin/medpy_grid.py', - 'bin/medpy_info.py', - 'bin/medpy_intensity_range_standardization.py', - 'bin/medpy_intersection.py', - 'bin/medpy_join_masks.py', - 'bin/medpy_join_xd_to_xplus1d.py', - 'bin/medpy_label_count.py', - 'bin/medpy_label_fit_to_mask.py', - 'bin/medpy_label_superimposition.py', - 'bin/medpy_merge.py', - 'bin/medpy_morphology.py', - 'bin/medpy_resample.py', - 'bin/medpy_reslice_3d_to_4d.py', - 'bin/medpy_set_pixel_spacing.py', - 'bin/medpy_shrink_image.py', - 'bin/medpy_split_xd_to_xminus1d.py', - 'bin/medpy_stack_sub_volumes.py', - 'bin/medpy_swap_dimensions.py', - 'bin/medpy_watershed.py', - 'bin/medpy_zoom_image.py' - ], - - **kw - ) + name="MedPy", + version="0.4.0", # major.minor.micro + description="Medical image processing in Python", + author="Oskar Maier", + author_email="oskar.maier@gmail.com", + url="https://github.com/loli/medpy", + license="LICENSE.txt", + keywords="medical image processing dicom itk insight tool kit MRI CT US graph cut 
max-flow min-cut", + long_description=read("README_PYPI.md"), + long_description_content_type="text/markdown", + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Environment :: Other Environment", + "Intended Audience :: End Users/Desktop", + "Intended Audience :: Developers", + "Intended Audience :: Healthcare Industry", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU General Public License (GPL)", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: C++", + "Topic :: Scientific/Engineering :: Medical Science Apps.", + "Topic :: Scientific/Engineering :: Image Recognition", + ], + install_requires=["scipy >= 1.1.0", "numpy >= 1.11.0", "SimpleITK >= 1.1.0"], + packages=PACKAGES + ap, + scripts=[ + "bin/medpy_anisotropic_diffusion.py", + "bin/medpy_apparent_diffusion_coefficient.py", + "bin/medpy_binary_resampling.py", + "bin/medpy_convert.py", + "bin/medpy_create_empty_volume_by_example.py", + "bin/medpy_dicom_slices_to_volume.py", + "bin/medpy_dicom_to_4D.py", + "bin/medpy_diff.py", + "bin/medpy_extract_contour.py", + "bin/medpy_extract_min_max.py", + "bin/medpy_extract_sub_volume_auto.py", + "bin/medpy_extract_sub_volume_by_example.py", + "bin/medpy_extract_sub_volume.py", + "bin/medpy_fit_into_shape.py", + "bin/medpy_gradient.py", + "bin/medpy_graphcut_label_bgreduced.py", + "bin/medpy_graphcut_label_w_regional.py", + "bin/medpy_graphcut_label_wsplit.py", + "bin/medpy_graphcut_label.py", + "bin/medpy_graphcut_voxel.py", + "bin/medpy_grid.py", + "bin/medpy_info.py", + "bin/medpy_intensity_range_standardization.py", + "bin/medpy_intersection.py", + "bin/medpy_join_masks.py", + "bin/medpy_join_xd_to_xplus1d.py", + "bin/medpy_label_count.py", + "bin/medpy_label_fit_to_mask.py", + "bin/medpy_label_superimposition.py", + "bin/medpy_merge.py", + "bin/medpy_morphology.py", + "bin/medpy_resample.py", + "bin/medpy_reslice_3d_to_4d.py", + "bin/medpy_set_pixel_spacing.py", + "bin/medpy_shrink_image.py", + "bin/medpy_split_xd_to_xminus1d.py", + "bin/medpy_stack_sub_volumes.py", + "bin/medpy_swap_dimensions.py", + "bin/medpy_watershed.py", + "bin/medpy_zoom_image.py", + ], + **kw + ) + ### INSTALLATION try: run_setup(not IS_PYPY) except BuildFailed: - BUILD_EXT_WARNING = ("WARNING: The medpy.graphcut.maxflow external C++ package could not be compiled, all graphcut functionality will be disabled. You might be missing Boost.Python or some build essentials like g++.") - print(('*' * 75)) + BUILD_EXT_WARNING = "WARNING: The medpy.graphcut.maxflow external C++ package could not be compiled, all graphcut functionality will be disabled. You might be missing Boost.Python or some build essentials like g++." 
+ print(("*" * 75)) print(BUILD_EXT_WARNING) print("Failure information, if any, is above.") print("I'm retrying the build without the graphcut C++ module now.") - print(('*' * 75)) + print(("*" * 75)) run_setup(False) - print(('*' * 75)) + print(("*" * 75)) print(BUILD_EXT_WARNING) print("Plain-Python installation succeeded.") - print(('*' * 75)) + print(("*" * 75)) diff --git a/tests/__init__.py b/tests/__init__.py index 13df546a..685cc62d 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1 +1 @@ -# Holds the unittests for various classes \ No newline at end of file +# Holds the unittests for various classes diff --git a/tests/features_/__init__.py b/tests/features_/__init__.py index d94cc9a9..380f885d 100644 --- a/tests/features_/__init__.py +++ b/tests/features_/__init__.py @@ -1,3 +1,5 @@ -from .histogram import TestHistogramFeatures -from .intensity import TestIntensityFeatures -from .texture import TestTextureFeatures +from .histogram import TestHistogramFeatures as TestHistogramFeatures +from .intensity import TestIntensityFeatures as TestIntensityFeatures +from .texture import TestTextureFeatures as TestTextureFeatures + +__all__ = ["TestHistogramFeatures", "TestIntensityFeatures", "TestTextureFeatures"] diff --git a/tests/features_/histogram.py b/tests/features_/histogram.py index 89868c38..e1ffea0a 100644 --- a/tests/features_/histogram.py +++ b/tests/features_/histogram.py @@ -8,175 +8,342 @@ """ +import math + # build-in modules import unittest -import math # third-party modules import scipy # own modules -from medpy.features.histogram import fuzzy_histogram, triangular_membership, trapezoid_membership, gaussian_membership, sigmoidal_difference_membership +from medpy.features.histogram import ( + fuzzy_histogram, + gaussian_membership, + sigmoidal_difference_membership, + trapezoid_membership, + triangular_membership, +) + # code class TestHistogramFeatures(unittest.TestCase): - def test_fuzzy_histogram_contribution(self): """Test if all values contribute with nearly one to the created histograms.""" values = scipy.random.randint(0, 100, 1000) - + # test triangular - h, _ = fuzzy_histogram(values, membership='triangular', normed=False, guarantee=True) - self.assertAlmostEqual(sum(h), values.size, msg='Triangular contribution does not equal out. {} != {}.'.format(sum(h), values.size)) - + h, _ = fuzzy_histogram( + values, membership="triangular", normed=False, guarantee=True + ) + self.assertAlmostEqual( + sum(h), + values.size, + msg="Triangular contribution does not equal out. {} != {}.".format( + sum(h), values.size + ), + ) + # test trapezoid - h, _ = fuzzy_histogram(values, membership='trapezoid', normed=False, guarantee=True) - self.assertAlmostEqual(sum(h), values.size, msg='Trapezoid contribution does not equal out. {} != {}.'.format(sum(h), values.size)) - + h, _ = fuzzy_histogram( + values, membership="trapezoid", normed=False, guarantee=True + ) + self.assertAlmostEqual( + sum(h), + values.size, + msg="Trapezoid contribution does not equal out. {} != {}.".format( + sum(h), values.size + ), + ) + # test gaussian - h, _ = fuzzy_histogram(values, membership='gaussian', normed=False, guarantee=True) - self.assertAlmostEqual(sum(h), values.size, msg='Gaussian contribution does not equal out. 
{} != {}.'.format(sum(h), values.size), delta=values.size * 0.001) # gaussian maximal error eps - + h, _ = fuzzy_histogram( + values, membership="gaussian", normed=False, guarantee=True + ) + self.assertAlmostEqual( + sum(h), + values.size, + msg="Gaussian contribution does not equal out. {} != {}.".format( + sum(h), values.size + ), + delta=values.size * 0.001, + ) # gaussian maximal error eps + # test sigmoid - h, _ = fuzzy_histogram(values, membership='sigmoid', normed=False, guarantee=True) - self.assertAlmostEqual(sum(h), values.size, msg='Sigmoid contribution does not equal out. {} != {}.'.format(sum(h), values.size), delta=values.size * 0.001) # sigmoidal maximal error eps - + h, _ = fuzzy_histogram( + values, membership="sigmoid", normed=False, guarantee=True + ) + self.assertAlmostEqual( + sum(h), + values.size, + msg="Sigmoid contribution does not equal out. {} != {}.".format( + sum(h), values.size + ), + delta=values.size * 0.001, + ) # sigmoidal maximal error eps def test_triangular_membership_contribution(self): """Tests if all values contribute equally using the triangular membership function.""" contribution = 1.0 - + for smoothness in [0.5]: for bin_width in [0.5, 1, 1.5, 10]: mbs = [] - for bin_idx in range(-int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1): - mbs.append(triangular_membership(bin_idx * bin_width, bin_width, smoothness)) + for bin_idx in range( + -int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1 + ): + mbs.append( + triangular_membership( + bin_idx * bin_width, bin_width, smoothness + ) + ) value = -0.5 * bin_width for _ in range(1, 11): result = 0 for bin_idx in range(len(mbs)): result += mbs[bin_idx](value) - self.assertAlmostEqual(contribution, result, msg='invalid contribution of {} instead of expected {}'.format(result, contribution)) - value += 1./10 * bin_width - + self.assertAlmostEqual( + contribution, + result, + msg="invalid contribution of {} instead of expected {}".format( + result, contribution + ), + ) + value += 1.0 / 10 * bin_width + def test_trapezoid_membership_contribution(self): """Tests if all values contribute equally using the trapezoid membership function.""" contribution = 1.0 - + for smoothness in [0.1, 0.2, 0.3, 0.4, 0.49]: for bin_width in [0.5, 1, 1.5, 10]: mbs = [] - for bin_idx in range(-int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1): - mbs.append(trapezoid_membership(bin_idx * bin_width, bin_width, smoothness)) + for bin_idx in range( + -int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1 + ): + mbs.append( + trapezoid_membership(bin_idx * bin_width, bin_width, smoothness) + ) value = -0.5 * bin_width for _ in range(1, 11): result = 0 for bin_idx in range(len(mbs)): result += mbs[bin_idx](value) - self.assertAlmostEqual(contribution, result, msg='invalid contribution of {} instead of expected {}'.format(result, contribution)) - value += 1./10 * bin_width - + self.assertAlmostEqual( + contribution, + result, + msg="invalid contribution of {} instead of expected {}".format( + result, contribution + ), + ) + value += 1.0 / 10 * bin_width + def test_gaussian_membership_contribution(self): """Tests if all values contribute equally using the gaussian membership function.""" contribution = 1.0 - eps = 0.001 # maximal error per value - - for smoothness in [0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 2.51, 3, 4, 5, 6, 7, 7.49, 8, 9, 10]: + eps = 0.001 # maximal error per value + + for smoothness in [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 1, + 2, + 2.51, + 3, + 4, + 5, + 6, + 7, + 7.49, + 8, + 9, + 10, + ]: 
for bin_width in [0.5, 1, 1.5, 10]: mbs = [] - for bin_idx in range(-int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1): - mbs.append(gaussian_membership(bin_idx * bin_width, bin_width, smoothness)) + for bin_idx in range( + -int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1 + ): + mbs.append( + gaussian_membership(bin_idx * bin_width, bin_width, smoothness) + ) value = -0.5 * bin_width for _ in range(1, 11): result = 0 for bin_idx in range(len(mbs)): result += mbs[bin_idx](value) - self.assertAlmostEqual(contribution, result, delta=eps, msg='invalid contribution of {} instead of expected {}'.format(result, contribution)) - value += 1./10 * bin_width - + self.assertAlmostEqual( + contribution, + result, + delta=eps, + msg="invalid contribution of {} instead of expected {}".format( + result, contribution + ), + ) + value += 1.0 / 10 * bin_width + def test_sigmoidal_difference_membership_contribution(self): """Tests if all values contribute equally using the gaussian membership function.""" contribution = 1.0 - eps = 0.001 # maximal error per value - - for smoothness in [0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 2.51, 3, 4, 5, 6, 7, 7.49, 8, 9, 10]: + eps = 0.001 # maximal error per value + + for smoothness in [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 1, + 2, + 2.51, + 3, + 4, + 5, + 6, + 7, + 7.49, + 8, + 9, + 10, + ]: for bin_width in [0.5, 1, 1.5, 10]: mbs = [] - for bin_idx in range(-int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1): - mbs.append(sigmoidal_difference_membership(bin_idx * bin_width, bin_width, smoothness)) + for bin_idx in range( + -int(math.ceil(smoothness)), int(math.ceil(smoothness)) + 1 + ): + mbs.append( + sigmoidal_difference_membership( + bin_idx * bin_width, bin_width, smoothness + ) + ) value = -0.5 * bin_width for _ in range(1, 11): result = 0 for bin_idx in range(len(mbs)): result += mbs[bin_idx](value) - self.assertAlmostEqual(contribution, result, delta=eps, msg='invalid contribution of {} instead of expected {}'.format(result, contribution)) - value += 1./10 * bin_width + self.assertAlmostEqual( + contribution, + result, + delta=eps, + msg="invalid contribution of {} instead of expected {}".format( + result, contribution + ), + ) + value += 1.0 / 10 * bin_width def test_fuzzy_histogram_std_behaviour(self): """Test the standard behaviour of fuzzy histogram.""" values = scipy.random.randint(0, 10, 100) - + _, b = fuzzy_histogram(values, bins=12) - self.assertEqual(len(b), 13, 'violation of requested histogram size.') - self.assertEqual(b[0], values.min(), 'invalid lower histogram border.') - self.assertEqual(b[-1], values.max(), 'invalid upper histogram border.') - + self.assertEqual(len(b), 13, "violation of requested histogram size.") + self.assertEqual(b[0], values.min(), "invalid lower histogram border.") + self.assertEqual(b[-1], values.max(), "invalid upper histogram border.") + h, _ = fuzzy_histogram(values, normed=True) - self.assertAlmostEqual(sum(h), 1.0, msg='histogram not normed.') - + self.assertAlmostEqual(sum(h), 1.0, msg="histogram not normed.") + _, b = fuzzy_histogram(values, bins=12, guarantee=True) - self.assertEqual(len(b), 13, 'violation of requested histogram size with guarantee set to True.') - + self.assertEqual( + len(b), + 13, + "violation of requested histogram size with guarantee set to True.", + ) + _, b = fuzzy_histogram(values, range=(-5, 5)) - self.assertEqual(b[0], -5.0, 'violation of requested ranges lower bound.') - self.assertEqual(b[-1], 5.0, 'violation of requested ranges lower bound.') + 
self.assertEqual(b[0], -5.0, "violation of requested ranges lower bound.") + self.assertEqual(b[-1], 5.0, "violation of requested ranges lower bound.") def test_fuzzy_histogram_parameters(self): values = scipy.random.randint(0, 10, 100) - + # membership functions - fuzzy_histogram(values, membership='triangular') - fuzzy_histogram(values, membership='trapezoid') - fuzzy_histogram(values, membership='gaussian') - fuzzy_histogram(values, membership='sigmoid') - + fuzzy_histogram(values, membership="triangular") + fuzzy_histogram(values, membership="trapezoid") + fuzzy_histogram(values, membership="gaussian") + fuzzy_histogram(values, membership="sigmoid") + # int/float - fuzzy_histogram(values, range=(0,10)) # int in range - fuzzy_histogram(values, range=(0.,10.)) # float in range - fuzzy_histogram(values, bins=10) # int in bins - fuzzy_histogram(values, membership='sigmoid', smoothness=1) # int in smoothness - fuzzy_histogram(values, membership='sigmoid', smoothness=1.) # float in smoothness + fuzzy_histogram(values, range=(0, 10)) # int in range + fuzzy_histogram(values, range=(0.0, 10.0)) # float in range + fuzzy_histogram(values, bins=10) # int in bins + fuzzy_histogram(values, membership="sigmoid", smoothness=1) # int in smoothness + fuzzy_histogram( + values, membership="sigmoid", smoothness=1.0 + ) # float in smoothness def test_fuzzy_histogram_exceptions(self): values = scipy.random.randint(0, 10, 100) - + # test fuzzy histogram exceptions - self.assertRaises(AttributeError, fuzzy_histogram, values, range=(0,0)) - self.assertRaises(AttributeError, fuzzy_histogram, values, range=(0,-1)) + self.assertRaises(AttributeError, fuzzy_histogram, values, range=(0, 0)) + self.assertRaises(AttributeError, fuzzy_histogram, values, range=(0, -1)) self.assertRaises(AttributeError, fuzzy_histogram, values, bins=0) self.assertRaises(AttributeError, fuzzy_histogram, values, bins=-1) self.assertRaises(AttributeError, fuzzy_histogram, values, bins=0.5) - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='') - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='x') + self.assertRaises(AttributeError, fuzzy_histogram, values, membership="") + self.assertRaises(AttributeError, fuzzy_histogram, values, membership="x") self.assertRaises(AttributeError, fuzzy_histogram, values, membership=True) self.assertRaises(AttributeError, fuzzy_histogram, values, membership=None) self.assertRaises(AttributeError, fuzzy_histogram, values, smoothness=-1.0) self.assertRaises(AttributeError, fuzzy_histogram, values, smoothness=-1) self.assertRaises(AttributeError, fuzzy_histogram, values, smoothness=-1.0) self.assertRaises(AttributeError, fuzzy_histogram, values, smoothness=-1) - + # test triangular and trapezium exceptions - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='triangular', smoothness=0.51) - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='trapezoid', smoothness=0.51) - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='trapezoid', smoothness=0.09) - + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="triangular", + smoothness=0.51, + ) + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="trapezoid", + smoothness=0.51, + ) + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="trapezoid", + smoothness=0.09, + ) + # test gaussian exceptions - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='gaussian', 
smoothness=1./11) - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='gaussian', smoothness=11) - + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="gaussian", + smoothness=1.0 / 11, + ) + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="gaussian", + smoothness=11, + ) + # test sigmoidal exceptions - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='sigmoid', smoothness=1./11) - self.assertRaises(AttributeError, fuzzy_histogram, values, membership='sigmoid', smoothness=11) - -if __name__ == '__main__': - unittest.main() \ No newline at end of file + self.assertRaises( + AttributeError, + fuzzy_histogram, + values, + membership="sigmoid", + smoothness=1.0 / 11, + ) + self.assertRaises( + AttributeError, fuzzy_histogram, values, membership="sigmoid", smoothness=11 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/features_/intensity.py b/tests/features_/intensity.py index b63b3024..d3b5ae3a 100644 --- a/tests/features_/intensity.py +++ b/tests/features_/intensity.py @@ -8,380 +8,559 @@ """ +import math + # build-in modules import unittest -import math # third-party modules import numpy -# own modules -from medpy.features.intensity import intensities, centerdistance,\ - centerdistance_xdminus1, indices, local_mean_gauss, local_histogram -from medpy.features.utilities import join, append from medpy.core.exceptions import ArgumentError +# own modules +from medpy.features.intensity import ( + centerdistance, + centerdistance_xdminus1, + indices, + intensities, + local_histogram, + local_mean_gauss, +) +from medpy.features.utilities import append, join + + # code class TestIntensityFeatures(unittest.TestCase): - def test_local_histogram(self): """Test the feature: local_histogram.""" - - i = numpy.asarray([[0, 1, 1, 1], - [0, 1, 0, 1], - [0, 0, 0, 1], - [0, 0, 0, 1]]) - e = numpy.asarray([[ 0.5 , 0.5 ], - [ 0.5 , 0.5 ], - [ 0.16666667, 0.83333333], - [ 0.25 , 0.75 ], - [ 0.66666667, 0.33333333], - [ 0.66666667, 0.33333333], - [ 0.33333333, 0.66666667], - [ 0.33333333, 0.66666667], - [ 0.83333333, 0.16666667], - [ 0.88888889, 0.11111111], - [ 0.55555556, 0.44444444], - [ 0.5 , 0.5 ], - [ 1. , 0. ], - [ 1. , 0. 
], - [ 0.66666667, 0.33333333], - [ 0.5 , 0.5 ]]) + + i = numpy.asarray([[0, 1, 1, 1], [0, 1, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1]]) + e = numpy.asarray( + [ + [0.5, 0.5], + [0.5, 0.5], + [0.16666667, 0.83333333], + [0.25, 0.75], + [0.66666667, 0.33333333], + [0.66666667, 0.33333333], + [0.33333333, 0.66666667], + [0.33333333, 0.66666667], + [0.83333333, 0.16666667], + [0.88888889, 0.11111111], + [0.55555556, 0.44444444], + [0.5, 0.5], + [1.0, 0.0], + [1.0, 0.0], + [0.66666667, 0.33333333], + [0.5, 0.5], + ] + ) r = local_histogram(i, bins=2, size=3) - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 2D image range failed') - - m = [[False, False, False], - [False, True, False], - [False, False, False]] + numpy.testing.assert_allclose( + r, e, err_msg="local histogram: 2D image range failed" + ) + + m = [[False, False, False], [False, True, False], [False, False, False]] e = e[:9][numpy.asarray(m).flatten()] r = local_histogram(i, bins=2, size=3, rang=(0, 1), mask=m) - self.assertEqual(len(r), 1, 'local histogram: 2D local range masked failed') - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 2D local range masked failed') - - i = numpy.asarray([[0, 1, 1, 1], - [0, 1, 0, 1], - [0, 0, 0, 1], - [1, 0, 0, 1]]) + self.assertEqual(len(r), 1, "local histogram: 2D local range masked failed") + numpy.testing.assert_allclose( + r, e, err_msg="local histogram: 2D local range masked failed" + ) + + i = numpy.asarray([[0, 1, 1, 1], [0, 1, 0, 1], [0, 0, 0, 1], [1, 0, 0, 1]]) e = numpy.asarray([(0, 1)] * 16) - r = local_histogram(i, size = 3, bins = 2, rang = (0.1, 1)) - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 2D fixed range with excluded elements failed') - + r = local_histogram(i, size=3, bins=2, rang=(0.1, 1)) + numpy.testing.assert_allclose( + r, + e, + err_msg="local histogram: 2D fixed range with excluded elements failed", + ) + e = numpy.asarray([(0, 1)] * 16) - r = local_histogram(i, size = 3, bins = 2, cutoffp = (50, 100)) - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 2D rang over complete image \w cutoffp failed') - - i = numpy.asarray([[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]) + r = local_histogram(i, size=3, bins=2, cutoffp=(50, 100)) + numpy.testing.assert_allclose( + r, + e, + err_msg="local histogram: 2D rang over complete image \w cutoffp failed", + ) + + i = numpy.asarray([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) i = numpy.asarray([i, i, i]) e = numpy.asarray([(0, 1)] * (9 * 3)) - r = local_histogram(i, size = 3, bins = 2, rang=(0,1)) - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 3D local range failed') - + r = local_histogram(i, size=3, bins=2, rang=(0, 1)) + numpy.testing.assert_allclose( + r, e, err_msg="local histogram: 3D local range failed" + ) + i = numpy.asarray([i, i, i]) e = numpy.asarray([(0, 1)] * (9 * 3 * 3)) - r = local_histogram(i, size = 3, bins = 2, rang=(0,1)) - numpy.testing.assert_allclose(r, e, err_msg = 'local histogram: 4D local range failed') - - + r = local_histogram(i, size=3, bins=2, rang=(0, 1)) + numpy.testing.assert_allclose( + r, e, err_msg="local histogram: 4D local range failed" + ) + def test_local_mean_gauss(self): """Test the feature: local_mean_gauss.""" # 2D to zero case - i = numpy.asarray([[0, 1, 2], - [1, 2, 3], - [2, 3, 4]]) - e = [0, 1, 1,\ - 1, 2, 2,\ - 1, 2, 2] + i = numpy.asarray([[0, 1, 2], [1, 2, 3], [2, 3, 4]]) + e = [0, 1, 1, 1, 2, 2, 1, 2, 2] r = local_mean_gauss(i, 1) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 2D failed') + 
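The local_mean_gauss assertions that follow rest on the feature returning, per voxel, a gaussian-weighted mean of its neighbourhood, flattened to a feature vector. A short sketch of that reading; the comparison with scipy's gaussian_filter is an assumption about the implementation, not a documented guarantee:

import numpy
from scipy.ndimage import gaussian_filter
from medpy.features.intensity import local_mean_gauss

img = numpy.asarray([[0, 1, 2], [1, 2, 3], [2, 3, 4]])
r = local_mean_gauss(img, 1)  # sigma = 1 in every image dimension
print(r)  # one smoothed value per voxel, flattened to a vector
print(gaussian_filter(img, 1).ravel())  # the closely related plain smoothing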
numpy.testing.assert_allclose(r, e, err_msg="local mean gauss: 2D failed") # 2D to zero case - i = numpy.asarray([[0, 1], - [1, 0]]) - e = [0, 0,\ - 0, 0] + i = numpy.asarray([[0, 1], [1, 0]]) + e = [0, 0, 0, 0] r = local_mean_gauss(i, 1) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 2D to zero failed') - - # 2D zero case - i = numpy.asarray([[0, 0], - [0, 0]]) + numpy.testing.assert_allclose( + r, e, err_msg="local mean gauss: 2D to zero failed" + ) + + # 2D zero case + i = numpy.asarray([[0, 0], [0, 0]]) r = local_mean_gauss(i, 1) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 2D zero case failed') - + numpy.testing.assert_allclose( + r, e, err_msg="local mean gauss: 2D zero case failed" + ) + # 2D different axes - i = numpy.asarray([[0, 0, 0, 1], - [0, 0, 1, 2], - [0, 1, 2, 3], - [1, 2, 3, 4]]) - e = [0, 0, 0, 0,\ - 0, 0, 1, 1,\ - 0, 0, 1, 1,\ - 0, 1, 1, 2] + i = numpy.asarray([[0, 0, 0, 1], [0, 0, 1, 2], [0, 1, 2, 3], [1, 2, 3, 4]]) + e = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 2] r = local_mean_gauss(i, (1, 0.5)) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 2D different axes failed') - + numpy.testing.assert_allclose( + r, e, err_msg="local mean gauss: 2D different axes failed" + ) + # 2D voxelspacing - r = local_mean_gauss(i, 1, voxelspacing = [1., 2.]) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 2D voxelspacing failed') + r = local_mean_gauss(i, 1, voxelspacing=[1.0, 2.0]) + numpy.testing.assert_allclose( + r, e, err_msg="local mean gauss: 2D voxelspacing failed" + ) # 3D with 2D kernel i = numpy.asarray([i, i]) e = numpy.asarray([e, e]).ravel() r = local_mean_gauss(i, (0, 1, 0.5)) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 3D with 2D kernel failed') - + numpy.testing.assert_allclose( + r, e, err_msg="local mean gauss: 3D with 2D kernel failed" + ) + # 3D - e = numpy.asarray([[[0, 0, 0, 1], - [0, 0, 0, 1], - [0, 0, 0, 1], - [0, 0, 1, 1]], - [[0, 0, 0, 1], - [0, 0, 0, 1], - [0, 0, 0, 1], - [0, 0, 1, 1]]]).ravel() + e = numpy.asarray( + [ + [[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 1]], + [[0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 1]], + ] + ).ravel() r = local_mean_gauss(i, 2) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 3D failed') - + numpy.testing.assert_allclose(r, e, err_msg="local mean gauss: 3D failed") + # 4D i = numpy.asarray([i, i]) - e = [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0,\ - 1, 0, 0, 0, 1, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1,\ - 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 2] + e = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 1, + 1, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 1, + 1, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 1, + 1, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 1, + 1, + 2, + ] r = local_mean_gauss(i, 1) - numpy.testing.assert_allclose(r, e, err_msg = 'local mean gauss: 4D failed') - + numpy.testing.assert_allclose(r, e, err_msg="local mean gauss: 4D failed") + def test_indices(self): """Test the feature: indices.""" - + # 2D - i = numpy.asarray([[0, 0], - [0, 0]]) - e = [[0,0], [0, 1], \ - [1, 0], [1, 1]] + i = numpy.asarray([[0, 0], [0, 0]]) + e = [[0, 0], [0, 1], [1, 0], [1, 1]] r = indices(i) - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 2D failed') - + numpy.testing.assert_allclose(r, e, err_msg="indices: 2D failed") + 
# 2D multi-spectral r = indices([i, i]) - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 2D multi-spectral failed') - + numpy.testing.assert_allclose(r, e, err_msg="indices: 2D multi-spectral failed") + # 2D with voxelspacing - r = indices(i, voxelspacing = (1, 2.5)) - e = [[0,0], [0, 2.5], \ - [1, 0], [1, 2.5]] - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 2D \w voxelspacing failed') - + r = indices(i, voxelspacing=(1, 2.5)) + e = [[0, 0], [0, 2.5], [1, 0], [1, 2.5]] + numpy.testing.assert_allclose( + r, e, err_msg="indices: 2D \w voxelspacing failed" + ) + # 2D with mask - m = [[True, False], - [True, False]] - e = [[0,0], [1, 0]] - r = indices(i, mask = m) - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 2D masked failed') - + m = [[True, False], [True, False]] + e = [[0, 0], [1, 0]] + r = indices(i, mask=m) + numpy.testing.assert_allclose(r, e, err_msg="indices: 2D masked failed") + # 3D - i = numpy.asarray([[0, 0], - [0, 0]]) + i = numpy.asarray([[0, 0], [0, 0]]) i = numpy.asarray([i, i]) - e = [[0,0,0], [0,0,1], [0,1,0], [0,1,1], - [1,0,0], [1,0,1], [1,1,0], [1,1,1]] + e = [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ] r = indices(i) - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 3D failed') - + numpy.testing.assert_allclose(r, e, err_msg="indices: 3D failed") + # 4D i = numpy.asarray([i, i]) - e = [[0,0,0,0], [0,0,0,1], [0,0,1,0], [0,0,1,1], - [0,1,0,0], [0,1,0,1], [0,1,1,0], [0,1,1,1], - [1,0,0,0], [1,0,0,1], [1,0,1,0], [1,0,1,1], - [1,1,0,0], [1,1,0,1], [1,1,1,0], [1,1,1,1]] + e = [ + [0, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + [0, 0, 1, 1], + [0, 1, 0, 0], + [0, 1, 0, 1], + [0, 1, 1, 0], + [0, 1, 1, 1], + [1, 0, 0, 0], + [1, 0, 0, 1], + [1, 0, 1, 0], + [1, 0, 1, 1], + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 0], + [1, 1, 1, 1], + ] r = indices(i) - numpy.testing.assert_allclose(r, e, err_msg = 'indices: 4D failed') - + numpy.testing.assert_allclose(r, e, err_msg="indices: 4D failed") + def test_centerdistance_xdminus1(self): """Test the feature: centerdistance_xdminus1.""" - + # 2D with dim (invalid) - i = numpy.asarray([[0, 0], - [0, 0]]) + i = numpy.asarray([[0, 0], [0, 0]]) self.assertRaises(ArgumentError, centerdistance_xdminus1, i, 0) - + # 3D with invalid dims (invalid) - i = numpy.asarray([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) + i = numpy.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) i = numpy.asarray([i, i, i]) self.assertRaises(ArgumentError, centerdistance_xdminus1, i, (0, 1)) - + # 3D with invalid dim self.assertRaises(ArgumentError, centerdistance_xdminus1, i, 3) - + # 3D with valid dim 0 - e = [math.sqrt(2), 1, math.sqrt(2),\ - 1, 0, 1,\ - math.sqrt(2), 1, math.sqrt(2)] + e = [math.sqrt(2), 1, math.sqrt(2), 1, 0, 1, math.sqrt(2), 1, math.sqrt(2)] e = numpy.asarray([e, e, e]).ravel() r = centerdistance_xdminus1(i, 0) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: 3D, dim = 0 failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: 3D, dim = 0 failed" + ) + # 3D multi-spectral r = centerdistance_xdminus1([i, i], 0) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: 3D, multi-spectral failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: 3D, multi-spectral failed" + ) + # 3D masked - m = [[True, False, False], - [False, True, False], - [False, False, True]] + m = [[True, False, False], [False, True, False], [False, False, True]] e = [math.sqrt(2), 0, 
math.sqrt(2)] e = numpy.asarray([e, e, e]).ravel() - r = centerdistance_xdminus1(i, 0, mask = [m, m, m]) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: 3D, masked failed') - + r = centerdistance_xdminus1(i, 0, mask=[m, m, m]) + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: 3D, masked failed" + ) + # 3D with valid dim 0, uneven image - i = numpy.asarray([[[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]], - [[0, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]]]) - e = [math.sqrt(3.25), math.sqrt(1.25), math.sqrt(1.25), math.sqrt(3.25), \ - math.sqrt(2.25), math.sqrt(0.25), math.sqrt(0.25), math.sqrt(2.25), \ - math.sqrt(3.25), math.sqrt(1.25), math.sqrt(1.25), math.sqrt(3.25)] + i = numpy.asarray( + [ + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + ] + ) + e = [ + math.sqrt(3.25), + math.sqrt(1.25), + math.sqrt(1.25), + math.sqrt(3.25), + math.sqrt(2.25), + math.sqrt(0.25), + math.sqrt(0.25), + math.sqrt(2.25), + math.sqrt(3.25), + math.sqrt(1.25), + math.sqrt(1.25), + math.sqrt(3.25), + ] e = numpy.asarray([e, e]).ravel() r = centerdistance_xdminus1(i, 0) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: uneven 3D, dim = 0 failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: uneven 3D, dim = 0 failed" + ) + # 3D with valid dim 1, uneven image - e = [[math.sqrt(2.5), math.sqrt(0.5), math.sqrt(0.5), math.sqrt(2.5)], - [math.sqrt(2.5), math.sqrt(0.5), math.sqrt(0.5), math.sqrt(2.5)]] + e = [ + [math.sqrt(2.5), math.sqrt(0.5), math.sqrt(0.5), math.sqrt(2.5)], + [math.sqrt(2.5), math.sqrt(0.5), math.sqrt(0.5), math.sqrt(2.5)], + ] e = numpy.asarray([e, e, e]) e = numpy.rollaxis(e, 0, 2).ravel() r = centerdistance_xdminus1(i, 1) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: uneven 3D, dim = 1 failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: uneven 3D, dim = 1 failed" + ) + # 3D with valid dim 2, uneven image - e = [[math.sqrt(1.25), math.sqrt(0.25), math.sqrt(1.25)], - [math.sqrt(1.25), math.sqrt(0.25), math.sqrt(1.25)]] + e = [ + [math.sqrt(1.25), math.sqrt(0.25), math.sqrt(1.25)], + [math.sqrt(1.25), math.sqrt(0.25), math.sqrt(1.25)], + ] e = numpy.asarray([e, e, e, e]) e = numpy.rollaxis(e, 0, 3).ravel() r = centerdistance_xdminus1(i, 2) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: uneven 3D, dim = 2 failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: uneven 3D, dim = 2 failed" + ) + # 4D with valid dims 1, 3 - i = numpy.asarray([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) + i = numpy.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) i = numpy.asarray([i, i, i]) i = numpy.asarray([i, i, i]) - e = [[math.sqrt(2), 1, math.sqrt(2)], - [1, 0, 1], - [math.sqrt(2), 1, math.sqrt(2)]] + e = [ + [math.sqrt(2), 1, math.sqrt(2)], + [1, 0, 1], + [math.sqrt(2), 1, math.sqrt(2)], + ] e = numpy.asarray([e] * 3) e = numpy.rollaxis(e, 0, 2) e = numpy.asarray([e] * 3) e = numpy.rollaxis(e, 0, 4).ravel() r = centerdistance_xdminus1(i, (1, 3)) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance_xdminus1: 4D, dim = (1, 3) failed') - + numpy.testing.assert_allclose( + r, e, err_msg="centerdistance_xdminus1: 4D, dim = (1, 3) failed" + ) + def test_centerdistance(self): """Test the feature: centerdistance.""" - - i = numpy.asarray([[0, 0], - [0, 0]]) - e = [math.sqrt(0.5), math.sqrt(0.5),\ - math.sqrt(0.5), math.sqrt(0.5)] + + i = 
numpy.asarray([[0, 0], [0, 0]]) + e = [math.sqrt(0.5), math.sqrt(0.5), math.sqrt(0.5), math.sqrt(0.5)] r = centerdistance(i) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, single-spectrum, 2x2, unmasked and not normalized') - + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, single-spectrum, 2x2, unmasked and not normalized", + ) + r = centerdistance([i, i]) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, multi-spectrum, 2x2, unmasked and not normalized') - - i = numpy.asarray([[1, 0.], - [2, 3.]]) + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, multi-spectrum, 2x2, unmasked and not normalized", + ) + + i = numpy.asarray([[1, 0.0], [2, 3.0]]) r = centerdistance(i) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, single-spectrum, 2x2, unmasked and not normalized: feature not independent of image content') - - i = numpy.asarray([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - e = [math.sqrt(2), 1, math.sqrt(2),\ - 1, 0, 1,\ - math.sqrt(2), 1, math.sqrt(2)] + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, single-spectrum, 2x2, unmasked and not normalized: feature not independent of image content", + ) + + i = numpy.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + e = [math.sqrt(2), 1, math.sqrt(2), 1, 0, 1, math.sqrt(2), 1, math.sqrt(2)] r = centerdistance(i) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, single-spectrum, 3x3, unmasked and not normalized') - - m = [[True, False, False], - [False, True, False], - [False, False, True]] + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, single-spectrum, 3x3, unmasked and not normalized", + ) + + m = [[True, False, False], [False, True, False], [False, False, True]] e = [math.sqrt(2), 0, math.sqrt(2)] - r = centerdistance(i, mask = m) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, single-spectrum, 2x2, masked and not normalized') - - e = [math.sqrt(1.25), 1, math.sqrt(1.25),\ - math.sqrt(0.25), 0, math.sqrt(0.25),\ - math.sqrt(1.25), 1, math.sqrt(1.25)] - s = [1., 0.5] - r = centerdistance(i, voxelspacing = s) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 2D, single-spectrum, 3x3, unmasked and not normalized: voxel spacing not taken into account') - + r = centerdistance(i, mask=m) + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, single-spectrum, 2x2, masked and not normalized", + ) + + e = [ + math.sqrt(1.25), + 1, + math.sqrt(1.25), + math.sqrt(0.25), + 0, + math.sqrt(0.25), + math.sqrt(1.25), + 1, + math.sqrt(1.25), + ] + s = [1.0, 0.5] + r = centerdistance(i, voxelspacing=s) + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 2D, single-spectrum, 3x3, unmasked and not normalized: voxel spacing not taken into account", + ) + i = numpy.asarray([i, i, i]) - e = [math.sqrt(2), 1, math.sqrt(2),\ - 1, 0, 1,\ - math.sqrt(2), 1, math.sqrt(2)] - en1 = [math.sqrt(3), math.sqrt(2), math.sqrt(3),\ - math.sqrt(2), 1, math.sqrt(2),\ - math.sqrt(3), math.sqrt(2), math.sqrt(3)] + e = [math.sqrt(2), 1, math.sqrt(2), 1, 0, 1, math.sqrt(2), 1, math.sqrt(2)] + en1 = [ + math.sqrt(3), + math.sqrt(2), + math.sqrt(3), + math.sqrt(2), + 1, + math.sqrt(2), + math.sqrt(3), + math.sqrt(2), + math.sqrt(3), + ] e = numpy.asarray([en1, e, en1]).ravel() r = centerdistance(i) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 3D, single-spectrum, 3x3x3, unmasked and not normalized') - + 
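The centerdistance expectations here all follow one rule: each voxel's feature value is its euclidean distance to the image centre, independent of the image content (hence the explicit content-independence check). The 3x3 case can be reproduced by hand; a sketch restricted to behaviour shown in this test:

import math
import numpy
from medpy.features.intensity import centerdistance

img = numpy.zeros((3, 3))  # content is irrelevant to the feature
e = [math.sqrt(2), 1, math.sqrt(2), 1, 0, 1, math.sqrt(2), 1, math.sqrt(2)]
numpy.testing.assert_allclose(centerdistance(img), e)  # distances to voxel (1, 1)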
numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 3D, single-spectrum, 3x3x3, unmasked and not normalized", + ) + i = numpy.asarray([i, i, i]) - en2 = [math.sqrt(4), math.sqrt(3), math.sqrt(4),\ - math.sqrt(3), math.sqrt(2), math.sqrt(3),\ - math.sqrt(4), math.sqrt(3), math.sqrt(4)] - e = numpy.asarray([numpy.asarray([en2, en1, en2]).ravel(), e, numpy.asarray([en2, en1, en2]).ravel()]).ravel() + en2 = [ + math.sqrt(4), + math.sqrt(3), + math.sqrt(4), + math.sqrt(3), + math.sqrt(2), + math.sqrt(3), + math.sqrt(4), + math.sqrt(3), + math.sqrt(4), + ] + e = numpy.asarray( + [ + numpy.asarray([en2, en1, en2]).ravel(), + e, + numpy.asarray([en2, en1, en2]).ravel(), + ] + ).ravel() r = centerdistance(i) - numpy.testing.assert_allclose(r, e, err_msg = 'centerdistance: 4D, single-spectrum, 3x3x3x3, unmasked and not normalized') - - + numpy.testing.assert_allclose( + r, + e, + err_msg="centerdistance: 4D, single-spectrum, 3x3x3x3, unmasked and not normalized", + ) + def test_intensities(self): """Test the feature: image intensity.""" - + # Test 2D image with various settings - i = numpy.asarray([[-1., 1, 2], - [ 0., 2, 4], - [ 1., 3, 5]]) - m = [[True, False, False], - [False, True, False], - [True, True, False]] - e = [-1., 1, 2, 0, 2, 4, 1, 3, 5] - em = [-1., 2., 1., 3.] - - r = intensities(i) # normalize = False, mask = slice(None) - numpy.testing.assert_allclose(r, e, err_msg = 'intensities: 2D, single-spectrum, unmasked and not normalized') - - r = intensities(i, mask = m) # normalize = False - numpy.testing.assert_allclose(r, em, err_msg = 'intensities: 2D, single-spectrum, masked and not normalized') - - r = intensities([i, i]) # normalize = False, mask = slice(None) - numpy.testing.assert_allclose(r, join(e, e), err_msg = 'intensities: 2D, multi-spectrum, unmasked and not normalized') - + i = numpy.asarray([[-1.0, 1, 2], [0.0, 2, 4], [1.0, 3, 5]]) + m = [[True, False, False], [False, True, False], [True, True, False]] + e = [-1.0, 1, 2, 0, 2, 4, 1, 3, 5] + em = [-1.0, 2.0, 1.0, 3.0] + + r = intensities(i) # normalize = False, mask = slice(None) + numpy.testing.assert_allclose( + r, + e, + err_msg="intensities: 2D, single-spectrum, unmasked and not normalized", + ) + + r = intensities(i, mask=m) # normalize = False + numpy.testing.assert_allclose( + r, em, err_msg="intensities: 2D, single-spectrum, masked and not normalized" + ) + + r = intensities([i, i]) # normalize = False, mask = slice(None) + numpy.testing.assert_allclose( + r, + join(e, e), + err_msg="intensities: 2D, multi-spectrum, unmasked and not normalized", + ) + # Test 3D image i = numpy.asarray([i, i + 0.5]) e = append(e, numpy.asarray(e) + 0.5) - - r = intensities(i) # normalize = False, mask = slice(None) - numpy.testing.assert_allclose(r, e, err_msg = 'intensities: 3D, single-spectrum, unmasked and not normalized') - + + r = intensities(i) # normalize = False, mask = slice(None) + numpy.testing.assert_allclose( + r, + e, + err_msg="intensities: 3D, single-spectrum, unmasked and not normalized", + ) + # Test 4D image i = numpy.asarray([i, i + 0.5]) e = append(e, numpy.asarray(e) + 0.5) - - r = intensities(i) # normalize = False, mask = slice(None) - numpy.testing.assert_allclose(r, e, err_msg = 'intensities: 4D, single-spectrum, unmasked and not normalized') - - - -if __name__ == '__main__': - unittest.main() \ No newline at end of file + + r = intensities(i) # normalize = False, mask = slice(None) + numpy.testing.assert_allclose( + r, + e, + err_msg="intensities: 4D, single-spectrum, unmasked and not 
normalized", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/features_/texture.py b/tests/features_/texture.py index 86af1fe0..11b08914 100644 --- a/tests/features_/texture.py +++ b/tests/features_/texture.py @@ -11,79 +11,155 @@ # build-in modules import unittest -# third-party modules - # own modules from medpy.features.texture import * +# third-party modules + + # code class TestTextureFeatures(unittest.TestCase): - """ Test the Tamura Texture features programmed in medpy.features.texture. - Functions are: coarseness(image, voxelspacing = None, mask = slice(None)) - contrast(image, mask = slice(None)) - directionality(image, voxelspacing = None, mask = slice(None), min_distance = 4) + """Test the Tamura Texture features programmed in medpy.features.texture. + Functions are: coarseness(image, voxelspacing = None, mask = slice(None)) + contrast(image, mask = slice(None)) + directionality(image, voxelspacing = None, mask = slice(None), min_distance = 4) """ def setUp(self): - self.image1 = numpy.zeros([100,100]) - self.image1[:,::3] = 1 + self.image1 = numpy.zeros([100, 100]) + self.image1[:, ::3] = 1 self.voxelspacing1 = (1.0, 3.0) - self.mask1 = [slice(0,50,1), slice(0,50,1)] - + self.mask1 = [slice(0, 50, 1), slice(0, 50, 1)] + def test_Coarseness(self): res = coarseness(self.image1) - self.assertEqual(res, 1.33,"coarseness: 2D image [1,0,0...], no voxelspacing, no mask: got {} ,expected {}".format(res, 1.33)) - - res = coarseness(self.image1,voxelspacing = self.voxelspacing1) - self.assertEqual(res, 1.0,"coarseness: 2D image [1,0,0...], voxelspacing = (1,3), no mask: got {} ,expected {}".format(res, 1.0)) + self.assertEqual( + res, + 1.33, + "coarseness: 2D image [1,0,0...], no voxelspacing, no mask: got {} ,expected {}".format( + res, 1.33 + ), + ) + + res = coarseness(self.image1, voxelspacing=self.voxelspacing1) + self.assertEqual( + res, + 1.0, + "coarseness: 2D image [1,0,0...], voxelspacing = (1,3), no mask: got {} ,expected {}".format( + res, 1.0 + ), + ) # @TODO: there is a very strong relation to the border handle if the texture is very small (1px) - res = coarseness(self.image1,voxelspacing = self.voxelspacing1, mask = self.mask1) - self.assertEqual(res, 76.26,"coarseness: 2D image [1,0,0...], voxelspacing = (1,3), mask = [slice(0,50,1),slice(0,50,1)]: got {} ,expected {}".format(res, 76.26)) - - res = coarseness(numpy.zeros([100,100])) - self.assertEqual(res, 1.0,"coarseness: 2D image [0,0,0,...], no voxelspacing, no mask: got {} ,expected {}".format(res, 1.0)) - - res = coarseness(self.image1,voxelspacing = (1,2,3)) - self.assertEqual(res, None,"coarseness: 2D image [1,0,0,...], voxelspacing = (1,2,3), no mask: got {} ,expected {} ".format(res, None)) + res = coarseness(self.image1, voxelspacing=self.voxelspacing1, mask=self.mask1) + self.assertEqual( + res, + 76.26, + "coarseness: 2D image [1,0,0...], voxelspacing = (1,3), mask = [slice(0,50,1),slice(0,50,1)]: got {} ,expected {}".format( + res, 76.26 + ), + ) + + res = coarseness(numpy.zeros([100, 100])) + self.assertEqual( + res, + 1.0, + "coarseness: 2D image [0,0,0,...], no voxelspacing, no mask: got {} ,expected {}".format( + res, 1.0 + ), + ) + res = coarseness(self.image1, voxelspacing=(1, 2, 3)) + self.assertEqual( + res, + None, + "coarseness: 2D image [1,0,0,...], voxelspacing = (1,2,3), no mask: got {} ,expected {} ".format( + res, None + ), + ) def test_Contrast(self): standard_deviation = numpy.std(self.image1) - kurtosis = stats.kurtosis(self.image1, axis=None, bias=True, 
fisher=False) - Fcon1 = standard_deviation / (kurtosis**0.25) - + kurtosis = stats.kurtosis(self.image1, axis=None, bias=True, fisher=False) + Fcon1 = standard_deviation / (kurtosis**0.25) + res = contrast(self.image1) - self.assertEqual(res, Fcon1,"contrast: 2D image, no mask: got {} ,expected {}".format(res, Fcon1)) - - image2 = self.image1[0:50,0:50] + self.assertEqual( + res, + Fcon1, + "contrast: 2D image, no mask: got {} ,expected {}".format(res, Fcon1), + ) + + image2 = self.image1[0:50, 0:50] standard_deviation = numpy.std(image2) - kurtosis = stats.kurtosis(image2, axis=None, bias=True, fisher=False) - Fcon2 = standard_deviation / (kurtosis**0.25) - - res = contrast(self.image1, mask=self.mask1) - self.assertEqual(res, Fcon2,"contrast: 2D image, mask = [slice(0,50,1), slice(0,50,1)]: got {} ,expected {}".format(res, Fcon2)) + kurtosis = stats.kurtosis(image2, axis=None, bias=True, fisher=False) + Fcon2 = standard_deviation / (kurtosis**0.25) + res = contrast(self.image1, mask=self.mask1) + self.assertEqual( + res, + Fcon2, + "contrast: 2D image, mask = [slice(0,50,1), slice(0,50,1)]: got {} ,expected {}".format( + res, Fcon2 + ), + ) def test_Directionality(self): res = directionality(self.image1) - self.assertEqual(res, 1.0,"directionality: 2D image, no voxelspacing, no mask, default min_distance, default threshold: got {} ,expected {}".format(res, 1.0)) + self.assertEqual( + res, + 1.0, + "directionality: 2D image, no voxelspacing, no mask, default min_distance, default threshold: got {} ,expected {}".format( + res, 1.0 + ), + ) + + res = directionality(self.image1, voxelspacing=self.voxelspacing1) + self.assertEqual( + res, + 1.0, + "directionality: 2D image, voxelspacing = (1.0, 3.0), no mask, default min_distance, default threshold: got {} ,expected {}".format( + res, 1.0 + ), + ) + + res = directionality(self.image1, voxelspacing=(1, 2, 3)) + self.assertEqual( + res, + None, + "directionality: 2D image, voxelspacing = (1,2,3), no mask, default min_distance, default threshold: got {} ,expected {}".format( + res, None + ), + ) + + res = directionality( + self.image1, voxelspacing=self.voxelspacing1, mask=self.mask1 + ) + self.assertEqual( + res, + 1.0, + "directionality: 2D image, voxelspacing(1.0, 3.0), mask = [slice(0,50,1), slice(0,50,1)], default min_distance, default threshold: got {} ,expected {}".format( + res, 1.0 + ), + ) - res = directionality(self.image1,voxelspacing = self.voxelspacing1) - self.assertEqual(res, 1.0,"directionality: 2D image, voxelspacing = (1.0, 3.0), no mask, default min_distance, default threshold: got {} ,expected {}".format(res, 1.0)) + res = directionality(self.image1, min_distance=10.0) + self.assertEqual( + res, + 1.0, + "directionality: 2D image, no voxelspacing, no mask , min_distance= 10, default threshold: got {} ,expected {}".format( + res, 1.0 + ), + ) - res = directionality(self.image1,voxelspacing = (1,2,3)) - self.assertEqual(res, None,"directionality: 2D image, voxelspacing = (1,2,3), no mask, default min_distance, default threshold: got {} ,expected {}".format(res, None)) + res = directionality(self.image1, threshold=0.5) + self.assertEqual( + res, + 1.0, + "directionality: 2D image, no voxelspacing, no mask, default min_distance, threshold = 0.5: got {} ,expected {}".format( + res, 1.0 + ), + ) - res = directionality(self.image1, voxelspacing = self.voxelspacing1, mask=self.mask1) - self.assertEqual(res, 1.0,"directionality: 2D image, voxelspacing(1.0, 3.0), mask = [slice(0,50,1), slice(0,50,1)], default min_distance, default 
threshold: got {} ,expected {}".format(res, 1.0)) - - res = directionality(self.image1, min_distance = 10.0) - self.assertEqual(res, 1.0,"directionality: 2D image, no voxelspacing, no mask , min_distance= 10, default threshold: got {} ,expected {}".format(res, 1.0)) - - res = directionality(self.image1,threshold = 0.5) - self.assertEqual(res, 1.0,"directionality: 2D image, no voxelspacing, no mask, default min_distance, threshold = 0.5: got {} ,expected {}".format(res, 1.0)) if __name__ == "__main__": unittest.main() - - \ No newline at end of file diff --git a/tests/filter_/IntensityRangeStandardization.py b/tests/filter_/IntensityRangeStandardization.py index 7692ce30..7f0d686c 100644 --- a/tests/filter_/IntensityRangeStandardization.py +++ b/tests/filter_/IntensityRangeStandardization.py @@ -1,17 +1,23 @@ """Unittest for the IntensityRangeStandardization class.""" # build-in modules -import unittest -import tempfile import pickle +import tempfile +import unittest # third-party modules import numpy +# own modules +from medpy.filter import ( + InformationLossException, + IntensityRangeStandardization, + SingleIntensityAccumulationError, + UntrainedException, +) + # path changes -# own modules -from medpy.filter import IntensityRangeStandardization, UntrainedException, InformationLossException, SingleIntensityAccumulationError # information __author__ = "Oskar Maier" @@ -20,110 +26,172 @@ __status__ = "Release" __description__ = "IntensityRangeStandardization class unittest." -BASE_IMAGE = numpy.asarray([[1,2,3],[3,5,4],[7,8,9],[2,4,8]]) +BASE_IMAGE = numpy.asarray([[1, 2, 3], [3, 5, 4], [7, 8, 9], [2, 4, 8]]) + # code class TestIntensityRangeStandardization(unittest.TestCase): - good_trainingset = [BASE_IMAGE + x for x in range(10)] good_image = BASE_IMAGE + 11 - bad_image = BASE_IMAGE + numpy.arange(1, 24, 2).reshape((4,3)) + bad_image = BASE_IMAGE + numpy.arange(1, 24, 2).reshape((4, 3)) uniform_image = numpy.zeros((4, 3)) - single_intensity_image = numpy.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 1000000], [0, 0, 0]]) - + single_intensity_image = numpy.asarray( + [[0, 0, 0], [0, 0, 0], [0, 0, 1000000], [0, 0, 0]] + ) + def test_ValidInitializationCases(self): """Test valid initialization cases.""" IntensityRangeStandardization() - IntensityRangeStandardization(landmarkp = IntensityRangeStandardization.L2) - IntensityRangeStandardization(landmarkp = IntensityRangeStandardization.L3) - IntensityRangeStandardization(landmarkp = IntensityRangeStandardization.L4) - IntensityRangeStandardization(landmarkp = (50,)) - IntensityRangeStandardization(landmarkp = [50]) - IntensityRangeStandardization(landmarkp = numpy.asarray([50])) - + IntensityRangeStandardization(landmarkp=IntensityRangeStandardization.L2) + IntensityRangeStandardization(landmarkp=IntensityRangeStandardization.L3) + IntensityRangeStandardization(landmarkp=IntensityRangeStandardization.L4) + IntensityRangeStandardization(landmarkp=(50,)) + IntensityRangeStandardization(landmarkp=[50]) + IntensityRangeStandardization(landmarkp=numpy.asarray([50])) + def test_InvalidInitializationCases(self): """Test invalid initialization cases.""" - cutoffp_testvalues = [(-1, 99), (101, 99), (1, 101), (1, -2), (40, 40), (1,), (1, 2, 3), (1), '123', None, (None, 100)] + cutoffp_testvalues = [ + (-1, 99), + (101, 99), + (1, 101), + (1, -2), + (40, 40), + (1,), + (1, 2, 3), + (1), + "123", + None, + (None, 100), + ] for cutoffp in cutoffp_testvalues: - self.assertRaises(ValueError, IntensityRangeStandardization, cutoffp = cutoffp) - - 
landmarkp_testvalues = [[], 'string', ('50',), (1,), (99,), (-1,), (101,)] + self.assertRaises( + ValueError, IntensityRangeStandardization, cutoffp=cutoffp + ) + + landmarkp_testvalues = [[], "string", ("50",), (1,), (99,), (-1,), (101,)] for landmarkp in landmarkp_testvalues: - self.assertRaises(ValueError, IntensityRangeStandardization, cutoffp = (1, 99), landmarkp = landmarkp) - - stdrange_testvalues = [[], [1], [1, 2, 3], ['a', 'b'], [4, 3]] + self.assertRaises( + ValueError, + IntensityRangeStandardization, + cutoffp=(1, 99), + landmarkp=landmarkp, + ) + + stdrange_testvalues = [[], [1], [1, 2, 3], ["a", "b"], [4, 3]] for stdrange in stdrange_testvalues: - self.assertRaises(ValueError, IntensityRangeStandardization, stdrange = stdrange) - + self.assertRaises( + ValueError, IntensityRangeStandardization, stdrange=stdrange + ) + def test_InvalidUseCases(self): """Test invalid use-cases.""" irs = IntensityRangeStandardization() - self.assertRaises(UntrainedException, irs.transform, image = TestIntensityRangeStandardization.good_image) - + self.assertRaises( + UntrainedException, + irs.transform, + image=TestIntensityRangeStandardization.good_image, + ) + def test_MethodLimits(self): - """Test the limits of the method.""" + """Test the limits of the method.""" irs = IntensityRangeStandardization() irs.train(TestIntensityRangeStandardization.good_trainingset) - self.assertRaises(InformationLossException, irs.transform, image = TestIntensityRangeStandardization.bad_image) - + self.assertRaises( + InformationLossException, + irs.transform, + image=TestIntensityRangeStandardization.bad_image, + ) + irs = IntensityRangeStandardization() irs.train(TestIntensityRangeStandardization.good_trainingset) - self.assertRaises(SingleIntensityAccumulationError, irs.transform, image = TestIntensityRangeStandardization.uniform_image) - + self.assertRaises( + SingleIntensityAccumulationError, + irs.transform, + image=TestIntensityRangeStandardization.uniform_image, + ) + irs = IntensityRangeStandardization() irs.train(TestIntensityRangeStandardization.good_trainingset) - self.assertRaises(SingleIntensityAccumulationError, irs.transform, image = TestIntensityRangeStandardization.single_intensity_image) - + self.assertRaises( + SingleIntensityAccumulationError, + irs.transform, + image=TestIntensityRangeStandardization.single_intensity_image, + ) + irs = IntensityRangeStandardization() - self.assertRaises(SingleIntensityAccumulationError, irs.train, images = [TestIntensityRangeStandardization.uniform_image] * 10) - + self.assertRaises( + SingleIntensityAccumulationError, + irs.train, + images=[TestIntensityRangeStandardization.uniform_image] * 10, + ) + irs = IntensityRangeStandardization() - self.assertRaises(SingleIntensityAccumulationError, irs.train, images = [TestIntensityRangeStandardization.single_intensity_image] * 10) - + self.assertRaises( + SingleIntensityAccumulationError, + irs.train, + images=[TestIntensityRangeStandardization.single_intensity_image] * 10, + ) + def test_Method(self): """Test the normal functioning of the method.""" # test training with good and bad images irs = IntensityRangeStandardization() - irs.train(TestIntensityRangeStandardization.good_trainingset + [TestIntensityRangeStandardization.bad_image]) + irs.train( + TestIntensityRangeStandardization.good_trainingset + + [TestIntensityRangeStandardization.bad_image] + ) irs.transform(TestIntensityRangeStandardization.bad_image) - + # test equal methods irs = IntensityRangeStandardization() irs_ = 
irs.train(TestIntensityRangeStandardization.good_trainingset)
 self.assertEqual(irs, irs_)
- 
+
 irs = IntensityRangeStandardization()
 irs.train(TestIntensityRangeStandardization.good_trainingset)
 timages = []
 for i in TestIntensityRangeStandardization.good_trainingset:
 timages.append(irs.transform(i))
- 
+
 irs = IntensityRangeStandardization()
- irs_, timages_ = irs.train_transform(TestIntensityRangeStandardization.good_trainingset)
- 
- self.assertEqual(irs, irs_, 'instance returned by transform() method is not the same as the once initialized')
+ irs_, timages_ = irs.train_transform(
+ TestIntensityRangeStandardization.good_trainingset
+ )
+
+ self.assertEqual(
+ irs,
+ irs_,
+ "instance returned by transform() method is not the same as the one initialized",
+ )
 for ti, ti_ in zip(timages, timages_):
- numpy.testing.assert_allclose(ti, ti_, err_msg = 'train_transform() failed to produce the same results as transform()')
- 
- 
+ numpy.testing.assert_allclose(
+ ti,
+ ti_,
+ err_msg="train_transform() failed to produce the same results as transform()",
+ )
+
 # test pickling
 irs = IntensityRangeStandardization()
 irs_ = irs.train(TestIntensityRangeStandardization.good_trainingset)
 timages = []
 for i in TestIntensityRangeStandardization.good_trainingset:
 timages.append(irs.transform(i))
- 
+
 with tempfile.TemporaryFile() as f:
 pickle.dump(irs, f)
 f.seek(0, 0)
 irs_ = pickle.load(f)
- 
+
 timages_ = []
 for i in TestIntensityRangeStandardization.good_trainingset:
 timages_.append(irs_.transform(i))
- 
+
 for ti, ti_ in zip(timages, timages_):
- numpy.testing.assert_allclose(ti, ti_, err_msg = 'pickling failed to preserve the instances model')
- 
-if __name__ == '__main__':
+ numpy.testing.assert_allclose(
+ ti, ti_, err_msg="pickling failed to preserve the instance's model"
+ )
+
+
+if __name__ == "__main__":
 unittest.main()
diff --git a/tests/filter_/__init__.py b/tests/filter_/__init__.py
index 18680d52..1ae6a082 100644
--- a/tests/filter_/__init__.py
+++ b/tests/filter_/__init__.py
@@ -1,2 +1,6 @@
-from .houghtransform import TestHoughTransform
-from .IntensityRangeStandardization import TestIntensityRangeStandardization
\ No newline at end of file
+from .houghtransform import TestHoughTransform as TestHoughTransform
+from .IntensityRangeStandardization import (
+ TestIntensityRangeStandardization as TestIntensityRangeStandardization,
+)
+
+__all__ = ["TestHoughTransform", "TestIntensityRangeStandardization"]
diff --git a/tests/filter_/anisotropic_diffusion.py b/tests/filter_/anisotropic_diffusion.py
index 1d892de3..4b123012 100644
--- a/tests/filter_/anisotropic_diffusion.py
+++ b/tests/filter_/anisotropic_diffusion.py
@@ -1,5 +1,3 @@
-import unittest
-import scipy
 import numpy as np
 from medpy.filter import anisotropic_diffusion
@@ -7,37 +5,43 @@
 # Purpose of these tests is to ensure the filter code does not crash
 # Depending on Python versions
+
 def test_anisotropic_diffusion_powerof2_single_channel():
- arr = np.random.uniform(size=(64,64))
+ arr = np.random.uniform(size=(64, 64))
 filtered = anisotropic_diffusion(arr)
 assert filtered.shape == arr.shape
+
 def test_anisotropic_diffusion_powerof2_three_channels():
 # Purpose of this test is to ensure the filter code does not crash
 # Depending on Python versions
- arr = np.random.uniform(size=(64,64,3))
+ arr = np.random.uniform(size=(64, 64, 3))
 filtered = anisotropic_diffusion(arr)
 assert filtered.shape == arr.shape
+
 def test_anisotropic_diffusion_single_channel():
 # Purpose of this test is to ensure the filter code does not crash
 # Depending on Python 
versions - arr = np.random.uniform(size=(60,31)) + arr = np.random.uniform(size=(60, 31)) filtered = anisotropic_diffusion(arr) assert filtered.shape == arr.shape + def test_anisotropic_diffusion_three_channels(): # Purpose of this test is to ensure the filter code does not crash # Depending on Python versions - arr = np.random.uniform(size=(60,31,3)) + arr = np.random.uniform(size=(60, 31, 3)) filtered = anisotropic_diffusion(arr) assert filtered.shape == arr.shape + def test_anisotropic_diffusion_voxel_spacing_array(): # Purpose of this test is to ensure the filter code does not crash # Depending on Python versions - arr = np.random.uniform(size=(60,31,3)) + arr = np.random.uniform(size=(60, 31, 3)) filtered = anisotropic_diffusion( - arr, voxelspacing=np.array([1, 1, 1.]), + arr, + voxelspacing=np.array([1, 1, 1.0]), ) assert filtered.shape == arr.shape diff --git a/tests/filter_/houghtransform.py b/tests/filter_/houghtransform.py index 044f39b7..e8e0d5a1 100644 --- a/tests/filter_/houghtransform.py +++ b/tests/filter_/houghtransform.py @@ -14,217 +14,333 @@ import scipy # own modules -from medpy.filter import ght, template_sphere, template_ellipsoid +from medpy.filter import ght, template_ellipsoid, template_sphere + # code class TestHoughTransform(unittest.TestCase): - def setUp(self): pass def test_takes_sequences(self): - img = [[1,2,3,4,5]] - template = [[1,0]] + img = [[1, 2, 3, 4, 5]] + template = [[1, 0]] ght(img, template) - img = ((1,2,3,4,5)) - template = ((1,0)) + img = (1, 2, 3, 4, 5) + template = (1, 0) ght(img, template) def test_even_template(self): # prepare - img = [[1, 1, 0, 0, 0], - [1, 1, 0, 0, 0], - [0, 0, 1, 1, 0], - [0, 0, 1, 1, 0], - [0, 0, 0, 0, 0]] + img = [ + [1, 1, 0, 0, 0], + [1, 1, 0, 0, 0], + [0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0], + ] img = scipy.asarray(img).astype(scipy.bool_) - template = scipy.asarray([[True, True], - [True, True]]) - result_array = scipy.asarray([[4, 2, 0, 0, 0], - [2, 2, 2, 1, 0], - [0, 2, 4, 2, 0], - [0, 1, 2, 1, 0], - [0, 0, 0, 0, 0]]).astype(scipy.int32) + template = scipy.asarray([[True, True], [True, True]]) + result_array = scipy.asarray( + [ + [4, 2, 0, 0, 0], + [2, 2, 2, 1, 0], + [0, 2, 4, 2, 0], + [0, 1, 2, 1, 0], + [0, 0, 0, 0, 0], + ] + ).astype(scipy.int32) result_dtype = scipy.int32 - + # run result = ght(img, template) - - #test - self.assertTrue(scipy.all(result == result_array), 'Returned hough transformation differs from the expected values.') - self.assertTrue(result.dtype == result_dtype, 'Returned hough transformation is not of the expected scipy.dtype') - - + + # test + self.assertTrue( + scipy.all(result == result_array), + "Returned hough transformation differs from the expected values.", + ) + self.assertTrue( + result.dtype == result_dtype, + "Returned hough transformation is not of the expected scipy.dtype", + ) + def test_odd_template(self): # prepare - img = [[1, 1, 1, 0, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]] + img = [ + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ] img = scipy.asarray(img).astype(scipy.bool_) - template = scipy.asarray([[True, True, True], - [True, True, True], - [True, True, True]]) - result_array = scipy.asarray([[4, 6, 4, 2, 0], - [6, 9, 6, 3, 0], - [4, 6, 4, 2, 0], - [2, 3, 2, 1, 0], - [0, 0, 0, 0, 0]]).astype(scipy.int32) + template = scipy.asarray( + [[True, True, True], [True, True, True], [True, True, True]] + ) + result_array = scipy.asarray( + [ + [4, 6, 4, 2, 0], + [6, 
9, 6, 3, 0], + [4, 6, 4, 2, 0], + [2, 3, 2, 1, 0], + [0, 0, 0, 0, 0], + ] + ).astype(scipy.int32) result_dtype = scipy.int32 - + # run result = ght(img, template) - - #test - self.assertTrue(scipy.all(result == result_array), 'Returned hough transformation differs from the expected values.') - self.assertTrue(result.dtype == result_dtype, 'Returned hough transformation is not of the expected scipy.dtype') - + + # test + self.assertTrue( + scipy.all(result == result_array), + "Returned hough transformation differs from the expected values.", + ) + self.assertTrue( + result.dtype == result_dtype, + "Returned hough transformation is not of the expected scipy.dtype", + ) + def test_int_img(self): # prepare - img = [[2, 1, 0, 0], - [1, 1, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]] + img = [[2, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] img = scipy.asarray(img) - template = scipy.asarray([[True, True], - [True, False]]) - result_array = scipy.asarray([[4, 2, 0, 0], - [2, 1, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]]).astype(img.dtype) + template = scipy.asarray([[True, True], [True, False]]) + result_array = scipy.asarray( + [[4, 2, 0, 0], [2, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] + ).astype(img.dtype) result_dtype = img.dtype - + # run result = ght(img, template) - - #test - self.assertTrue(scipy.all(result == result_array), 'Returned hough transformation differs from the expected values.') - self.assertTrue(result.dtype == result_dtype, 'Returned hough transformation is not of the expected scipy.dtype') - + + # test + self.assertTrue( + scipy.all(result == result_array), + "Returned hough transformation differs from the expected values.", + ) + self.assertTrue( + result.dtype == result_dtype, + "Returned hough transformation is not of the expected scipy.dtype", + ) + def test_float_img(self): # prepare - img = [[2., 3., 0, 0], - [1., 2., 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]] + img = [[2.0, 3.0, 0, 0], [1.0, 2.0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] img = scipy.asarray(img) - template = scipy.asarray([[True, True], - [True, False]]) - result_array = scipy.asarray([[6., 5., 0, 0], - [3., 2., 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 0]]).astype(img.dtype) + template = scipy.asarray([[True, True], [True, False]]) + result_array = scipy.asarray( + [[6.0, 5.0, 0, 0], [3.0, 2.0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]] + ).astype(img.dtype) result_dtype = img.dtype - + # run result = ght(img, template) - - #test - self.assertTrue(scipy.all(result == result_array), 'Returned hough transformation differs from the expected values.') - self.assertTrue(result.dtype == result_dtype, 'Returned hough transformation is not of the expected scipy.dtype') - + + # test + self.assertTrue( + scipy.all(result == result_array), + "Returned hough transformation differs from the expected values.", + ) + self.assertTrue( + result.dtype == result_dtype, + "Returned hough transformation is not of the expected scipy.dtype", + ) + def test_template_sphere_odd_radius(self): # prepare - expected = [[[0,1,0], - [1,1,1], - [0,1,0]], - [[1,1,1], - [1,1,1], - [1,1,1]], - [[0,1,0], - [1,1,1], - [0,1,0]]] - + expected = [ + [[0, 1, 0], [1, 1, 1], [0, 1, 0]], + [[1, 1, 1], [1, 1, 1], [1, 1, 1]], + [[0, 1, 0], [1, 1, 1], [0, 1, 0]], + ] + # run result = template_sphere(1.5, 3) - + # test - self.assertTrue(scipy.all(result == expected), 'Returned template contains not the expected spherical structure.') - self.assertTrue(result.dtype == scipy.bool_, 'Returned template should be of type scipy.bool_') - + self.assertTrue( + scipy.all(result == 
expected), + "Returned template contains not the expected spherical structure.", + ) + self.assertTrue( + result.dtype == scipy.bool_, + "Returned template should be of type scipy.bool_", + ) + def test_template_sphere_even_radius(self): # prepare - expected = [[[0,0,0,0], - [0,1,1,0], - [0,1,1,0], - [0,0,0,0]], - [[0,1,1,0], - [1,1,1,1], - [1,1,1,1], - [0,1,1,0]], - [[0,1,1,0], - [1,1,1,1], - [1,1,1,1], - [0,1,1,0]], - [[0,0,0,0], - [0,1,1,0], - [0,1,1,0], - [0,0,0,0]]] - + expected = [ + [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]], + [[0, 1, 1, 0], [1, 1, 1, 1], [1, 1, 1, 1], [0, 1, 1, 0]], + [[0, 1, 1, 0], [1, 1, 1, 1], [1, 1, 1, 1], [0, 1, 1, 0]], + [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]], + ] + # run result = template_sphere(2, 3) # test - self.assertTrue(scipy.all(result == expected), 'Returned template contains not the expected spherical structure.') - self.assertTrue(result.dtype == scipy.bool_, 'Returned template should be of type scipy.bool_') - + self.assertTrue( + scipy.all(result == expected), + "Returned template contains not the expected spherical structure.", + ) + self.assertTrue( + result.dtype == scipy.bool_, + "Returned template should be of type scipy.bool_", + ) + def test_template_ellipsoid(self): # prepare - expected = [[[False, False, False, False, False,], - [False, True, True, True, False,], - [False, True, True, True, False,], - [False, False, False, False, False,]], - - [[False, True, True, True, False,], - [ True, True, True, True, True,], - [ True, True, True, True, True,], - [False, True, True, True, False,]], - - [[False, False, False, False, False,], - [False, True, True, True, False,], - [False, True, True, True, False,], - [False, False, False, False, False,]]] - + expected = [ + [ + [ + False, + False, + False, + False, + False, + ], + [ + False, + True, + True, + True, + False, + ], + [ + False, + True, + True, + True, + False, + ], + [ + False, + False, + False, + False, + False, + ], + ], + [ + [ + False, + True, + True, + True, + False, + ], + [ + True, + True, + True, + True, + True, + ], + [ + True, + True, + True, + True, + True, + ], + [ + False, + True, + True, + True, + False, + ], + ], + [ + [ + False, + False, + False, + False, + False, + ], + [ + False, + True, + True, + True, + False, + ], + [ + False, + True, + True, + True, + False, + ], + [ + False, + False, + False, + False, + False, + ], + ], + ] + # run result = template_ellipsoid((3, 4, 5)) - + # test - self.assertTrue(scipy.all(result == expected), 'Returned template contains not the expected spherical structure.') - self.assertTrue(result.dtype == scipy.bool_, 'Returned template should be of type scipy.bool_') - + self.assertTrue( + scipy.all(result == expected), + "Returned template contains not the expected spherical structure.", + ) + self.assertTrue( + result.dtype == scipy.bool_, + "Returned template should be of type scipy.bool_", + ) + def test_exceptions(self): self.assertRaises(TypeError, template_sphere, 1.1) self.assertRaises(AttributeError, ght, [[0, 1], [2, 3]], [0, 1, 2]) self.assertRaises(AttributeError, ght, [0, 1], [0, 1, 2]) - + def test_dimensions(self): # 1D img = scipy.rand(10) template = scipy.random.randint(0, 2, (3)) result = ght(img, template) - self.assertEqual(result.ndim, 1, 'Computing ght with one-dimensional input data failed.') + self.assertEqual( + result.ndim, 1, "Computing ght with one-dimensional input data failed." 
+ ) # 2D img = scipy.rand(10, 11) template = scipy.random.randint(0, 2, (3, 4)) result = ght(img, template) - self.assertEqual(result.ndim, 2, 'Computing ght with two-dimensional input data failed.') + self.assertEqual( + result.ndim, 2, "Computing ght with two-dimensional input data failed." + ) # 3D img = scipy.rand(10, 11, 12) template = scipy.random.randint(0, 2, (3, 4, 5)) result = ght(img, template) - self.assertEqual(result.ndim, 3, 'Computing ght with three-dimensional input data failed.') + self.assertEqual( + result.ndim, 3, "Computing ght with three-dimensional input data failed." + ) # 4D img = scipy.rand(10, 11, 12, 13) template = scipy.random.randint(0, 2, (3, 4, 5, 6)) result = ght(img, template) - self.assertEqual(result.ndim, 4, 'Computing ght with four-dimensional input data failed.') + self.assertEqual( + result.ndim, 4, "Computing ght with four-dimensional input data failed." + ) # 5D img = scipy.rand(3, 4, 3, 4, 3) template = scipy.random.randint(0, 2, (2, 2, 2, 2, 2)) result = ght(img, template) - self.assertEqual(result.ndim, 5, 'Computing ght with five-dimensional input data failed.') - - -if __name__ == '__main__': + self.assertEqual( + result.ndim, 5, "Computing ght with five-dimensional input data failed." + ) + + +if __name__ == "__main__": unittest.main() diff --git a/tests/filter_/image.py b/tests/filter_/image.py index 2805e723..20759e2b 100644 --- a/tests/filter_/image.py +++ b/tests/filter_/image.py @@ -12,323 +12,240 @@ # third-party modules import numpy - +from scipy.ndimage import gaussian_filter # own modules -from medpy.filter.image import ssd, sls, sum_filter, average_filter -from scipy.ndimage import gaussian_filter +from medpy.filter.image import average_filter, sls, ssd, sum_filter + # code class TestMetrics(unittest.TestCase): - def setUp(self): pass def test_sls(self): - m = numpy.array( - [[0,0,0], - [0,0,0], - [0,0,0]]) - s = numpy.array( - [[1,2,3], - [3,4,5], - [5,6,7]]) - sn_fp = numpy.array( - [[0, 1, 0], - [1, 1, 0]]) - pn_fp = numpy.array( - [[1, 0], - [1, 0], - [0, 1]]) + m = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + s = numpy.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]]) + sn_fp = numpy.array([[0, 1, 0], [1, 1, 0]]) + pn_fp = numpy.array([[1, 0], [1, 0], [0, 1]]) # reflect patches = [ - numpy.array( - [[18, 33, 43], - [46, 69, 83], - [70,101,123]]), - numpy.array( - [[43,54, 68], - [59,70, 88], - [75,86,108]]), - numpy.array( - [[54, 81, 99], - [70,101,123], - [86,121,147]])] - patches = [patch / 3. 
for patch in patches] + numpy.array([[18, 33, 43], [46, 69, 83], [70, 101, 123]]), + numpy.array([[43, 54, 68], [59, 70, 88], [75, 86, 108]]), + numpy.array([[54, 81, 99], [70, 101, 123], [86, 121, 147]]), + ] + patches = [patch / 3.0 for patch in patches] noise = gaussian_filter(numpy.average(patches, 0), sigma=3) e = [-1 * numpy.exp(-1 * patch / noise) for patch in patches] e = numpy.rollaxis(numpy.asarray(e), 0, e[0].ndim + 1) - r = sls(m, s, sn_footprint = sn_fp, pn_footprint = pn_fp, noise='local', signed=True) + r = sls( + m, s, sn_footprint=sn_fp, pn_footprint=pn_fp, noise="local", signed=True + ) numpy.testing.assert_allclose(r, e) e *= -1 - r = sls(m, -1 * s, sn_footprint = sn_fp, pn_footprint = pn_fp, noise='local', signed=True) + r = sls( + m, + -1 * s, + sn_footprint=sn_fp, + pn_footprint=pn_fp, + noise="local", + signed=True, + ) numpy.testing.assert_allclose(r, e) - r = sls(m, s, sn_footprint = sn_fp, pn_footprint = pn_fp, noise='local', signed=False) + r = sls( + m, s, sn_footprint=sn_fp, pn_footprint=pn_fp, noise="local", signed=False + ) numpy.testing.assert_allclose(r, e) - r = sls(m, -1 * s, sn_footprint = sn_fp, pn_footprint = pn_fp, noise='local', signed=False) + r = sls( + m, + -1 * s, + sn_footprint=sn_fp, + pn_footprint=pn_fp, + noise="local", + signed=False, + ) numpy.testing.assert_allclose(r, e) - noise = noise.sum() / 9. + noise = noise.sum() / 9.0 e = [-1 * numpy.exp(-1 * patch / noise) for patch in patches] e = numpy.rollaxis(numpy.asarray(e), 0, e[0].ndim + 1) - r = sls(m, s, sn_footprint = sn_fp, pn_footprint = pn_fp, noise='global', signed=True) + r = sls( + m, s, sn_footprint=sn_fp, pn_footprint=pn_fp, noise="global", signed=True + ) numpy.testing.assert_allclose(r, e) def test_ssd(self): - m = numpy.array( - [[0,0,0], - [0,0,0], - [0,0,0]]) - s = numpy.array( - [[1,2,3], - [3,4,5], - [5,6,7]]) - - e = numpy.array( - [[ 1, 4, 9], - [ 9,16,25], - [25,36,49]]) + m = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + s = numpy.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]]) + + e = numpy.array([[1, 4, 9], [9, 16, 25], [25, 36, 49]]) r, sgn = ssd(m, s, normalized=False, signed=False, size=1) - self.assertEqual(sgn, 1, 'signed=False failed to return scalar 1') + self.assertEqual(sgn, 1, "signed=False failed to return scalar 1") numpy.testing.assert_allclose(r, e) - esgn = numpy.array( - [[-1,-1,-1], - [-1,-1,-1], - [-1,-1,-1]]) + esgn = numpy.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -1]]) r, sgn = ssd(m, s, normalized=False, signed=True, size=1) - numpy.testing.assert_allclose(sgn, esgn, err_msg = 'signed=True failed') + numpy.testing.assert_allclose(sgn, esgn, err_msg="signed=True failed") numpy.testing.assert_allclose(r, e) - esgn = numpy.array( - [[1,1,1], - [1,1,1], - [1,1,1]]) + esgn = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) r, sgn = ssd(s, m, normalized=False, signed=True, size=1) - numpy.testing.assert_allclose(sgn, esgn, err_msg = 'signed=True failed') + numpy.testing.assert_allclose(sgn, esgn, err_msg="signed=True failed") numpy.testing.assert_allclose(r, e) r, _ = ssd(m, s, normalized=True, signed=False, size=1) - numpy.testing.assert_allclose(r, e, err_msg='normalized=True failed') - - fp = numpy.array( - [[1, 0], - [1, 0], - [0, 1]]) - e = numpy.array( - [[26,45,50], - [46,69,70], - [50,77,90]]) - r, _ = ssd(m, s, normalized=False, signed=False, footprint=fp, mode='mirror') - numpy.testing.assert_allclose(r, e, err_msg='using footprint failed') - - e = e / 3. 
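The ssd assertions in this test pin down the contract relied on throughout: a per-voxel sum of squared differences between the two images, aggregated over a size or footprint neighbourhood, plus a sign that is -1 wherever the first image lies below the second when signed=True. A sketch of the size=1 base case, using only arguments the test itself passes:

import numpy
from medpy.filter.image import ssd

m = numpy.zeros((3, 3))
s = numpy.asarray([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
r, sgn = ssd(m, s, normalized=False, signed=True, size=1)
print(r)    # elementwise (m - s) ** 2, since the neighbourhood is a single voxel
print(sgn)  # -1 everywhere, because m < s at every voxel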
- r, _ = ssd(m, s, normalized=True, signed=False, footprint=fp, mode='mirror') - numpy.testing.assert_allclose(r, e, err_msg='normalized=True using footprint failed') + numpy.testing.assert_allclose(r, e, err_msg="normalized=True failed") + + fp = numpy.array([[1, 0], [1, 0], [0, 1]]) + e = numpy.array([[26, 45, 50], [46, 69, 70], [50, 77, 90]]) + r, _ = ssd(m, s, normalized=False, signed=False, footprint=fp, mode="mirror") + numpy.testing.assert_allclose(r, e, err_msg="using footprint failed") + + e = e / 3.0 + r, _ = ssd(m, s, normalized=True, signed=False, footprint=fp, mode="mirror") + numpy.testing.assert_allclose( + r, e, err_msg="normalized=True using footprint failed" + ) def test_average_filter(self): - i = numpy.array( - [[1,2,3], - [3,4,5], - [5,6,7]]) - - fp = numpy.array( - [[1, 1]]) - e = numpy.array( - [[ 3, 5, 3], - [ 7, 9, 5], - [11,13, 7]]) - r = average_filter(i, footprint=fp, mode='constant', cval=0, output=float) - numpy.testing.assert_allclose(r, e / 2.) - - r = average_filter(i, footprint=fp, mode='constant', cval=0, output=int) + i = numpy.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]]) + + fp = numpy.array([[1, 1]]) + e = numpy.array([[3, 5, 3], [7, 9, 5], [11, 13, 7]]) + r = average_filter(i, footprint=fp, mode="constant", cval=0, output=float) + numpy.testing.assert_allclose(r, e / 2.0) + + r = average_filter(i, footprint=fp, mode="constant", cval=0, output=int) numpy.testing.assert_allclose(r, e / 2) - r = average_filter(i, footprint=fp, mode='constant', cval=0) + r = average_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e / 2) - fp = numpy.array( - [[1, 0], - [1, 0], - [0, 1]]) - e = numpy.array( - [[ 5, 7, 3], - [10,13, 8], - [ 8,10,12]]) - r = average_filter(i, footprint=fp, mode='constant', cval=0, output=float) - numpy.testing.assert_allclose(r, e / 3.) - - i = numpy.array( - [[1,3,4], - [2,2,2]]) - fp = numpy.array( - [[1,0,1]]) - e = numpy.array( - [[6,5,6], - [4,4,4]]) - r = average_filter(i, footprint=fp, mode='mirror', output=float) - numpy.testing.assert_allclose(r, e / 2.) - - e = numpy.array( - [[4,5,7], - [4,4,4]]) - r = average_filter(i, footprint=fp, mode='reflect', output=float) - numpy.testing.assert_allclose(r, e / 2.) 
+ fp = numpy.array([[1, 0], [1, 0], [0, 1]]) + e = numpy.array([[5, 7, 3], [10, 13, 8], [8, 10, 12]]) + r = average_filter(i, footprint=fp, mode="constant", cval=0, output=float) + numpy.testing.assert_allclose(r, e / 3.0) + + i = numpy.array([[1, 3, 4], [2, 2, 2]]) + fp = numpy.array([[1, 0, 1]]) + e = numpy.array([[6, 5, 6], [4, 4, 4]]) + r = average_filter(i, footprint=fp, mode="mirror", output=float) + numpy.testing.assert_allclose(r, e / 2.0) + + e = numpy.array([[4, 5, 7], [4, 4, 4]]) + r = average_filter(i, footprint=fp, mode="reflect", output=float) + numpy.testing.assert_allclose(r, e / 2.0) def test_sum_filter(self): - i = numpy.array( - [[1,2,3], - [3,4,5], - [5,6,7]]) + i = numpy.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]]) # test reaction to size parameter r = sum_filter(i, size=1) numpy.testing.assert_allclose(r, i) - e = numpy.array( - [[10,14, 8], - [18,22,12], - [11,13, 7]]) - r = sum_filter(i, size=2, mode='constant', cval=0) + e = numpy.array([[10, 14, 8], [18, 22, 12], [11, 13, 7]]) + r = sum_filter(i, size=2, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - e = numpy.array( - [[10,18,14], - [21,36,27], - [18,30,22]]) - r = sum_filter(i, size=3, mode='constant', cval=0) + e = numpy.array([[10, 18, 14], [21, 36, 27], [18, 30, 22]]) + r = sum_filter(i, size=3, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - e = numpy.array( - [[36,36,36], - [36,36,36], - [36,36,36]]) - r = sum_filter(i, size=5, mode='constant', cval=0) + e = numpy.array([[36, 36, 36], [36, 36, 36], [36, 36, 36]]) + r = sum_filter(i, size=5, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - r = sum_filter(i, size=10, mode='constant', cval=0) + r = sum_filter(i, size=10, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) # test reaction to footprint parameter - fp = numpy.array( - [[1]]) + fp = numpy.array([[1]]) r = sum_filter(i, footprint=fp) numpy.testing.assert_allclose(r, i) - fp = numpy.array( - [[1,1], - [1,1]]) - e = numpy.array( - [[10,14, 8], - [18,22,12], - [11,13, 7]]) - r = sum_filter(i, footprint=fp, mode='constant', cval=0) + fp = numpy.array([[1, 1], [1, 1]]) + e = numpy.array([[10, 14, 8], [18, 22, 12], [11, 13, 7]]) + r = sum_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - fp = numpy.array( - [[1, 1]]) - e = numpy.array( - [[ 3, 5, 3], - [ 7, 9, 5], - [11,13, 7]]) - r = sum_filter(i, footprint=fp, mode='constant', cval=0) + fp = numpy.array([[1, 1]]) + e = numpy.array([[3, 5, 3], [7, 9, 5], [11, 13, 7]]) + r = sum_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - fp = numpy.array( - [[1], - [1]]) - e = numpy.array( - [[ 4, 6, 8], - [ 8,10,12], - [ 5, 6, 7]]) - r = sum_filter(i, footprint=fp, mode='constant', cval=0) + fp = numpy.array([[1], [1]]) + e = numpy.array([[4, 6, 8], [8, 10, 12], [5, 6, 7]]) + r = sum_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - fp = numpy.array( - [[1, 0], - [1, 0], - [0, 1]]) - e = numpy.array( - [[ 5, 7, 3], - [10,13, 8], - [ 8,10,12]]) - r = sum_filter(i, footprint=fp, mode='constant', cval=0) + fp = numpy.array([[1, 0], [1, 0], [0, 1]]) + e = numpy.array([[5, 7, 3], [10, 13, 8], [8, 10, 12]]) + r = sum_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) - fp = numpy.array( - [[1, 0], - [0, 1], - [0, 1]]) - e = numpy.array( - [[ 6, 8, 0], - [11,14, 3], - [ 9,11, 5]]) - r = sum_filter(i, footprint=fp, mode='constant', cval=0) + fp = numpy.array([[1, 0], [0, 1], [0, 
1]]) + e = numpy.array([[6, 8, 0], [11, 14, 3], [9, 11, 5]]) + r = sum_filter(i, footprint=fp, mode="constant", cval=0) numpy.testing.assert_allclose(r, e) # test border treatment modes - i = numpy.array( - [[1,3,4], - [2,2,2]]) - fp = numpy.array( - [[1,0,1]]) + i = numpy.array([[1, 3, 4], [2, 2, 2]]) + fp = numpy.array([[1, 0, 1]]) e = 6 - r = sum_filter(i, footprint=fp, mode='mirror') - self.assertAlmostEqual(r[0,0], e, msg='mirror mode failed') + r = sum_filter(i, footprint=fp, mode="mirror") + self.assertAlmostEqual(r[0, 0], e, msg="mirror mode failed") e = 4 - r = sum_filter(i, footprint=fp, mode='reflect') - self.assertAlmostEqual(r[0,0], e, msg='reflect mode failed') + r = sum_filter(i, footprint=fp, mode="reflect") + self.assertAlmostEqual(r[0, 0], e, msg="reflect mode failed") e = 7 - r = sum_filter(i, footprint=fp, mode='wrap') - self.assertAlmostEqual(r[0,0], e, msg='wrap mode failed') + r = sum_filter(i, footprint=fp, mode="wrap") + self.assertAlmostEqual(r[0, 0], e, msg="wrap mode failed") e = 4 - r = sum_filter(i, footprint=fp, mode='nearest') - self.assertAlmostEqual(r[0,0], e, msg='nearest mode failed') + r = sum_filter(i, footprint=fp, mode="nearest") + self.assertAlmostEqual(r[0, 0], e, msg="nearest mode failed") e = 3 - r = sum_filter(i, footprint=fp, mode='constant', cval=0) - self.assertAlmostEqual(r[0,0], e, msg='constant mode failed') + r = sum_filter(i, footprint=fp, mode="constant", cval=0) + self.assertAlmostEqual(r[0, 0], e, msg="constant mode failed") e = 12 - r = sum_filter(i, footprint=fp, mode='constant', cval=9) - self.assertAlmostEqual(r[0,0], e, msg='constant mode failed') + r = sum_filter(i, footprint=fp, mode="constant", cval=9) + self.assertAlmostEqual(r[0, 0], e, msg="constant mode failed") - fp = numpy.array( - [[1,0,0], - [0,0,0]]) + fp = numpy.array([[1, 0, 0], [0, 0, 0]]) e = 3 - r = sum_filter(i, footprint=fp, mode='mirror') - self.assertAlmostEqual(r[0,0], e, msg='mirror mode failed') + r = sum_filter(i, footprint=fp, mode="mirror") + self.assertAlmostEqual(r[0, 0], e, msg="mirror mode failed") e = 1 - r = sum_filter(i, footprint=fp, mode='reflect') - self.assertAlmostEqual(r[0,0], e, msg='reflect mode failed') + r = sum_filter(i, footprint=fp, mode="reflect") + self.assertAlmostEqual(r[0, 0], e, msg="reflect mode failed") e = 4 - r = sum_filter(i, footprint=fp, mode='wrap') - self.assertAlmostEqual(r[0,0], e, msg='wrap mode failed') + r = sum_filter(i, footprint=fp, mode="wrap") + self.assertAlmostEqual(r[0, 0], e, msg="wrap mode failed") e = 1 - r = sum_filter(i, footprint=fp, mode='nearest') - self.assertAlmostEqual(r[0,0], e, msg='nearest mode failed') + r = sum_filter(i, footprint=fp, mode="nearest") + self.assertAlmostEqual(r[0, 0], e, msg="nearest mode failed") e = 0 - r = sum_filter(i, footprint=fp, mode='constant', cval=0) - self.assertAlmostEqual(r[0,0], e, msg='constant mode failed') + r = sum_filter(i, footprint=fp, mode="constant", cval=0) + self.assertAlmostEqual(r[0, 0], e, msg="constant mode failed") e = 9 - r = sum_filter(i, footprint=fp, mode='constant', cval=9) - self.assertAlmostEqual(r[0,0], e, msg='constant mode failed') + r = sum_filter(i, footprint=fp, mode="constant", cval=9) + self.assertAlmostEqual(r[0, 0], e, msg="constant mode failed") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/filter_/utilities.py b/tests/filter_/utilities.py index d951e3d6..389f9cf9 100644 --- a/tests/filter_/utilities.py +++ b/tests/filter_/utilities.py @@ -16,145 +16,115 @@ # own modules from 
medpy.filter import pad + # code class TestUtilities(unittest.TestCase): - def setUp(self): pass def test_pad_bordercases(self): "Test pad for border cases in 3D" - input = numpy.ones((3,3,3)) - + input = numpy.ones((3, 3, 3)) + # no padding in all dimensions - pad(input=input, size=1, mode='reflect') - pad(input=input, size=1, mode='mirror') - pad(input=input, size=1, mode='constant') - pad(input=input, size=1, mode='nearest') - pad(input=input, size=1, mode='wrap') - + pad(input=input, size=1, mode="reflect") + pad(input=input, size=1, mode="mirror") + pad(input=input, size=1, mode="constant") + pad(input=input, size=1, mode="nearest") + pad(input=input, size=1, mode="wrap") + # no padding in one dimension - pad(input=input, size=(1, 2, 2), mode='reflect') - pad(input=input, size=(1, 2, 2), mode='mirror') - pad(input=input, size=(1, 2, 2), mode='constant') - pad(input=input, size=(1, 2, 2), mode='nearest') - pad(input=input, size=(1, 2, 2), mode='wrap') - + pad(input=input, size=(1, 2, 2), mode="reflect") + pad(input=input, size=(1, 2, 2), mode="mirror") + pad(input=input, size=(1, 2, 2), mode="constant") + pad(input=input, size=(1, 2, 2), mode="nearest") + pad(input=input, size=(1, 2, 2), mode="wrap") + # same size as image - pad(input=input, size=3, mode='reflect') - pad(input=input, size=3, mode='mirror') - pad(input=input, size=3, mode='constant') - pad(input=input, size=3, mode='nearest') - pad(input=input, size=3, mode='wrap') - + pad(input=input, size=3, mode="reflect") + pad(input=input, size=3, mode="mirror") + pad(input=input, size=3, mode="constant") + pad(input=input, size=3, mode="nearest") + pad(input=input, size=3, mode="wrap") + # bigger than image - pad(input=input, size=4, mode='reflect') - pad(input=input, size=4, mode='mirror') - pad(input=input, size=4, mode='constant') - pad(input=input, size=4, mode='nearest') - pad(input=input, size=4, mode='wrap') + pad(input=input, size=4, mode="reflect") + pad(input=input, size=4, mode="mirror") + pad(input=input, size=4, mode="constant") + pad(input=input, size=4, mode="nearest") + pad(input=input, size=4, mode="wrap") def test_pad_odd(self): "Test pad for odd footprints in 2D" - input = numpy.asarray([[1,3,4],[2,2,2]]) + input = numpy.asarray([[1, 3, 4], [2, 2, 2]]) size = 3 - + expected = numpy.asarray( - [[2,2,2,2,2], - [3,1,3,4,3], - [2,2,2,2,2], - [3,1,3,4,3]]) - result = pad(input=input, size=size, mode='mirror') + [[2, 2, 2, 2, 2], [3, 1, 3, 4, 3], [2, 2, 2, 2, 2], [3, 1, 3, 4, 3]] + ) + result = pad(input=input, size=size, mode="mirror") self.assertTrue(numpy.all(result == expected)) - + expected = numpy.asarray( - [[1,1,3,4,4], - [1,1,3,4,4], - [2,2,2,2,2], - [2,2,2,2,2]]) - result = pad(input=input, size=size, mode='reflect') + [[1, 1, 3, 4, 4], [1, 1, 3, 4, 4], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]] + ) + result = pad(input=input, size=size, mode="reflect") self.assertTrue(numpy.all(result == expected)) - + expected = numpy.asarray( - [[2,2,2,2,2], - [4,1,3,4,1], - [2,2,2,2,2], - [4,1,3,4,1]]) - result = pad(input=input, size=size, mode='wrap') + [[2, 2, 2, 2, 2], [4, 1, 3, 4, 1], [2, 2, 2, 2, 2], [4, 1, 3, 4, 1]] + ) + result = pad(input=input, size=size, mode="wrap") self.assertTrue(numpy.all(result == expected)) - + expected = numpy.asarray( - [[1,1,3,4,4], - [1,1,3,4,4], - [2,2,2,2,2], - [2,2,2,2,2]]) - result = pad(input=input, size=size, mode='nearest') + [[1, 1, 3, 4, 4], [1, 1, 3, 4, 4], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]] + ) + result = pad(input=input, size=size, mode="nearest") 
numpy.testing.assert_array_equal(result, expected) self.assertTrue(numpy.all(result == expected)) - + expected = numpy.asarray( - [[0,0,0,0,0], - [0,1,3,4,0], - [0,2,2,2,0], - [0,0,0,0,0]]) - result = pad(input=input, size=size, mode='constant', cval=0) + [[0, 0, 0, 0, 0], [0, 1, 3, 4, 0], [0, 2, 2, 2, 0], [0, 0, 0, 0, 0]] + ) + result = pad(input=input, size=size, mode="constant", cval=0) self.assertTrue(numpy.all(result == expected)) - + expected = numpy.asarray( - [[9,9,9,9,9], - [9,1,3,4,9], - [9,2,2,2,9], - [9,9,9,9,9]]) - result = pad(input=input, size=size, mode='constant', cval=9) + [[9, 9, 9, 9, 9], [9, 1, 3, 4, 9], [9, 2, 2, 2, 9], [9, 9, 9, 9, 9]] + ) + result = pad(input=input, size=size, mode="constant", cval=9) self.assertTrue(numpy.all(result == expected)) - def test_pad_even(self): "Test pad for even footprints in 2D" - input = numpy.asarray([[1,3,4],[2,2,2]]) + input = numpy.asarray([[1, 3, 4], [2, 2, 2]]) size = (2, 3) - - expected = numpy.asarray( - [[3,1,3,4,3], - [2,2,2,2,2], - [3,1,3,4,3]]) - result = pad(input=input, size=size, mode='mirror') + + expected = numpy.asarray([[3, 1, 3, 4, 3], [2, 2, 2, 2, 2], [3, 1, 3, 4, 3]]) + result = pad(input=input, size=size, mode="mirror") self.assertTrue(numpy.all(result == expected)) - - expected = numpy.asarray( - [[1,1,3,4,4], - [2,2,2,2,2], - [2,2,2,2,2]]) - result = pad(input=input, size=size, mode='reflect') + + expected = numpy.asarray([[1, 1, 3, 4, 4], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]) + result = pad(input=input, size=size, mode="reflect") self.assertTrue(numpy.all(result == expected)) - - expected = numpy.asarray( - [[4,1,3,4,1], - [2,2,2,2,2], - [4,1,3,4,1]]) - result = pad(input=input, size=size, mode='wrap') + + expected = numpy.asarray([[4, 1, 3, 4, 1], [2, 2, 2, 2, 2], [4, 1, 3, 4, 1]]) + result = pad(input=input, size=size, mode="wrap") self.assertTrue(numpy.all(result == expected)) - - expected = numpy.asarray( - [[1,1,3,4,4], - [2,2,2,2,2], - [2,2,2,2,2]]) - result = pad(input=input, size=size, mode='nearest') + + expected = numpy.asarray([[1, 1, 3, 4, 4], [2, 2, 2, 2, 2], [2, 2, 2, 2, 2]]) + result = pad(input=input, size=size, mode="nearest") self.assertTrue(numpy.all(result == expected)) - - expected = numpy.asarray( - [[0,1,3,4,0], - [0,2,2,2,0], - [0,0,0,0,0]]) - result = pad(input=input, size=size, mode='constant', cval=0) + + expected = numpy.asarray([[0, 1, 3, 4, 0], [0, 2, 2, 2, 0], [0, 0, 0, 0, 0]]) + result = pad(input=input, size=size, mode="constant", cval=0) self.assertTrue(numpy.all(result == expected)) - - expected = numpy.asarray( - [[9,1,3,4,9], - [9,2,2,2,9], - [9,9,9,9,9]]) - result = pad(input=input, size=size, mode='constant', cval=9) + + expected = numpy.asarray([[9, 1, 3, 4, 9], [9, 2, 2, 2, 9], [9, 9, 9, 9, 9]]) + result = pad(input=input, size=size, mode="constant", cval=9) self.assertTrue(numpy.all(result == expected)) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/graphcut_/__init__.py b/tests/graphcut_/__init__.py index c7700a54..19785f58 100644 --- a/tests/graphcut_/__init__.py +++ b/tests/graphcut_/__init__.py @@ -1,4 +1,6 @@ -#from cut import TestCut # deactivated since faulty -from .graph import TestGraph -from .energy_label import TestEnergyLabel -from .energy_voxel import TestEnergyVoxel +# from cut import TestCut # deactivated since faulty +from .energy_label import TestEnergyLabel as TestEnergyLabel +from .energy_voxel import TestEnergyVoxel as TestEnergyVoxel +from .graph import TestGraph as TestGraph + +__all__ = 
["TestEnergyLabel", "TestEnergyVoxel", "TestGraph"] diff --git a/tests/graphcut_/cut.py b/tests/graphcut_/cut.py index fb449b1a..a1677845 100644 --- a/tests/graphcut_/cut.py +++ b/tests/graphcut_/cut.py @@ -15,140 +15,168 @@ # build-in modules import unittest -# third-party modules +import scipy + +from medpy import filter # own modules -from medpy.graphcut import graph_from_labels, GraphDouble, Graph, graph_from_voxels +from medpy.graphcut import Graph, GraphDouble, graph_from_labels, graph_from_voxels from medpy.graphcut.energy_voxel import boundary_difference_linear -from medpy import filter -import scipy + +# third-party modules + # code class TestCut(unittest.TestCase): """Executes the complete pipeline of the graph cut algorithm, checking the results.""" # data for voxel based test - __voriginal_image = [[[1,0,1,2,3], - [1,0,1,4,3], - [0,1,1,6,4]], - [[1,0,1,2,3], - [1,0,1,4,3], - [0,1,1,6,4]]] - - __vfg_markers = [[[0,0,0,0,0], - [0,0,0,0,0], - [1,0,0,0,0]], - [[0,0,0,0,0], - [0,0,0,0,0], - [1,0,0,0,0]]] - - __vbg_markers = [[[0,0,0,0,1], - [0,0,0,0,0], - [0,0,0,0,0]], - [[0,0,0,0,1], - [0,0,0,0,0], - [0,0,0,0,0]]] - __vexpected = [[[1,1,1,0,0], - [1,1,1,0,0], - [1,1,1,0,0]], - [[1,1,1,0,0], - [1,1,1,0,0], - [1,1,1,0,0]]] + __voriginal_image = [ + [[1, 0, 1, 2, 3], [1, 0, 1, 4, 3], [0, 1, 1, 6, 4]], + [[1, 0, 1, 2, 3], [1, 0, 1, 4, 3], [0, 1, 1, 6, 4]], + ] + + __vfg_markers = [ + [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]], + ] + + __vbg_markers = [ + [[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], + ] + __vexpected = [ + [[1, 1, 1, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], + [[1, 1, 1, 0, 0], [1, 1, 1, 0, 0], [1, 1, 1, 0, 0]], + ] __vmaxflow = 3 # data for region based test - __label_image = [[ 1, 2, 3, 3, 10], - [ 1, 4, 3, 8, 10], - [ 5, 5, 6, 7, 10], - [ 6, 6, 6, 9, 10]] - __fg_marker = [[1, 0, 0, 0, 0], - [1, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]] - __bg_marker = [[0, 0, 0, 0, 1], - [0, 0, 0, 0, 1], - [0, 0, 0, 0, 1], - [0, 0, 0, 0, 1]] - __result = [[1, 1, 1, 1, 0], - [1, 1, 1, 0, 0], - [1, 1, 1, 1, 0], - [1, 1, 1, 1, 0]] + __label_image = [ + [1, 2, 3, 3, 10], + [1, 4, 3, 8, 10], + [5, 5, 6, 7, 10], + [6, 6, 6, 9, 10], + ] + __fg_marker = [[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] + __bg_marker = [[0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1]] + __result = [[1, 1, 1, 1, 0], [1, 1, 1, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 0]] __maxflow = 16 - + def test_voxel_based(self): """Executes the complete pipeline of the graph cut algorithm.""" # create the graph from the image original_image = scipy.asarray(self.__voriginal_image) - graph = graph_from_voxels(scipy.asarray(self.__vfg_markers), - scipy.asarray(self.__vbg_markers), - boundary_term=boundary_difference_linear, - boundary_term_args=(original_image, False)) - + graph = graph_from_voxels( + scipy.asarray(self.__vfg_markers), + scipy.asarray(self.__vbg_markers), + boundary_term=boundary_difference_linear, + boundary_term_args=(original_image, False), + ) + # execute min-cut / executing BK_MFMC try: maxflow = graph.maxflow() except Exception as e: - self.fail('An error was thrown during the external executions: {}'.format(e.message)) - + self.fail( + "An error was thrown during the external executions: {}".format( + e.message + ) + ) + # reshape results to form a valid mask result = scipy.zeros(original_image.size, dtype=scipy.bool_) for idx in 
range(len(result)): result[idx] = 0 if graph.termtype.SINK == graph.what_segment(idx) else 1 result = result.reshape(original_image.shape) - + # check results for validity - self.assertTrue((result == scipy.asarray(self.__vexpected)).all(), 'Resulting voxel-based cut is different than expected.') - self.assertEqual(maxflow, self.__vmaxflow, 'The resulting maxflow {} differs from the expected one {}.'.format(maxflow, self.__vmaxflow)) - - + self.assertTrue( + (result == scipy.asarray(self.__vexpected)).all(), + "Resulting voxel-based cut is different than expected.", + ) + self.assertEqual( + maxflow, + self.__vmaxflow, + "The resulting maxflow {} differs from the expected one {}.".format( + maxflow, self.__vmaxflow + ), + ) + def test_region_based(self): """Executes the complete pipeline of the graph cut algorithm.""" # create the graph from the image label_image = self.__label_image - graph = graph_from_labels(label_image, - self.__fg_marker, - self.__bg_marker, - boundary_term=self.__boundary_term) - + graph = graph_from_labels( + label_image, + self.__fg_marker, + self.__bg_marker, + boundary_term=self.__boundary_term, + ) + # alter the graph, removing some edges that are undesired nweights = graph.get_nweights() for edge in self.__get_bad_edges(): - if edge in nweights: del nweights[edge] - else: del nweights[(edge[1], edge[0])] - + if edge in nweights: + del nweights[edge] + else: + del nweights[(edge[1], edge[0])] + # create new graph from old graph to check the setting methods of the Graph object graph_new = Graph() graph_new.set_nodes(graph.get_node_count()) graph_new.set_source_nodes(graph.get_source_nodes()) graph_new.set_sink_nodes(graph.get_sink_nodes()) graph_new.set_nweights(nweights) - + if graph_new.inconsistent(): - self.fail('The newly generated graph is inconsistent. Reasons: {}'.format('\n'.join(graph_new.inconsistent()))) - + self.fail( + "The newly generated graph is inconsistent. Reasons: {}".format( + "\n".join(graph_new.inconsistent()) + ) + ) + # build graph cut graph from graph gcgraph = GraphDouble(len(graph_new.get_nodes()), len(graph_new.get_nweights())) gcgraph.add_node(len(graph_new.get_nodes())) for node, weight in list(graph_new.get_tweights().items()): gcgraph.add_tweights(int(node - 1), weight[0], weight[1]) for edge, weight in list(graph_new.get_nweights().items()): - gcgraph.add_edge(int(edge[0] - 1), int(edge[1] - 1), weight[0], weight[1]) - + gcgraph.add_edge(int(edge[0] - 1), int(edge[1] - 1), weight[0], weight[1]) + # execute min-cut / executing BK_MFMC try: maxflow = gcgraph.maxflow() except Exception as e: - self.fail('An error was thrown during the external executions: {}'.format(e.message)) - + self.fail( + "An error was thrown during the external executions: {}".format( + e.message + ) + ) + # apply results to the label image - label_image = filter.relabel_map(label_image, - gcgraph.what_segment, - lambda fun, rid: 0 if gcgraph.termtype.SINK == fun(int(rid) - 1) else 1) - + label_image = filter.relabel_map( + label_image, + gcgraph.what_segment, + lambda fun, rid: 0 if gcgraph.termtype.SINK == fun(int(rid) - 1) else 1, + ) + # check results for validity - self.assertEqual(maxflow, self.__maxflow, 'The resulting maxflow {} differs from the expected one {}.'.format(maxflow, self.__maxflow)) - self.assertSequenceEqual(label_image.tolist(), self.__result, 'The resulting cut is wrong. 
Expected\n {}\n got\n{}'.format(scipy.asarray(self.__result, dtype=scipy.bool_), label_image)) - + self.assertEqual( + maxflow, + self.__maxflow, + "The resulting maxflow {} differs from the expected one {}.".format( + maxflow, self.__maxflow + ), + ) + self.assertSequenceEqual( + label_image.tolist(), + self.__result, + "The resulting cut is wrong. Expected\n {}\n got\n{}".format( + scipy.asarray(self.__result, dtype=scipy.bool_), label_image + ), + ) + @staticmethod def __boundary_term(graph, label_image, boundary_term_args): "The boundary term function used for this tests." @@ -156,7 +184,7 @@ def __boundary_term(graph, label_image, boundary_term_args): for key, value in list(dic.items()): dic[key] = (value, value) return dic - + @staticmethod def __get_mapping(): "Returns a dict holding the edge to weight mappings." @@ -167,7 +195,7 @@ def __get_mapping(): mapping[(2, 3)] = 6 mapping[(2, 4)] = 4 mapping[(3, 4)] = 9 - mapping[(3, 6)] = 1 # edge that has to be removed later + mapping[(3, 6)] = 1 # edge that has to be removed later mapping[(3, 8)] = 2 mapping[(3, 10)] = 6 mapping[(4, 5)] = 3 @@ -176,15 +204,16 @@ def __get_mapping(): mapping[(6, 9)] = 3 mapping[(7, 8)] = 3 mapping[(7, 9)] = 7 - mapping[(7, 10)] = 1 # edge that has to be removed later + mapping[(7, 10)] = 1 # edge that has to be removed later mapping[(8, 10)] = 8 mapping[(9, 10)] = 5 - + return mapping - + def __get_bad_edges(self): "Returns the edges that should not be in the graph and have to be removed." return ((3, 6), (7, 10)) - -if __name__ == '__main__': + + +if __name__ == "__main__": unittest.main() diff --git a/tests/graphcut_/energy_label.py b/tests/graphcut_/energy_label.py index 95b532ab..379711ba 100644 --- a/tests/graphcut_/energy_label.py +++ b/tests/graphcut_/energy_label.py @@ -7,141 +7,213 @@ @status Release """ +import math + # build-in modules import sys -import math import unittest +import numpy + # third-party modules import scipy -import numpy from numpy.testing import assert_raises # own modules -from medpy.graphcut.energy_label import boundary_stawiaski, boundary_difference_of_means,\ - boundary_stawiaski_directed, regional_atlas +from medpy.graphcut.energy_label import ( + boundary_difference_of_means, + boundary_stawiaski, + boundary_stawiaski_directed, + regional_atlas, +) from medpy.graphcut.graph import GCGraph + # code class TestEnergyLabel(unittest.TestCase): - - BOUNDARY_TERMS = [boundary_stawiaski, boundary_difference_of_means, - boundary_stawiaski_directed, regional_atlas] + BOUNDARY_TERMS = [ + boundary_stawiaski, + boundary_difference_of_means, + boundary_stawiaski_directed, + regional_atlas, + ] BOUNDARY_TERMS_1ARG = [boundary_stawiaski, boundary_difference_of_means] BOUNDARY_TERMS_2ARG = [boundary_stawiaski_directed, regional_atlas] # dedicated function tests def test_boundary_stawiaski(self): - label = [[[1,1], - [1,1]], - [[1,2], - [2,2]], - [[2,2], - [2,2,]]] + label = [ + [[1, 1], [1, 1]], + [[1, 2], [2, 2]], + [ + [2, 2], + [ + 2, + 2, + ], + ], + ] expected_result = {(0, 1): (6, 6)} - self.__run_boundary_stawiaski_test(label, numpy.zeros_like(label), expected_result, '3D images') - - gradient = [[0., 0., 0.], - [0., 0., sys.float_info.max]] - label = [[1, 2, 3], - [1, 2, 4]] - expected_result = {(0, 1): (2.0, 2.0), (1, 2): (1.0, 1.0), (1, 3): (sys.float_info.min, sys.float_info.min), (2, 3): (sys.float_info.min, sys.float_info.min)} - self.__run_boundary_stawiaski_test(label, gradient, expected_result, 'zero edge weight') - - label = [[1, 3, 4], - [1, 2, 5], - [1, 2, 5]] - 
expected_result = {(0, 1): (2.0, 2.0), (0, 2): (1.0, 1.0), (2, 3): (1.0, 1.0), (1, 2): (1.0, 1.0), (1, 4): (2.0, 2.0), (3, 4): (1.0, 1.0)} - self.__run_boundary_stawiaski_test(label, numpy.zeros(numpy.asarray(label).shape, int), expected_result, 'integer gradient image') - - label = scipy.asarray(label, order='C') # C-order, gradient same order - gradient = scipy.zeros(label.shape, order='C') - self.__run_boundary_stawiaski_test(label, gradient, expected_result, 'order (C, C)') - - label = scipy.asarray(label, order='F') # Fortran order, gradient same order - gradient = scipy.zeros(label.shape, order='F') - self.__run_boundary_stawiaski_test(label, gradient, expected_result, 'order (F, F)') - - label = scipy.asarray(label, order='C') # C-order, gradient different order - gradient = scipy.zeros(label.shape, order='F') - self.__run_boundary_stawiaski_test(label, gradient, expected_result, 'order (C, F)') - - label = scipy.asarray(label, order='F') # F-order, gradient different order - gradient = scipy.zeros(label.shape, order='C') - self.__run_boundary_stawiaski_test(label, gradient, expected_result, 'order (F, C)') - - def __run_boundary_stawiaski_test(self, label, gradient, expected_result, msg = ''): + self.__run_boundary_stawiaski_test( + label, numpy.zeros_like(label), expected_result, "3D images" + ) + + gradient = [[0.0, 0.0, 0.0], [0.0, 0.0, sys.float_info.max]] + label = [[1, 2, 3], [1, 2, 4]] + expected_result = { + (0, 1): (2.0, 2.0), + (1, 2): (1.0, 1.0), + (1, 3): (sys.float_info.min, sys.float_info.min), + (2, 3): (sys.float_info.min, sys.float_info.min), + } + self.__run_boundary_stawiaski_test( + label, gradient, expected_result, "zero edge weight" + ) + + label = [[1, 3, 4], [1, 2, 5], [1, 2, 5]] + expected_result = { + (0, 1): (2.0, 2.0), + (0, 2): (1.0, 1.0), + (2, 3): (1.0, 1.0), + (1, 2): (1.0, 1.0), + (1, 4): (2.0, 2.0), + (3, 4): (1.0, 1.0), + } + self.__run_boundary_stawiaski_test( + label, + numpy.zeros(numpy.asarray(label).shape, int), + expected_result, + "integer gradient image", + ) + + label = scipy.asarray(label, order="C") # C-order, gradient same order + gradient = scipy.zeros(label.shape, order="C") + self.__run_boundary_stawiaski_test( + label, gradient, expected_result, "order (C, C)" + ) + + label = scipy.asarray(label, order="F") # Fortran order, gradient same order + gradient = scipy.zeros(label.shape, order="F") + self.__run_boundary_stawiaski_test( + label, gradient, expected_result, "order (F, F)" + ) + + label = scipy.asarray(label, order="C") # C-order, gradient different order + gradient = scipy.zeros(label.shape, order="F") + self.__run_boundary_stawiaski_test( + label, gradient, expected_result, "order (C, F)" + ) + + label = scipy.asarray(label, order="F") # F-order, gradient different order + gradient = scipy.zeros(label.shape, order="C") + self.__run_boundary_stawiaski_test( + label, gradient, expected_result, "order (F, C)" + ) + + def __run_boundary_stawiaski_test(self, label, gradient, expected_result, msg=""): label = numpy.asarray(label) gradient = numpy.asarray(gradient) - graph = GCGraphTest(numpy.unique(label).size, math.pow(numpy.unique(label).size, 2)) + graph = GCGraphTest( + numpy.unique(label).size, math.pow(numpy.unique(label).size, 2) + ) boundary_stawiaski(graph, label, gradient) graph.validate_nweights(self, expected_result, msg) - def __run_boundary_difference_of_means_test(self, label, gradient, expected_result, msg = ''): + def __run_boundary_difference_of_means_test( + self, label, gradient, expected_result, msg="" + ): 
label = numpy.asarray(label) gradient = numpy.asarray(gradient) - graph = GCGraphTest(numpy.unique(label).size, math.pow(numpy.unique(label).size, 2)) + graph = GCGraphTest( + numpy.unique(label).size, math.pow(numpy.unique(label).size, 2) + ) boundary_difference_of_means(graph, label, gradient) graph.validate_nweights(self, expected_result, msg) - # exception tests def test_exception_not_consecutively_labelled(self): - label = [[1, 4, 8], - [1, 3, 10], - [1, 3, 10]] + label = [[1, 4, 8], [1, 3, 10], [1, 3, 10]] for bt in self.BOUNDARY_TERMS_1ARG: - assert_raises(AttributeError, bt, None, label, (None, )) + assert_raises(AttributeError, bt, None, label, (None,)) for bt in self.BOUNDARY_TERMS_2ARG: assert_raises(AttributeError, bt, None, label, (None, None)) def test_exception_not_starting_with_index_one(self): - label = [[2, 3, 4], - [2, 3, 4], - [2, 3, 4]] + label = [[2, 3, 4], [2, 3, 4], [2, 3, 4]] for bt in self.BOUNDARY_TERMS_1ARG: - assert_raises(AttributeError, bt, None, label, (None, )) + assert_raises(AttributeError, bt, None, label, (None,)) for bt in self.BOUNDARY_TERMS_2ARG: assert_raises(AttributeError, bt, None, label, (None, None)) def test_boundary_difference_of_means_borders(self): - label = [[[1,1], - [1,1]], - [[1,2], - [2,2]], - [[2,2], - [2,2,]]] + label = [ + [[1, 1], [1, 1]], + [[1, 2], [2, 2]], + [ + [2, 2], + [ + 2, + 2, + ], + ], + ] expected_result = {(0, 1): (sys.float_info.min, sys.float_info.min)} - self.__run_boundary_difference_of_means_test(label, numpy.zeros_like(label), expected_result, '3D images') - - gradient = [[0., 0., 0.], - [0., 0., sys.float_info.max]] - label = [[1, 2, 3], - [1, 2, 4]] - expected_result = {(0, 1): (1.0, 1.0), (1, 2): (1.0, 1.0), (1, 3): (sys.float_info.min, sys.float_info.min), (2, 3): (sys.float_info.min, sys.float_info.min)} - self.__run_boundary_difference_of_means_test(label, gradient, expected_result, 'zero edge weight') - - label = [[1, 3, 4], - [1, 2, 5], - [1, 2, 5]] - expected_result = {(0, 1): (sys.float_info.min, sys.float_info.min), (0, 2): (sys.float_info.min, sys.float_info.min), (2, 3): (sys.float_info.min, sys.float_info.min), (1, 2): (sys.float_info.min, sys.float_info.min), (1, 4): (sys.float_info.min, sys.float_info.min), (3, 4): (sys.float_info.min, sys.float_info.min)} - self.__run_boundary_difference_of_means_test(label, numpy.zeros(numpy.asarray(label).shape, int), expected_result, 'integer gradient image') - - label = scipy.asarray(label, order='C') # C-order, gradient same order - gradient = scipy.zeros(label.shape, order='C') - self.__run_boundary_difference_of_means_test(label, gradient, expected_result, 'order (C, C)') + self.__run_boundary_difference_of_means_test( + label, numpy.zeros_like(label), expected_result, "3D images" + ) + + gradient = [[0.0, 0.0, 0.0], [0.0, 0.0, sys.float_info.max]] + label = [[1, 2, 3], [1, 2, 4]] + expected_result = { + (0, 1): (1.0, 1.0), + (1, 2): (1.0, 1.0), + (1, 3): (sys.float_info.min, sys.float_info.min), + (2, 3): (sys.float_info.min, sys.float_info.min), + } + self.__run_boundary_difference_of_means_test( + label, gradient, expected_result, "zero edge weight" + ) + + label = [[1, 3, 4], [1, 2, 5], [1, 2, 5]] + expected_result = { + (0, 1): (sys.float_info.min, sys.float_info.min), + (0, 2): (sys.float_info.min, sys.float_info.min), + (2, 3): (sys.float_info.min, sys.float_info.min), + (1, 2): (sys.float_info.min, sys.float_info.min), + (1, 4): (sys.float_info.min, sys.float_info.min), + (3, 4): (sys.float_info.min, sys.float_info.min), + } + 
self.__run_boundary_difference_of_means_test( + label, + numpy.zeros(numpy.asarray(label).shape, int), + expected_result, + "integer gradient image", + ) + + label = scipy.asarray(label, order="C") # C-order, gradient same order + gradient = scipy.zeros(label.shape, order="C") + self.__run_boundary_difference_of_means_test( + label, gradient, expected_result, "order (C, C)" + ) + + label = scipy.asarray(label, order="F") # Fortran order, gradient same order + gradient = scipy.zeros(label.shape, order="F") + self.__run_boundary_difference_of_means_test( + label, gradient, expected_result, "order (F, F)" + ) + + label = scipy.asarray(label, order="C") # C-order, gradient different order + gradient = scipy.zeros(label.shape, order="F") + self.__run_boundary_difference_of_means_test( + label, gradient, expected_result, "order (C, F)" + ) + + label = scipy.asarray(label, order="F") # F-order, gradient different order + gradient = scipy.zeros(label.shape, order="C") + self.__run_boundary_difference_of_means_test( + label, gradient, expected_result, "order (F, C)" + ) - label = scipy.asarray(label, order='F') # Fortran order, gradient same order - gradient = scipy.zeros(label.shape, order='F') - self.__run_boundary_difference_of_means_test(label, gradient, expected_result, 'order (F, F)') - - label = scipy.asarray(label, order='C') # C-order, gradient different order - gradient = scipy.zeros(label.shape, order='F') - self.__run_boundary_difference_of_means_test(label, gradient, expected_result, 'order (C, F)') - - label = scipy.asarray(label, order='F') # F-order, gradient different order - gradient = scipy.zeros(label.shape, order='C') - self.__run_boundary_difference_of_means_test(label, gradient, expected_result, 'order (F, C)') class GCGraphTest(GCGraph): """Wrapper around GCGraph, disabling its main functionalities to enable checking of the received values.""" @@ -153,40 +225,99 @@ def __init__(self, nodes, edges): def set_nweight(self, node_from, node_to, weight_there, weight_back): """Original graph sums if edges already exists.""" - #print (node_from, node_to, weight_there, weight_back) + # print (node_from, node_to, weight_there, weight_back) if not (node_from, node_to) in self.__nweights: self.__nweights[(node_from, node_to)] = (weight_there, weight_back) else: weight_there_old, weight_back_old = self.__nweights[(node_from, node_to)] - self.__nweights[(node_from, node_to)] = (weight_there_old + weight_there, weight_back_old + weight_back) + self.__nweights[(node_from, node_to)] = ( + weight_there_old + weight_there, + weight_back_old + weight_back, + ) def get_nweights(self): return self.__nweights - def validate_nweights(self, unittest, expected_result, msg_base = ''): + def validate_nweights(self, unittest, expected_result, msg_base=""): """Compares the nweights hold by the graph with the once provided (as a dict).""" - unittest.assertTrue(len(self.__nweights) == len(expected_result), '{}: Expected {} edges, but {} were added.'.format(msg_base, len(expected_result), len(self.__nweights))) + unittest.assertTrue( + len(self.__nweights) == len(expected_result), + "{}: Expected {} edges, but {} were added.".format( + msg_base, len(expected_result), len(self.__nweights) + ), + ) node_id_set = set() for key in list(self.__nweights.keys()): node_id_set.add(key[0]) node_id_set.add(key[1]) - unittest.assertTrue(len(node_id_set) == self.__nodes), '{}: Not all {} node-ids appeared in the edges, but only {}. 
Missing are {}.'.format(msg_base, self.__nodes, len(node_id_set), set(range(0, self.__nodes)) - node_id_set)
-        self.__compare_dictionaries(unittest, self.__nweights, expected_result, msg_base)
-
-    def __compare_dictionaries(self, unittest, result, expected_result, msg_base = ''):
+        unittest.assertTrue(
+            len(node_id_set) == self.__nodes,
+            "{}: Not all {} node-ids appeared in the edges, but only {}. Missing are {}.".format(
+                msg_base,
+                self.__nodes,
+                len(node_id_set),
+                set(range(0, self.__nodes)) - node_id_set,
+            ),
+        )
+        self.__compare_dictionaries(
+            unittest, self.__nweights, expected_result, msg_base
+        )
+
+    def __compare_dictionaries(self, unittest, result, expected_result, msg_base=""):
        """Evaluates the returned results."""
-        unittest.assertEqual(len(expected_result), len(result), '{}: The expected result dict contains {} entries (for 4-connectedness), instead found {}.'.format(msg_base, len(expected_result), len(result)))
+        unittest.assertEqual(
+            len(expected_result),
+            len(result),
+            "{}: The expected result dict contains {} entries (for 4-connectedness), instead found {}.".format(
+                msg_base, len(expected_result), len(result)
+            ),
+        )
         for key, value in list(result.items()):
-            unittest.assertTrue(key in expected_result, '{}: Region border {} unexpectedly found in expected results.'.format(msg_base, key))
+            unittest.assertTrue(
+                key in expected_result,
+                "{}: Region border {} not found in expected results.".format(
+                    msg_base, key
+                ),
+            )
             if key in expected_result:
-                unittest.assertAlmostEqual(value[0], expected_result[key][0], msg='{}: Weight for region border {} is {}. Expected {}.'.format(msg_base, key, value, expected_result[key]), delta=sys.float_info.epsilon)
-                unittest.assertAlmostEqual(value[1], expected_result[key][1], msg='{}: Weight for region border {} is {}. Expected {}.'.format(msg_base, key, value, expected_result[key]), delta=sys.float_info.epsilon)
-                unittest.assertGreater(value[0], 0.0, '{}: Encountered a weight {} <= 0.0 for key {}.'.format(msg_base, value, key))
-                unittest.assertGreater(value[1], 0.0, '{}: Encountered a weight {} <= 0.0 for key {}.'.format(msg_base, value, key))
+                unittest.assertAlmostEqual(
+                    value[0],
+                    expected_result[key][0],
+                    msg="{}: Weight for region border {} is {}. Expected {}.".format(
+                        msg_base, key, value, expected_result[key]
+                    ),
+                    delta=sys.float_info.epsilon,
+                )
+                unittest.assertAlmostEqual(
+                    value[1],
+                    expected_result[key][1],
+                    msg="{}: Weight for region border {} is {}. Expected {}.".format(
+                        msg_base, key, value, expected_result[key]
+                    ),
+                    delta=sys.float_info.epsilon,
+                )
+                unittest.assertGreater(
+                    value[0],
+                    0.0,
+                    "{}: Encountered a weight {} <= 0.0 for key {}.".format(
+                        msg_base, value, key
+                    ),
+                )
+                unittest.assertGreater(
+                    value[1],
+                    0.0,
+                    "{}: Encountered a weight {} <= 0.0 for key {}.".format(
+                        msg_base, value, key
+                    ),
+                )
         for key, value in list(expected_result.items()):
-            unittest.assertTrue(key in result, '{}: Region border {} expectedly but not found in results.'.format(msg_base, key))
+            unittest.assertTrue(
+                key in result,
+                "{}: Region border {} expected but not found in results.".format(
+                    msg_base, key
+                ),
+            )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/graphcut_/energy_voxel.py b/tests/graphcut_/energy_voxel.py
index ba034868..4d0a551c 100644
--- a/tests/graphcut_/energy_voxel.py
+++ b/tests/graphcut_/energy_voxel.py
@@ -16,138 +16,182 @@
 # own modules
 from medpy.graphcut import graph_from_voxels
-from medpy.graphcut.energy_voxel import boundary_difference_linear, boundary_difference_exponential,\
-    boundary_difference_division, boundary_difference_power,\
-    boundary_maximum_linear, boundary_maximum_exponential,\
-    boundary_maximum_division, boundary_maximum_power, \
-    regional_probability_map
+from medpy.graphcut.energy_voxel import (
+    boundary_difference_division,
+    boundary_difference_exponential,
+    boundary_difference_linear,
+    boundary_difference_power,
+    boundary_maximum_division,
+    boundary_maximum_exponential,
+    boundary_maximum_linear,
+    boundary_maximum_power,
+    regional_probability_map,
+)
 
-class TestEnergyVoxel(unittest.TestCase):
-    BOUNDARY_TERMS = [boundary_difference_linear, boundary_difference_exponential,\
-                      boundary_difference_division, boundary_difference_power,\
-                      boundary_maximum_linear, boundary_maximum_exponential,\
-                      boundary_maximum_division, boundary_maximum_power]
+class TestEnergyVoxel(unittest.TestCase):
+    BOUNDARY_TERMS = [
+        boundary_difference_linear,
+        boundary_difference_exponential,
+        boundary_difference_division,
+        boundary_difference_power,
+        boundary_maximum_linear,
+        boundary_maximum_exponential,
+        boundary_maximum_division,
+        boundary_maximum_power,
+    ]
     BOUNDARY_TERMS_2ARGS = [boundary_difference_linear, boundary_maximum_linear]
-    BOUNDARY_TERMS_3ARGS = [boundary_difference_exponential,\
-                            boundary_difference_division, boundary_difference_power,\
-                            boundary_maximum_exponential,\
-                            boundary_maximum_division, boundary_maximum_power]
-
-    image = numpy.asarray([[0,0,0,0],
-                           [0,0,0,0],
-                           [0,0,1,1],
-                           [0,0,1,1]], dtype=float)
-    fgmarkers = numpy.asarray([[0,0,0,0],
-                               [0,0,0,0],
-                               [0,0,0,0],
-                               [0,0,0,1]])
-    bgmarkers = numpy.asarray([[1,0,0,0],
-                               [0,0,0,0],
-                               [0,0,0,0],
-                               [0,0,0,0]])
-    result = numpy.asarray([[0,0,0,0],
-                            [0,0,0,0],
-                            [0,0,1,1],
-                            [0,0,1,1]], dtype=numpy.bool_)
-
-    gradient = numpy.asarray([[0,0,0,0],
-                              [0,1,1,1],
-                              [0,1,0,0],
-                              [0,1,0,0]], dtype=float)
+    BOUNDARY_TERMS_3ARGS = [
+        boundary_difference_exponential,
+        boundary_difference_division,
+        boundary_difference_power,
+        boundary_maximum_exponential,
+        boundary_maximum_division,
+        boundary_maximum_power,
+    ]
+
+    image = numpy.asarray(
+        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]], dtype=float
+    )
+    fgmarkers = numpy.asarray([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])
+    bgmarkers = numpy.asarray([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
+    result = numpy.asarray(
+        [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]], 
dtype=numpy.bool_ + ) + + gradient = numpy.asarray( + [[0, 0, 0, 0], [0, 1, 1, 1], [0, 1, 0, 0], [0, 1, 0, 0]], dtype=float + ) # Base functionality tests def test_boundary_difference_linear_2D(self): self.__test_boundary_term_2d(boundary_difference_linear, (self.image, False)) def test_boundary_difference_exponential_2D(self): - self.__test_boundary_term_2d(boundary_difference_exponential, (self.image, 1., False)) + self.__test_boundary_term_2d( + boundary_difference_exponential, (self.image, 1.0, False) + ) def test_boundary_difference_division_2D(self): - self.__test_boundary_term_2d(boundary_difference_division, (self.image, .5, False)) + self.__test_boundary_term_2d( + boundary_difference_division, (self.image, 0.5, False) + ) def test_boundary_difference_power_2D(self): - self.__test_boundary_term_2d(boundary_difference_power, (self.image, 2., False)) + self.__test_boundary_term_2d( + boundary_difference_power, (self.image, 2.0, False) + ) def test_boundary_maximum_linear_2D(self): self.__test_boundary_term_2d(boundary_maximum_linear, (self.gradient, False)) def test_boundary_maximum_exponential_2D(self): - self.__test_boundary_term_2d(boundary_maximum_exponential, (self.gradient, 1., False)) + self.__test_boundary_term_2d( + boundary_maximum_exponential, (self.gradient, 1.0, False) + ) def test_boundary_maximum_division_2D(self): - self.__test_boundary_term_2d(boundary_maximum_division, (self.gradient, .5, False)) + self.__test_boundary_term_2d( + boundary_maximum_division, (self.gradient, 0.5, False) + ) def test_boundary_maximum_power_2D(self): - self.__test_boundary_term_2d(boundary_maximum_power, (self.gradient, 2., False)) + self.__test_boundary_term_2d( + boundary_maximum_power, (self.gradient, 2.0, False) + ) def test_regional_probability_map(self): - probability = self.image / 2. 
+        probability = self.image / 2.0
         self.__test_regional_term_2d(regional_probability_map, (probability, 1.0))
 
     # Spacing tests
     def test_spacing(self):
-        image = numpy.asarray([[0,0,0,0,0],
-                               [0,0,2,0,0],
-                               [0,0,2,0,0],
-                               [0,0,2,0,0],
-                               [0,0,2,0,0]], dtype=float)
-        fgmarkers = numpy.asarray([[0,0,0,0,0],
-                                   [0,0,0,0,0],
-                                   [0,0,0,0,0],
-                                   [0,0,0,0,0],
-                                   [0,0,1,0,0]], dtype=numpy.bool_)
-        bgmarkers = numpy.asarray([[1,0,0,0,1],
-                                   [0,0,0,0,0],
-                                   [0,0,0,0,0],
-                                   [0,0,0,0,0],
-                                   [0,0,0,0,0]], dtype=numpy.bool_)
+        image = numpy.asarray(
+            [
+                [0, 0, 0, 0, 0],
+                [0, 0, 2, 0, 0],
+                [0, 0, 2, 0, 0],
+                [0, 0, 2, 0, 0],
+                [0, 0, 2, 0, 0],
+            ],
+            dtype=float,
+        )
+        fgmarkers = numpy.asarray(
+            [
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 1, 0, 0],
+            ],
+            dtype=numpy.bool_,
+        )
+        bgmarkers = numpy.asarray(
+            [
+                [1, 0, 0, 0, 1],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+            ],
+            dtype=numpy.bool_,
+        )
         expected = image.astype(numpy.bool_)
-        graph = graph_from_voxels(fgmarkers,
-                                  bgmarkers,
-                                  boundary_term=boundary_difference_division,
-                                  boundary_term_args=(image, 1.0, (1., 5.0)))
+        graph = graph_from_voxels(
+            fgmarkers,
+            bgmarkers,
+            boundary_term=boundary_difference_division,
+            boundary_term_args=(image, 1.0, (1.0, 5.0)),
+        )
         result = self.__execute(graph, image)
         assert_array_equal(result, expected)
 
     # Special case tests
     def test_negative_image(self):
-        image = numpy.asarray([[-1,1,-4],[2,-7,3],[-2.3,3,-7]], dtype=float)
+        image = numpy.asarray([[-1, 1, -4], [2, -7, 3], [-2.3, 3, -7]], dtype=float)
         self.__test_all_on_image(image)
 
     def test_zero_image(self):
-        image = numpy.asarray([[0,0,0],[0,0,0],[0,0,0]], dtype=float)
+        image = numpy.asarray([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=float)
         self.__test_all_on_image(image)
 
     # Helper functions
     def __test_all_on_image(self, image):
         for bt in self.BOUNDARY_TERMS_2ARGS:
-            graph = graph_from_voxels(self.fgmarkers,
-                                      self.bgmarkers,
-                                      boundary_term=bt,
-                                      boundary_term_args=(image, False))
+            graph = graph_from_voxels(
+                self.fgmarkers,
+                self.bgmarkers,
+                boundary_term=bt,
+                boundary_term_args=(image, False),
+            )
             self.__execute(graph, self.image)
         for bt in self.BOUNDARY_TERMS_3ARGS:
-            graph = graph_from_voxels(self.fgmarkers,
-                                      self.bgmarkers,
-                                      boundary_term=bt,
-                                      boundary_term_args=(image, 1.0, False))
+            graph = graph_from_voxels(
+                self.fgmarkers,
+                self.bgmarkers,
+                boundary_term=bt,
+                boundary_term_args=(image, 1.0, False),
+            )
             self.__execute(graph, self.image)
 
     def __test_boundary_term_2d(self, term, term_args):
-        graph = graph_from_voxels(self.fgmarkers,
-                                  self.bgmarkers,
-                                  boundary_term=term,
-                                  boundary_term_args=term_args)
+        graph = graph_from_voxels(
+            self.fgmarkers,
+            self.bgmarkers,
+            boundary_term=term,
+            boundary_term_args=term_args,
+        )
         result = self.__execute(graph, self.image)
         assert_array_equal(result, self.result)
 
     def __test_regional_term_2d(self, term, term_args):
-        graph = graph_from_voxels(self.fgmarkers,
-                                  self.bgmarkers,
-                                  regional_term=term,
-                                  regional_term_args=term_args)
+        graph = graph_from_voxels(
+            self.fgmarkers,
+            self.bgmarkers,
+            regional_term=term,
+            regional_term_args=term_args,
+        )
         result = self.__execute(graph, self.image)
         assert_array_equal(result, self.result)
 
@@ -157,7 +201,11 @@ def __execute(self, graph, image):
         try:
             graph.maxflow()
         except Exception as e:
-            self.fail('An error was thrown during the external executions: {}'.format(e.message))
+            self.fail(
+                "An error was thrown during the external executions: {}".format(
+                    e
+                )
+            )
 
         # reshape 
results to form a valid mask result = numpy.zeros(image.size, dtype=numpy.bool_) diff --git a/tests/graphcut_/graph.py b/tests/graphcut_/graph.py index 56680520..84149d12 100644 --- a/tests/graphcut_/graph.py +++ b/tests/graphcut_/graph.py @@ -13,27 +13,27 @@ # build-in modules import unittest -# third-party modules - # own modules from medpy.graphcut import GCGraph +# third-party modules + + # code class TestGraph(unittest.TestCase): - def test_Graph(self): """Test the @link medpy.graphcut.graph.Graph implementation.""" pass - + def test_GCGraph(self): """Test the @link medpy.graphcut.graph.GCGraph implementation.""" # set test parmeters nodes = 10 edges = 20 - + # construct graph - graph = GCGraph(nodes, edges) # nodes edges - + graph = GCGraph(nodes, edges) # nodes edges + # SETTER TESTS # set_source_nodes should accept a sequence and raise an error if an invalid node id was passed graph.set_source_nodes(list(range(0, nodes))) @@ -44,42 +44,45 @@ def test_GCGraph(self): self.assertRaises(ValueError, graph.set_sink_nodes, [-1]) self.assertRaises(ValueError, graph.set_sink_nodes, [nodes]) # set_nweight should accept integers resp. floats and raise an error if an invalid node id was passed or the weight is zero or negative - graph.set_nweight(0, nodes-1, 1, 2) - graph.set_nweight(nodes-1, 0, 0.5, 1.5) + graph.set_nweight(0, nodes - 1, 1, 2) + graph.set_nweight(nodes - 1, 0, 0.5, 1.5) self.assertRaises(ValueError, graph.set_nweight, -1, 0, 1, 1) self.assertRaises(ValueError, graph.set_nweight, 0, nodes, 1, 1) self.assertRaises(ValueError, graph.set_nweight, 0, 0, 1, 1) - self.assertRaises(ValueError, graph.set_nweight, 0, nodes-1, 0, 0) - self.assertRaises(ValueError, graph.set_nweight, 0, nodes-1, -1, -2) - self.assertRaises(ValueError, graph.set_nweight, 0, nodes-1, -0.5, -1.5) + self.assertRaises(ValueError, graph.set_nweight, 0, nodes - 1, 0, 0) + self.assertRaises(ValueError, graph.set_nweight, 0, nodes - 1, -1, -2) + self.assertRaises(ValueError, graph.set_nweight, 0, nodes - 1, -0.5, -1.5) # set_nweights works as set_nweight but takes a dictionary as argument - graph.set_nweights({(0, nodes-1): (1, 2)}) - graph.set_nweights({(nodes-1, 0): (0.5, 1.5)}) + graph.set_nweights({(0, nodes - 1): (1, 2)}) + graph.set_nweights({(nodes - 1, 0): (0.5, 1.5)}) self.assertRaises(ValueError, graph.set_nweights, {(-1, 0): (1, 1)}) self.assertRaises(ValueError, graph.set_nweights, {(0, nodes): (1, 1)}) self.assertRaises(ValueError, graph.set_nweights, {(0, 0): (1, 1)}) - self.assertRaises(ValueError, graph.set_nweights, {(0, nodes-1): (0, 0)}) - self.assertRaises(ValueError, graph.set_nweights, {(0, nodes-1): (-1, -2)}) - self.assertRaises(ValueError, graph.set_nweights, {(0, nodes-1): (-0.5, -1.5)}) + self.assertRaises(ValueError, graph.set_nweights, {(0, nodes - 1): (0, 0)}) + self.assertRaises(ValueError, graph.set_nweights, {(0, nodes - 1): (-1, -2)}) + self.assertRaises( + ValueError, graph.set_nweights, {(0, nodes - 1): (-0.5, -1.5)} + ) # set_tweight should accept integers resp. 
floats and raise an error if an invalid node id was passed or the weight is zero or negative graph.set_tweight(0, 1, 2) - graph.set_tweight(nodes-1, 0.5, 1.5) + graph.set_tweight(nodes - 1, 0.5, 1.5) graph.set_tweight(0, -1, -2) graph.set_tweight(0, 0, 0) self.assertRaises(ValueError, graph.set_tweight, -1, 1, 1) self.assertRaises(ValueError, graph.set_tweight, nodes, 1, 1) # set_tweights works as set_tweight but takes a dictionary as argument graph.set_tweights({0: (1, 2)}) - graph.set_tweights({nodes-1: (0.5, 1.5)}) + graph.set_tweights({nodes - 1: (0.5, 1.5)}) graph.set_tweights({0: (-1, -2)}) graph.set_tweights({0: (0, 0)}) self.assertRaises(ValueError, graph.set_tweights, {-1: (1, 1)}) self.assertRaises(ValueError, graph.set_tweights, {nodes: (1, 1)}) - + # SOME MINOR GETTERS self.assertEqual(graph.get_node_count(), nodes) self.assertEqual(graph.get_edge_count(), edges) self.assertSequenceEqual(graph.get_nodes(), list(range(0, nodes))) - -if __name__ == '__main__': + + +if __name__ == "__main__": unittest.main() diff --git a/tests/io_/__init__.py b/tests/io_/__init__.py index 1a1e33c2..982631ff 100644 --- a/tests/io_/__init__.py +++ b/tests/io_/__init__.py @@ -1,2 +1,4 @@ -from .loadsave import TestIOFacilities -from .metadata import TestMetadataConsistency \ No newline at end of file +from .loadsave import TestIOFacilities as TestIOFacilities +from .metadata import TestMetadataConsistency as TestMetadataConsistency + +__all__ = ["TestIOFacilities", "TestMetadataConsistency"] diff --git a/tests/io_/loadsave.py b/tests/io_/loadsave.py index b57d8d9e..d27c57cb 100644 --- a/tests/io_/loadsave.py +++ b/tests/io_/loadsave.py @@ -1,19 +1,20 @@ """Unittest for the input/output facilities class.""" # build-in modules -import unittest -import tempfile import os +import tempfile +import unittest # third-party modules import scipy - -# path changes +from medpy.core.logger import Logger # own modules from medpy.io import load, save -from medpy.core.logger import Logger + +# path changes + # information __author__ = "Oskar Maier" @@ -22,90 +23,103 @@ __status__ = "Release" __description__ = "Input/output facilities unittest." 
+ # code class TestIOFacilities(unittest.TestCase): - #### # Comprehensive list of image format endings #### # The most important image formats for medical image processing - __important = ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz', '.dcm', '.dicom', '.mhd', '.nrrd', '.mha'] - + __important = [ + ".nii", + ".nii.gz", + ".hdr", + ".img", + ".img.gz", + ".dcm", + ".dicom", + ".mhd", + ".nrrd", + ".mha", + ] + # list of image formats ITK is theoretically able to load - __itk = ['.analyze', # failed saving - '.hdr', - '.img', - '.bmp', - '.dcm', - '.gdcm', # failed saving - '.dicom', - '.4x', # failed saving - '.5x', # failed saving - '.ge', # failed saving - '.ge4', # failed saving - '.ge4x', # failed saving - '.ge5', # failed saving - '.ge5x', # failed saving - '.gipl', - '.h5', - '.hdf5', - '.he5', - '.ipl', # failed saving - '.jpg', - '.jpeg', - '.lsm', - '.mha', - '.mhd', - '.pic', - '.png', - '.raw', # failed saving - '.vision', # failed saving - '.siemens', # failed saving - '.spr', - '.sdt', # failed saving - '.stimulate', # failed saving - '.tif', - '.tiff', - '.vtk', - '.bio', # failed saving - '.biorad', # failed saving - '.brains', # failed saving - '.brains2', # failed saving - '.brains2mask', # failed saving - '.bruker', # failed saving - '.bruker2d', # failed saving - '.bruker2dseq', # failed saving - '.mnc', # failed saving - '.mnc2', # failed saving - '.minc', # failed saving - '.minc2', # failed saving - '.nii', - '.nifti', # failed saving - '.nhdr', - '.nrrd', - '.philips', # failed saving - '.philipsreq', # failed saving - '.rec', # failed saving - '.par', # failed saving - '.recpar', # failed saving - '.vox', # failed saving - '.voxbo', # failed saving - '.voxbocub'] # failed saving - + __itk = [ + ".analyze", # failed saving + ".hdr", + ".img", + ".bmp", + ".dcm", + ".gdcm", # failed saving + ".dicom", + ".4x", # failed saving + ".5x", # failed saving + ".ge", # failed saving + ".ge4", # failed saving + ".ge4x", # failed saving + ".ge5", # failed saving + ".ge5x", # failed saving + ".gipl", + ".h5", + ".hdf5", + ".he5", + ".ipl", # failed saving + ".jpg", + ".jpeg", + ".lsm", + ".mha", + ".mhd", + ".pic", + ".png", + ".raw", # failed saving + ".vision", # failed saving + ".siemens", # failed saving + ".spr", + ".sdt", # failed saving + ".stimulate", # failed saving + ".tif", + ".tiff", + ".vtk", + ".bio", # failed saving + ".biorad", # failed saving + ".brains", # failed saving + ".brains2", # failed saving + ".brains2mask", # failed saving + ".bruker", # failed saving + ".bruker2d", # failed saving + ".bruker2dseq", # failed saving + ".mnc", # failed saving + ".mnc2", # failed saving + ".minc", # failed saving + ".minc2", # failed saving + ".nii", + ".nifti", # failed saving + ".nhdr", + ".nrrd", + ".philips", # failed saving + ".philipsreq", # failed saving + ".rec", # failed saving + ".par", # failed saving + ".recpar", # failed saving + ".vox", # failed saving + ".voxbo", # failed saving + ".voxbocub", + ] # failed saving + ########## # Combinations to avoid due to technical problems, dim->file ending pairs ######### - __avoid = {} # e.g. {4: ('.dcm', '.dicom')} - + __avoid = {} # e.g. {4: ('.dcm', '.dicom')} + def test_SaveLoad(self): """ The bases essence of this test is to check if any one image format in any one dimension can be saved and read, as this is the only base requirement for using medpy. - + Additionally checks the basic expected behaviour of the load and save functionality. 
-
+
         Since this usually does not make much sense, this implementation also allows
         to set a switch (verbose) which causes the test to print a comprehensive
         overview over which image formats with how many dimensions and which pixel
         data types
@@ -125,156 +139,244 @@ def test_SaveLoad(self):
         # that seem to work but failed the consistency tests. These should be handled
         # with special care, as they might be the source of errors.
         inconsistent = False
-
+
         ####
         # OTHER SETTINGS
         ####
         # debug settings
         logger = Logger.getInstance()
-        #logger.setLevel(logging.DEBUG)
-
+        # logger.setLevel(logging.DEBUG)
+
         # run test either for most important formats or for all
-        #__suffixes = self.__important # (choice 1)
-        __suffixes = self.__important + self.__itk # (choice 2)
-
-
+        # __suffixes = self.__important # (choice 1)
+        __suffixes = self.__important + self.__itk  # (choice 2)
+
         # dimensions and dtypes to check
         __suffixes = list(set(__suffixes))
         __ndims = [1, 2, 3, 4, 5]
-        __dtypes = [scipy.bool_,
-                    scipy.int8, scipy.int16, scipy.int32, scipy.int64,
-                    scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64,
-                    scipy.float32, scipy.float64,
-                    scipy.complex64, scipy.complex128]
-
+        __dtypes = [
+            scipy.bool_,
+            scipy.int8,
+            scipy.int16,
+            scipy.int32,
+            scipy.int64,
+            scipy.uint8,
+            scipy.uint16,
+            scipy.uint32,
+            scipy.uint64,
+            scipy.float32,
+            scipy.float64,
+            scipy.complex64,
+            scipy.complex128,
+        ]
+
         # prepare struct to save settings that passed the test
         valid_types = dict.fromkeys(__suffixes)
         for k1 in valid_types:
             valid_types[k1] = dict.fromkeys(__ndims)
             for k2 in valid_types[k1]:
                 valid_types[k1][k2] = []
-
+
         # prepare struct to save settings that did not
         unsupported_type = dict.fromkeys(__suffixes)
         for k1 in unsupported_type:
             unsupported_type[k1] = dict.fromkeys(__ndims)
             for k2 in unsupported_type[k1]:
-                unsupported_type[k1][k2] = dict.fromkeys(__dtypes)
-
+                unsupported_type[k1][k2] = dict.fromkeys(__dtypes)
+
         # prepare struct to save settings that did not pass the data integrity test
         invalid_types = dict.fromkeys(__suffixes)
         for k1 in invalid_types:
             invalid_types[k1] = dict.fromkeys(__ndims)
             for k2 in invalid_types[k1]:
                 invalid_types[k1][k2] = dict.fromkeys(__dtypes)
-
+
         # create artificial images, save them, load them again and compare them
         path = tempfile.mkdtemp()
         try:
             for ndim in __ndims:
-                logger.debug('Testing for dimension {}...'.format(ndim))
+                logger.debug("Testing for dimension {}...".format(ndim))
                 arr_base = scipy.random.randint(0, 10, list(range(10, ndim + 10)))
                 for dtype in __dtypes:
                     arr_save = arr_base.astype(dtype)
                     for suffix in __suffixes:
                         # do not run test, if in avoid array
                         if ndim in self.__avoid and suffix in self.__avoid[ndim]:
-                            unsupported_type[suffix][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
+                            unsupported_type[suffix][ndim][
+                                dtype
+                            ] = "Test skipped, as combination in the tests __avoid array."
                            continue
-
-                        image = '{}/img{}'.format(path, suffix)
+
+                        image = "{}/img{}".format(path, suffix)
                         try:
                             # attempt to save the image
                             save(arr_save, image)
-                            self.assertTrue(os.path.exists(image), 'Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix, arr_save.shape, dtype))
-
+                            self.assertTrue(
+                                os.path.exists(image),
+                                "Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.".format(
+                                    suffix, arr_save.shape, dtype
+                                ),
+                            )
+
                             # attempt to load the image
                             arr_load, header = load(image)
-                            self.assertTrue(header, 'Image of type {} with shape={}/dtype={} has been loaded without exception, but no header has been supplied (got: {})'.format(suffix, arr_save.shape, dtype, header))
-
+                            self.assertTrue(
+                                header,
+                                "Image of type {} with shape={}/dtype={} has been loaded without exception, but no header has been supplied (got: {})".format(
+                                    suffix, arr_save.shape, dtype, header
+                                ),
+                            )
+
                             # check for data consistency
                             msg = self.__diff(arr_save, arr_load)
                             if msg:
                                 invalid_types[suffix][ndim][dtype] = msg
-                            #elif list == type(valid_types[suffix][ndim]):
+                            # elif list == type(valid_types[suffix][ndim]):
                             else:
                                 valid_types[suffix][ndim].append(dtype)
-
+
                             # remove image
-                            if os.path.exists(image): os.remove(image)
-                        except Exception as e: # clean up
+                            if os.path.exists(image):
+                                os.remove(image)
+                        except Exception as e:  # clean up
                             try:
                                 unsupported_type[suffix][ndim][dtype] = str(e.args)
                             except Exception as _:
                                 unsupported_type[suffix][ndim][dtype] = e.message
-                            if os.path.exists(image): os.remove(image)
+                            if os.path.exists(image):
+                                os.remove(image)
         except Exception:
-            if not os.listdir(path): os.rmdir(path)
-            else: logger.debug('Could not delete temporary directory {}. Is not empty.'.format(path))
+            if not os.listdir(path):
+                os.rmdir(path)
+            else:
+                logger.debug(
+                    "Could not delete temporary directory {}. Is not empty.".format(
+                        path
+                    )
+                )
             raise
-
+
         if supported:
-            print('\nsave() and load() support (at least) the following image configurations:')
-            print('type\tndim\tdtypes')
+            print(
+                "\nsave() and load() support (at least) the following image configurations:"
+            )
+            print("type\tndim\tdtypes")
             for suffix in valid_types:
                 for ndim, dtypes in list(valid_types[suffix].items()):
                     if list == type(dtypes) and not 0 == len(dtypes):
-                        print(('{}\t{}D\t{}'.format(suffix, ndim, [str(x).split('.')[-1][:-2] for x in dtypes])))
+                        print(
+                            (
+                                "{}\t{}D\t{}".format(
+                                    suffix,
+                                    ndim,
+                                    [str(x).split(".")[-1][:-2] for x in dtypes],
+                                )
+                            )
+                        )
         if notsupported:
-            print('\nthe following configurations are not supported:')
-            print('type\tndim\tdtype\t\terror')
+            print("\nthe following configurations are not supported:")
+            print("type\tndim\tdtype\t\terror")
             for suffix in unsupported_type:
                 for ndim in unsupported_type[suffix]:
                     for dtype, msg in list(unsupported_type[suffix][ndim].items()):
                         if msg:
-                            print(('{}\t{}D\t{}\t\t{}'.format(suffix, ndim, str(dtype).split('.')[-1][:-2], msg)))
-
+                            print(
+                                (
+                                    "{}\t{}D\t{}\t\t{}".format(
+                                        suffix,
+                                        ndim,
+                                        str(dtype).split(".")[-1][:-2],
+                                        msg,
+                                    )
+                                )
+                            )
+
         if inconsistent:
-            print('\nthe following configurations show inconsistent saving and loading behaviour:')
-            print('type\tndim\tdtype\t\terror')
+            print(
+                "\nthe following configurations show inconsistent saving and loading behaviour:"
+            )
+            print("type\tndim\tdtype\t\terror")
             for suffix in invalid_types:
                 for ndim in invalid_types[suffix]:
                     for dtype, msg in list(invalid_types[suffix][ndim].items()):
                         if msg:
-                            print(('{}\t{}D\t{}\t\t{}'.format(suffix, ndim, str(dtype).split('.')[-1][:-2], msg)))
-
+                            print(
+                                (
+                                    "{}\t{}D\t{}\t\t{}".format(
+                                        suffix,
+                                        ndim,
+                                        str(dtype).split(".")[-1][:-2],
+                                        msg,
+                                    )
+                                )
+                            )
+
     def __diff(self, arr1, arr2):
         """
         Returns an error message if the two supplied arrays differ, otherwise False.
-        """
+        """
         if not arr1.ndim == arr2.ndim:
-            return 'ndim differs ({} to {})'.format(arr1.ndim, arr2.ndim)
+            return "ndim differs ({} to {})".format(arr1.ndim, arr2.ndim)
         elif not self.__is_lossless(arr1.dtype.type, arr2.dtype.type):
-            return 'loss of data due to conversion from {} to {}'.format(arr1.dtype.type, arr2.dtype.type)
+            return "loss of data due to conversion from {} to {}".format(
+                arr1.dtype.type, arr2.dtype.type
+            )
         elif not arr1.shape == arr2.shape:
-            return 'shapes differs ({} to {}).'.format(arr1.shape, arr2.shape)
+            return "shapes differ ({} to {}).".format(arr1.shape, arr2.shape)
         elif not (arr1 == arr2).all():
-            return 'contents differs'
-        else: return False
-
+            return "contents differ"
+        else:
+            return False
+
     def __is_lossless(self, _from, _to):
         """
         Returns True if a data conversion from dtype _from to _to is lossless,
         otherwise False.
""" __int_order = [scipy.int8, scipy.int16, scipy.int32, scipy.int64] - - __uint_order = [scipy.uint8, scipy.int16, scipy.uint16, scipy.int32, scipy.uint32, scipy.int64, scipy.uint64] - + + __uint_order = [ + scipy.uint8, + scipy.int16, + scipy.uint16, + scipy.int32, + scipy.uint32, + scipy.int64, + scipy.uint64, + ] + __float_order = [scipy.float32, scipy.float64, scipy.float128] - + __complex_order = [scipy.complex64, scipy.complex128, scipy.complex256] - - __bool_order = [scipy.bool_, scipy.int8, scipy.uint8, scipy.int16, scipy.uint16, scipy.int32, scipy.uint32, scipy.int64, scipy.uint64] - - __orders = [__int_order, __uint_order, __float_order, __complex_order, __bool_order] - + + __bool_order = [ + scipy.bool_, + scipy.int8, + scipy.uint8, + scipy.int16, + scipy.uint16, + scipy.int32, + scipy.uint32, + scipy.int64, + scipy.uint64, + ] + + __orders = [ + __int_order, + __uint_order, + __float_order, + __complex_order, + __bool_order, + ] + for order in __orders: if _from in order: - if _to in order[order.index(_from):]: return True - else: return False + if _to in order[order.index(_from) :]: + return True + else: + return False return False - - -if __name__ == '__main__': + + +if __name__ == "__main__": unittest.main() diff --git a/tests/io_/metadata.py b/tests/io_/metadata.py index ae4e5bf6..01f20652 100644 --- a/tests/io_/metadata.py +++ b/tests/io_/metadata.py @@ -1,19 +1,20 @@ """Unittest for meta-data consistency.""" # build-in modules -import unittest -import tempfile import os +import tempfile +import unittest # third-party modules import scipy +from medpy.core.logger import Logger + +# own modules +from medpy.io import header, load, save # path changes -# own modules -from medpy.io import load, save, header -from medpy.core.logger import Logger # information __author__ = "Oskar Maier" @@ -22,105 +23,118 @@ __status__ = "Release" __description__ = "Meta-data consistency unittest." 
+
 # code
 class TestMetadataConsistency(unittest.TestCase):
-
-####
+    ####
     # Comprehensive list of image format endings
     ####
     # The most important image formats for medical image processing
-    __important = ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz', '.dcm', '.dicom', '.mhd', '.nrrd', '.mha']
-
+    __important = [
+        ".nii",
+        ".nii.gz",
+        ".hdr",
+        ".img",
+        ".img.gz",
+        ".dcm",
+        ".dicom",
+        ".mhd",
+        ".nrrd",
+        ".mha",
+    ]
+
     # list of image formats ITK is theoretically able to load
-    __itk = ['.analyze',    # failed saving
-             '.hdr',
-             '.img',
-             '.bmp',
-             '.dcm',
-             '.gdcm',       # failed saving
-             '.dicom',
-             '.4x',         # failed saving
-             '.5x',         # failed saving
-             '.ge',         # failed saving
-             '.ge4',        # failed saving
-             '.ge4x',       # failed saving
-             '.ge5',        # failed saving
-             '.ge5x',       # failed saving
-             '.gipl',
-             '.h5',
-             '.hdf5',
-             '.he5',
-             '.ipl',        # failed saving
-             '.jpg',
-             '.jpeg',
-             '.lsm',
-             '.mha',
-             '.mhd',
-             '.pic',
-             '.png',
-             '.raw',        # failed saving
-             '.vision',     # failed saving
-             '.siemens',    # failed saving
-             '.spr',
-             '.sdt',        # failed saving
-             '.stimulate',  # failed saving
-             '.tif',
-             '.tiff',
-             '.vtk',
-             '.bio',        # failed saving
-             '.biorad',     # failed saving
-             '.brains',     # failed saving
-             '.brains2',    # failed saving
-             '.brains2mask', # failed saving
-             '.bruker',     # failed saving
-             '.bruker2d',   # failed saving
-             '.bruker2dseq', # failed saving
-             '.mnc',        # failed saving
-             '.mnc2',       # failed saving
-             '.minc',       # failed saving
-             '.minc2',      # failed saving
-             '.nii',
-             '.nifti',      # failed saving
-             '.nhdr',
-             '.nrrd',
-             '.philips',    # failed saving
-             '.philipsreq', # failed saving
-             '.rec',        # failed saving
-             '.par',        # failed saving
-             '.recpar',     # failed saving
-             '.vox',        # failed saving
-             '.voxbo',      # failed saving
-             '.voxbocub']   # failed saving
-
+    __itk = [
+        ".analyze",  # failed saving
+        ".hdr",
+        ".img",
+        ".bmp",
+        ".dcm",
+        ".gdcm",  # failed saving
+        ".dicom",
+        ".4x",  # failed saving
+        ".5x",  # failed saving
+        ".ge",  # failed saving
+        ".ge4",  # failed saving
+        ".ge4x",  # failed saving
+        ".ge5",  # failed saving
+        ".ge5x",  # failed saving
+        ".gipl",
+        ".h5",
+        ".hdf5",
+        ".he5",
+        ".ipl",  # failed saving
+        ".jpg",
+        ".jpeg",
+        ".lsm",
+        ".mha",
+        ".mhd",
+        ".pic",
+        ".png",
+        ".raw",  # failed saving
+        ".vision",  # failed saving
+        ".siemens",  # failed saving
+        ".spr",
+        ".sdt",  # failed saving
+        ".stimulate",  # failed saving
+        ".tif",
+        ".tiff",
+        ".vtk",
+        ".bio",  # failed saving
+        ".biorad",  # failed saving
+        ".brains",  # failed saving
+        ".brains2",  # failed saving
+        ".brains2mask",  # failed saving
+        ".bruker",  # failed saving
+        ".bruker2d",  # failed saving
+        ".bruker2dseq",  # failed saving
+        ".mnc",  # failed saving
+        ".mnc2",  # failed saving
+        ".minc",  # failed saving
+        ".minc2",  # failed saving
+        ".nii",
+        ".nifti",  # failed saving
+        ".nhdr",
+        ".nrrd",
+        ".philips",  # failed saving
+        ".philipsreq",  # failed saving
+        ".rec",  # failed saving
+        ".par",  # failed saving
+        ".recpar",  # failed saving
+        ".vox",  # failed saving
+        ".voxbo",  # failed saving
+        ".voxbocub",
+    ]  # failed saving
+
     ##########
     # Combinations to avoid due to technical problems, dim->file ending pairs
     ##########
-    __avoid = {} # {4: ('.dcm', '.dicom')}
-
+    __avoid = {}  # {4: ('.dcm', '.dicom')}
+
     ##########
     # Error delta: the maximum difference between two meta-data entries that is still considered consistent (required, as there may be rounding errors)
     ##########
     __delta = 0.0001
-
+
     def test_MetadataConsistency(self):
         """
         This test checks the ability of different image formats to consistently
         save meta-data information.
        Especially if a conversion between formats is required, that involves
        different 3rd party modules, this is not always guaranteed.
-
+
        The images are saved in one format, loaded and then saved in another format.
        Subsequently the differences in the meta-data are checked.
-
+
        Currently this test can only check:
        - voxel spacing
        - image offset
-
+
        Note that some other tests are inherently performed by the
        loadsave.TestIOFacilities class:
        - data type
        - shape
        - content
-
+
        With the verbose switches, a comprehensive list of the results can be
        obtained.
        """
        ####
@@ -135,27 +149,37 @@ def test_MetadataConsistency(self):
        inconsistent = False
        # Print a list of formats that failed conversion in general
        unsupported = False
-
+
        ####
        # OTHER SETTINGS
        ####
        # debug settings
        logger = Logger.getInstance()
-        #logger.setLevel(logging.DEBUG)
-
+        # logger.setLevel(logging.DEBUG)
+
        # run test either for most important formats or for all (see loadsave.TestIOFacilities)
-        #__suffixes = self.__important # (choice 1)
-        __suffixes = self.__important + self.__itk # (choice 2)
-
+        # __suffixes = self.__important # (choice 1)
+        __suffixes = self.__important + self.__itk  # (choice 2)
+
        # dimensions and dtypes to check
        __suffixes = list(set(__suffixes))
        __ndims = [1, 2, 3, 4, 5]
-        __dtypes = [scipy.bool_,
-                    scipy.int8, scipy.int16, scipy.int32, scipy.int64,
-                    scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64,
-                    scipy.float32, scipy.float64, #scipy.float128, # last one removed, as not present on every machine
-                    scipy.complex64, scipy.complex128, ] #scipy.complex256 ## removed, as not present on every machine
-
+        __dtypes = [
+            scipy.bool_,
+            scipy.int8,
+            scipy.int16,
+            scipy.int32,
+            scipy.int64,
+            scipy.uint8,
+            scipy.uint16,
+            scipy.uint32,
+            scipy.uint64,
+            scipy.float32,
+            scipy.float64,  # scipy.float128, # last one removed, as not present on every machine
+            scipy.complex64,
+            scipy.complex128,
+        ]  # scipy.complex256 ## removed, as not present on every machine
+
        # prepare struct to save settings that passed the test
        consistent_types = dict.fromkeys(__suffixes)
        for k0 in consistent_types:
@@ -164,7 +188,7 @@ def test_MetadataConsistency(self):
            consistent_types[k0][k1] = dict.fromkeys(__ndims)
            for k2 in consistent_types[k0][k1]:
                consistent_types[k0][k1][k2] = []
-
+
        # prepare struct to save settings that did not
        inconsistent_types = dict.fromkeys(__suffixes)
        for k0 in inconsistent_types:
@@ -173,7 +197,7 @@ def test_MetadataConsistency(self):
            inconsistent_types[k0][k1] = dict.fromkeys(__ndims)
            for k2 in inconsistent_types[k0][k1]:
                inconsistent_types[k0][k1][k2] = dict.fromkeys(__dtypes)
-
+
        # prepare struct to save settings that did not pass the data integrity test
        unsupported_types = dict.fromkeys(__suffixes)
        for k0 in consistent_types:
@@ -182,134 +206,247 @@ def test_MetadataConsistency(self):
            unsupported_types[k0][k1] = dict.fromkeys(__ndims)
            for k2 in unsupported_types[k0][k1]:
                unsupported_types[k0][k1][k2] = dict.fromkeys(__dtypes)
-
+
        # create artificial images, save them, load them again and compare them
        path = tempfile.mkdtemp()
        try:
            for ndim in __ndims:
-                logger.debug('Testing for dimension {}...'.format(ndim))
+                logger.debug("Testing for dimension {}...".format(ndim))
                arr_base = scipy.random.randint(0, 10, list(range(10, ndim + 10)))
                for dtype in __dtypes:
                    arr_save = arr_base.astype(dtype)
                    for suffix_from in __suffixes:
                        # do not run test, if in avoid array
                        if ndim in self.__avoid and suffix_from in self.__avoid[ndim]:
-                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
+                            unsupported_types[suffix_from][suffix_from][ndim][
+                                dtype
+                            ] = "Test skipped, as combination in the tests __avoid array."
                            continue
-
+
                        # save array as file, load again to obtain header and set the meta-data
-                        image_from = '{}/img{}'.format(path, suffix_from)
+                        image_from = "{}/img{}".format(path, suffix_from)
                        try:
                            save(arr_save, image_from, None, True)
                            if not os.path.exists(image_from):
-                                raise Exception('Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix_from, arr_save.shape, dtype))
+                                raise Exception(
+                                    "Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.".format(
+                                        suffix_from, arr_save.shape, dtype
+                                    )
+                                )
                        except Exception as e:
-                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message if hasattr(e, 'message') else str(e.args)
+                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = (
+                                e.message if hasattr(e, "message") else str(e.args)
+                            )
                            continue
-
+
                        try:
                            img_from, hdr_from = load(image_from)
-                            img_from = img_from.astype(dtype) # change dtype of loaded image again, as sometimes the type is higher (e.g. int64 instead of int32) after loading!
+                            img_from = img_from.astype(
+                                dtype
+                            )  # change dtype of loaded image again, as sometimes the type is higher (e.g. int64 instead of int32) after loading!
                        except Exception as e:
-                            _message = e.message if hasattr(e, 'message') else str(e.args)
-                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = 'Saved reference image of type {} with shape={}/dtype={} could not be loaded. Reason: {}'.format(suffix_from, arr_save.shape, dtype, _message)
+                            _message = (
+                                e.message if hasattr(e, "message") else str(e.args)
+                            )
+                            unsupported_types[suffix_from][suffix_from][ndim][
+                                dtype
+                            ] = "Saved reference image of type {} with shape={}/dtype={} could not be loaded. Reason: {}".format(
+                                suffix_from, arr_save.shape, dtype, _message
+                            )
                            continue
-                        header.set_pixel_spacing(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
+                        header.set_pixel_spacing(
+                            hdr_from,
+                            [
+                                scipy.random.rand() * scipy.random.randint(1, 10)
+                                for _ in range(img_from.ndim)
+                            ],
+                        )
                        try:
-                            header.set_pixel_spacing(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
-                            header.set_offset(hdr_from, [scipy.random.rand() * scipy.random.randint(1, 10) for _ in range(img_from.ndim)])
+                            header.set_pixel_spacing(
+                                hdr_from,
+                                [
+                                    scipy.random.rand() * scipy.random.randint(1, 10)
+                                    for _ in range(img_from.ndim)
+                                ],
+                            )
+                            header.set_offset(
+                                hdr_from,
+                                [
+                                    scipy.random.rand() * scipy.random.randint(1, 10)
+                                    for _ in range(img_from.ndim)
+                                ],
+                            )
                        except Exception as e:
-                            logger.error('Could not set the header meta-data for image of type {} with shape={}/dtype={}. This should not happen and hints to a bug in the code. Signaled reason is: {}'.format(suffix_from, arr_save.shape, dtype, e))
-                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message if hasattr(e, 'message') else str(e.args)
+                            logger.error(
+                                "Could not set the header meta-data for image of type {} with shape={}/dtype={}. This should not happen and hints to a bug in the code. Signaled reason is: {}".format(
+                                    suffix_from, arr_save.shape, dtype, e
+                                )
+                            )
+                            unsupported_types[suffix_from][suffix_from][ndim][dtype] = (
+                                e.message if hasattr(e, "message") else str(e.args)
+                            )
                            continue
                        for suffix_to in __suffixes:
                            # do not run test, if in avoid array
                            if ndim in self.__avoid and suffix_to in self.__avoid[ndim]:
-                                unsupported_types[suffix_from][suffix_to][ndim][dtype] = "Test skipped, as combination in the tests __avoid array."
+                                unsupported_types[suffix_from][suffix_to][ndim][
+                                    dtype
+                                ] = "Test skipped, as combination in the tests __avoid array."
                                continue
-
+
                            # for each other format, try format to format conversion and check if the meta-data is consistent
-                            image_to = '{}/img_to{}'.format(path, suffix_to)
+                            image_to = "{}/img_to{}".format(path, suffix_to)
                            try:
                                save(img_from, image_to, hdr_from, True)
                                if not os.path.exists(image_to):
-                                    raise Exception('Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.'.format(suffix_to, arr_save.shape, dtype))
+                                    raise Exception(
+                                        "Image of type {} with shape={}/dtype={} has been saved without exception, but the file does not exist.".format(
+                                            suffix_to, arr_save.shape, dtype
+                                        )
+                                    )
                            except Exception as e:
-                                unsupported_types[suffix_from][suffix_from][ndim][dtype] = e.message if hasattr(e, 'message') else str(e.args)
+                                unsupported_types[suffix_from][suffix_from][ndim][
+                                    dtype
+                                ] = (
+                                    e.message if hasattr(e, "message") else str(e.args)
+                                )
                                continue
-
+
                            try:
                                _, hdr_to = load(image_to)
                            except Exception as e:
-                                _message = e.message if hasattr(e, 'message') else str(e.args)
-                                unsupported_types[suffix_from][suffix_to][ndim][dtype] = 'Saved testing image of type {} with shape={}/dtype={} could not be loaded. Reason: {}'.format(suffix_to, arr_save.shape, dtype, _message)
+                                _message = (
+                                    e.message if hasattr(e, "message") else str(e.args)
+                                )
+                                unsupported_types[suffix_from][suffix_to][ndim][
+                                    dtype
+                                ] = "Saved testing image of type {} with shape={}/dtype={} could not be loaded. Reason: {}".format(
+                                    suffix_to, arr_save.shape, dtype, _message
+                                )
                                continue
-
+
                            msg = self.__diff(hdr_from, hdr_to)
                            if msg:
-                                inconsistent_types[suffix_from][suffix_to][ndim][dtype] = msg
+                                inconsistent_types[suffix_from][suffix_to][ndim][
+                                    dtype
+                                ] = msg
                            else:
-                                consistent_types[suffix_from][suffix_to][ndim].append(dtype)
-
+                                consistent_types[suffix_from][suffix_to][ndim].append(
+                                    dtype
+                                )
+
                            # remove testing image
-                            if os.path.exists(image_to): os.remove(image_to)
-
+                            if os.path.exists(image_to):
+                                os.remove(image_to)
+
                        # remove reference image
-                        if os.path.exists(image_to): os.remove(image_to)
-
+                        if os.path.exists(image_to):
+                            os.remove(image_to)
+
        except Exception:
-            if not os.listdir(path): os.rmdir(path)
-            else: logger.debug('Could not delete temporary directory {}. Is not empty.'.format(path))
+            if not os.listdir(path):
+                os.rmdir(path)
+            else:
+                logger.debug(
+                    "Could not delete temporary directory {}. Is not empty.".format(
+                        path
+                    )
+                )
            raise
-
+
        if consistent:
-            print('\nthe following format conversions are meta-data consistent:')
-            print('from\tto\tndim\tdtypes')
+            print("\nthe following format conversions are meta-data consistent:")
+            print("from\tto\tndim\tdtypes")
            for suffix_from in consistent_types:
                for suffix_to in consistent_types[suffix_from]:
-                    for ndim, dtypes in list(consistent_types[suffix_from][suffix_to].items()):
+                    for ndim, dtypes in list(
+                        consistent_types[suffix_from][suffix_to].items()
+                    ):
                        if list == type(dtypes) and not 0 == len(dtypes):
-                            print(('{}\t{}\t{}D\t{}'.format(suffix_from, suffix_to, ndim, [str(x).split('.')[-1][:-2] for x in dtypes])))
+                            print(
+                                (
+                                    "{}\t{}\t{}D\t{}".format(
+                                        suffix_from,
+                                        suffix_to,
+                                        ndim,
+                                        [str(x).split(".")[-1][:-2] for x in dtypes],
+                                    )
+                                )
+                            )
        if inconsistent:
-            print('\nthe following form conversions are not meta-data consistent:')
-            print('from\tto\tndim\tdtype\t\terror')
+            print("\nthe following format conversions are not meta-data consistent:")
+            print("from\tto\tndim\tdtype\t\terror")
            for suffix_from in inconsistent_types:
                for suffix_to in inconsistent_types[suffix_from]:
                    for ndim in inconsistent_types[suffix_from][suffix_to]:
-                        for dtype, msg in list(inconsistent_types[suffix_from][suffix_to][ndim].items()):
+                        for dtype, msg in list(
+                            inconsistent_types[suffix_from][suffix_to][ndim].items()
+                        ):
                            if msg:
-                                print(('{}\t{}\t{}D\t{}\t\t{}'.format(suffix_from, suffix_to, ndim, str(dtype).split('.')[-1][:-2], msg)))
-
+                                print(
+                                    (
+                                        "{}\t{}\t{}D\t{}\t\t{}".format(
+                                            suffix_from,
+                                            suffix_to,
+                                            ndim,
+                                            str(dtype).split(".")[-1][:-2],
+                                            msg,
+                                        )
+                                    )
+                                )
+
        if unsupported:
-            print('\nthe following form conversions could not be tested due to errors:')
-            print('from\tto\tndim\tdtype\t\terror')
+            print("\nthe following format conversions could not be tested due to errors:")
+            print("from\tto\tndim\tdtype\t\terror")
            for suffix_from in unsupported_types:
                for suffix_to in unsupported_types[suffix_from]:
                    for ndim in unsupported_types[suffix_from][suffix_to]:
-                        for dtype, msg in list(unsupported_types[suffix_from][suffix_to][ndim].items()):
+                        for dtype, msg in list(
+                            unsupported_types[suffix_from][suffix_to][ndim].items()
+                        ):
                            if msg:
-                                print(('{}\t{}\t{}D\t{}\t\t{}'.format(suffix_from, suffix_to, ndim, str(dtype).split('.')[-1][:-2], msg)))
-
+                                print(
+                                    (
+                                        "{}\t{}\t{}D\t{}\t\t{}".format(
+                                            suffix_from,
+                                            suffix_to,
+                                            ndim,
+                                            str(dtype).split(".")[-1][:-2],
+                                            msg,
+                                        )
+                                    )
+                                )
+
    def __diff(self, hdr1, hdr2):
        """
        Returns an error message if the meta-data of the supplied headers differ,
-        otherwise False. 
+        otherwise False.
""" - if not self.__same_seq(header.get_pixel_spacing(hdr1), header.get_pixel_spacing(hdr2)): - return 'the voxel spacing is not consistent: {} != {}'.format(header.get_pixel_spacing(hdr1), header.get_pixel_spacing(hdr2)) + if not self.__same_seq( + header.get_pixel_spacing(hdr1), header.get_pixel_spacing(hdr2) + ): + return "the voxel spacing is not consistent: {} != {}".format( + header.get_pixel_spacing(hdr1), header.get_pixel_spacing(hdr2) + ) if not self.__same_seq(header.get_offset(hdr1), header.get_offset(hdr2)): - return 'the offset is not consistent: {} != {}'.format(header.get_offset(hdr1), header.get_offset(hdr2)) - #return 'the offset is not consistent: {} != {}\n{} / {}\n{} / {}'.format(header.get_offset(hdr1), header.get_offset(hdr2), type(hdr1), type(hdr2), hdr2.NumberOfFrames if "NumberOfFrames" in hdr2 else "NONE", hdr2.ImagePositionPatient if "ImagePositionPatient" in hdr2 else 'NONE') - else: return False - + return "the offset is not consistent: {} != {}".format( + header.get_offset(hdr1), header.get_offset(hdr2) + ) + # return 'the offset is not consistent: {} != {}\n{} / {}\n{} / {}'.format(header.get_offset(hdr1), header.get_offset(hdr2), type(hdr1), type(hdr2), hdr2.NumberOfFrames if "NumberOfFrames" in hdr2 else "NONE", hdr2.ImagePositionPatient if "ImagePositionPatient" in hdr2 else 'NONE') + else: + return False + def __same_seq(self, seq1, seq2): - if len(seq1) != len(seq2): return False + if len(seq1) != len(seq2): + return False for e1, e2 in zip(seq1, seq2): diff = abs(e1 - e2) - if diff > self.__delta: return False + if diff > self.__delta: + return False return True - -if __name__ == '__main__': + + +if __name__ == "__main__": unittest.main() diff --git a/tests/metric_/histogram.py b/tests/metric_/histogram.py index 91318076..9f132963 100644 --- a/tests/metric_/histogram.py +++ b/tests/metric_/histogram.py @@ -4,20 +4,31 @@ """ import numpy as np -from hypothesis import given, strategies, assume, Verbosity, note, event +from hypothesis import assume, given from hypothesis import settings as hyp_settings +from hypothesis import strategies from medpy.metric import histogram -metric_list = ['manhattan', 'minowski', 'euclidean', 'noelle_2', 'noelle_4', 'noelle_5'] -metric_list_to_doublecheck = ['cosine_1'] - -unknown_property = ['histogram_intersection'] -still_under_dev = ['quadratic_forms'] -similarity_funcs = ['correlate', 'cosine', 'cosine_2', 'cosine_alt', 'fidelity_based'] -semi_metric_list = ['kullback_leibler', 'jensen_shannon', 'chi_square', 'chebyshev', 'chebyshev_neg', - 'histogram_intersection_1', 'relative_deviation', 'relative_bin_deviation', - 'noelle_1', 'noelle_3', 'correlate_1'] +metric_list = ["manhattan", "minowski", "euclidean", "noelle_2", "noelle_4", "noelle_5"] +metric_list_to_doublecheck = ["cosine_1"] + +unknown_property = ["histogram_intersection"] +still_under_dev = ["quadratic_forms"] +similarity_funcs = ["correlate", "cosine", "cosine_2", "cosine_alt", "fidelity_based"] +semi_metric_list = [ + "kullback_leibler", + "jensen_shannon", + "chi_square", + "chebyshev", + "chebyshev_neg", + "histogram_intersection_1", + "relative_deviation", + "relative_bin_deviation", + "noelle_1", + "noelle_3", + "correlate_1", +] default_feature_dim = 1000 default_num_bins = 20 @@ -46,23 +57,28 @@ def within_tolerance(x, y): def make_random_histogram(length=default_feature_dim, num_bins=default_num_bins): "Returns a sequence of histogram density values that sum to 1.0" - hist, bin_edges = np.histogram(np.random.random(length), - bins=num_bins, 
density=True) + hist, bin_edges = np.histogram( + np.random.random(length), bins=num_bins, density=True + ) # to ensure they sum to 1.0 hist = hist / sum(hist) if len(hist) < 2: - raise ValueError('Invalid histogram') + raise ValueError("Invalid histogram") return hist # Increasing the number of examples to try -@hyp_settings(max_examples=1000, min_satisfying_examples=100) # , verbosity=Verbosity.verbose) -@given(strategies.sampled_from(metric_list), - strategies.integers(range_feature_dim[0], range_feature_dim[1]), - strategies.integers(range_num_bins[0], range_num_bins[1])) +@hyp_settings( + max_examples=1000, min_satisfying_examples=100 +) # , verbosity=Verbosity.verbose) +@given( + strategies.sampled_from(metric_list), + strategies.integers(range_feature_dim[0], range_feature_dim[1]), + strategies.integers(range_num_bins[0], range_num_bins[1]), +) def test_math_properties_metric(method_str, feat_dim, num_bins): """Trying to test the four properties on the same set of histograms""" @@ -103,7 +119,7 @@ def check_nonnegativity(method, h1, h2): def check_triangle_inequality(method, h1, h2, h3): - """ Classic test for a metric: dist(a,b) < dist(a,b) + dist(a,c)""" + """Classic test for a metric: dist(a,b) < dist(a,b) + dist(a,c)""" d12 = method(h1, h2) d23 = method(h2, h3) diff --git a/tests/support.py b/tests/support.py index 46231639..381a2718 100755 --- a/tests/support.py +++ b/tests/support.py @@ -2,16 +2,18 @@ """Check supported image formats.""" +import unittest + # build-in modules import warnings -import unittest + +# own modules +import io_ # third-party modules # path changes -# own modules -import io_ # information __author__ = "Oskar Maier" @@ -20,17 +22,23 @@ __status__ = "Release" __description__ = "Check supported image formats." + # code def main(): # load io tests with warnings.catch_warnings(): warnings.simplefilter("ignore") suite_io = unittest.TestSuite() - suite_io.addTests(unittest.TestLoader().loadTestsFromTestCase(io_.TestIOFacilities)) - suite_io.addTests(unittest.TestLoader().loadTestsFromTestCase(io_.TestMetadataConsistency)) - + suite_io.addTests( + unittest.TestLoader().loadTestsFromTestCase(io_.TestIOFacilities) + ) + suite_io.addTests( + unittest.TestLoader().loadTestsFromTestCase(io_.TestMetadataConsistency) + ) + # execute tests unittest.TextTestRunner(verbosity=2).run(suite_io) -if __name__ == '__main__': + +if __name__ == "__main__": main()
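
`test_math_properties_metric` above drives each distance in `metric_list` through the four metric axioms on hypothesis-generated histograms, via the `check_*` helpers visible in the hunks (e.g. `check_nonnegativity`, `check_triangle_inequality`). The same four checks can be sketched without hypothesis, using plain NumPy and the Manhattan (L1) distance as a stand-in for `medpy.metric.histogram.manhattan`; a small tolerance is added to the triangle inequality to absorb floating-point rounding, much as the test's `within_tolerance` helper does:

    import numpy as np

    def manhattan(h1, h2):
        # L1 distance between two histograms; a true metric
        return np.abs(np.asarray(h1) - np.asarray(h2)).sum()

    def random_hist(length=1000, num_bins=20):
        # random density histogram normalised to sum to 1.0,
        # mirroring make_random_histogram above
        hist, _ = np.histogram(np.random.random(length), bins=num_bins, density=True)
        return hist / hist.sum()

    h1, h2, h3 = random_hist(), random_hist(), random_hist()
    assert manhattan(h1, h1) == 0                  # identity
    assert manhattan(h1, h2) == manhattan(h2, h1)  # symmetry
    assert manhattan(h1, h2) >= 0                  # non-negativity
    assert manhattan(h1, h3) <= manhattan(h1, h2) + manhattan(h2, h3) + 1e-12  # triangle inequality
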