diff --git a/CellProfiler4_AutoConvert/noise2void.py b/CellProfiler4_AutoConvert/noise2void.py
new file mode 100644
index 00000000..03423758
--- /dev/null
+++ b/CellProfiler4_AutoConvert/noise2void.py
@@ -0,0 +1,186 @@
+#################################
+#
+# Imports from useful Python libraries
+#
+#################################
+
+from os.path import split
+from n2v.models import N2V
+
+
+#################################
+#
+# Imports from CellProfiler
+#
+##################################
+from cellprofiler_core.setting.choice import Choice
+from cellprofiler_core.setting import Binary
+from cellprofiler_core.setting.text import Directory
+from cellprofiler_core.setting.text import Text
+from cellprofiler_core.module import ImageProcessing
+from cellprofiler_core.constants.module import IO_FOLDER_CHOICE_HELP_TEXT
+
+__doc__ = """\
+Noise2Void
+=============
+
+**Noise2Void** is a deep-learning-based image denoiser.
+
+|
+
+============ ============ ===============
+Supports 2D? Supports 3D? Respects masks?
+============ ============ ===============
+YES          YES          NO
+============ ============ ===============
+
+What do I need as input?
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Noise2Void** expects a 2D or a 3D image. The 2D image may be a color image; the 3D
+image must be grayscale. This module only offers **Noise2Void**'s prediction (denoising)
+capabilities, so it has to be pointed, via its settings, to a pre-trained machine
+learning model. Information on training, together with example models, can be found at
+https://github.com/juglab/n2v.
+
+What do I get as output?
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+A denoised version of the input image. The dimensions and other properties of the image
+remain unchanged.
+
+Technical notes
+^^^^^^^^^^^^^^^
+
+Alongside n2v, TensorFlow 2 should be installed and configured correctly so that this
+module runs on the GPU rather than on the CPU, which is much slower.
+
+References
+^^^^^^^^^^
+
+- Krull, Alexander; Buchholz, Tim-Oliver; Jug, Florian (2019): "Noise2Void - Learning
+  Denoising from Single Noisy Images." *Proceedings of the IEEE Conference on Computer
+  Vision and Pattern Recognition*, 2129--2137.
+
+- https://github.com/juglab/n2v
+
+"""
+
+#
+# Constants
+#
+N2V_AXES_3D = 'ZYX'
+N2V_AXES_2D = 'YX'
+N2V_AXES_COLOR = 'C'
+
+
+class Noise2Void(ImageProcessing):
+    module_name = "Noise2Void"
+
+    variable_revision_number = 1
+
+    def create_settings(self):
+        super(Noise2Void, self).create_settings()
+
+        self.ml_model = Directory(
+            "Path to ML Model",
+            doc="""\
+Select the folder containing the machine learning model to be used.
+This model has to be generated via Noise2Void training. See
+https://github.com/juglab/n2v/blob/master/examples/2D/denoising2D_RGB/01_training.ipynb
+for a training example.
+""")
+
+        self.color = Binary(
+            "Process as color image?",
+            value=False,
+            doc="""\
+Select whether your image should be processed as a color image.
+""")
+
+        self.manual_slicing = Binary(
+            "Slice image manually?",
+            value=False,
+            doc="""\
+If necessary, **Noise2Void** will automatically slice your image into tiles for a better
+memory fit. If you want to choose the tiling yourself, enable this setting.
+
+Color images do **not** support custom slicing at the moment!
+""")
+
+        self.slicing_configuration = Text(
+            "Tile size",
+            value="(2,2,2)",
+            doc="""\
+You can provide a slicing configuration (the number of tiles along each axis) to Noise2Void for a better memory fit.
+Specify your custom slicing configuration as follows:
+
+- (x,y) for 2D images
+- (x,y,z) for 3D images,
+
+where x, y and z are positive integers giving the number of tiles along each axis.
+
+If your input cannot be parsed, no slicing configuration will be passed to n2v.
+""")
+
+        self.axes_configuration = Text(
+            text="N2V Axes",
+            value=N2V_AXES_3D,
+            doc="""\
+For internal use only.
+Communicates the axes configuration (2D or 3D, color or not) to n2v.
+""")
+
+        self.x_name.doc = """\
+This is the image that the module operates on. You can choose any image
+that is made available by a prior module.
+
+**Noise2Void** will denoise this image using a TensorFlow-based neural network.
+"""
+
+    def settings(self):
+        settings = super(Noise2Void, self).settings()
+        return settings + [self.ml_model, self.slicing_configuration,
+                           self.color, self.axes_configuration]
+
+    def visible_settings(self):
+        visible_settings = super(Noise2Void, self).visible_settings()
+
+        visible_settings += [self.ml_model, self.color]
+
+        # Custom slicing is only offered for grayscale images.
+        if not self.color:
+            visible_settings += [self.manual_slicing]
+            if self.manual_slicing:
+                visible_settings += [self.slicing_configuration]
+        return visible_settings
+
+    #
+    # This is the function that gets called during "run" to create the output image.
+    #
+    def denoise(self, pixels, ml_model, final_tile_choice, color, axes):
+
+        path = self.ml_model.get_absolute_path()
+        (basedir, model_name) = split(path)
+
+        try:
+            model = N2V(config=None, name=model_name, basedir=basedir)
+        except FileNotFoundError as e:
+            raise FileNotFoundError(
+                "Path " + path + " doesn't lead to a valid model") from e
+
+        # A tile tuple is only used when manual slicing is enabled and the
+        # configuration parses into a tuple matching the image dimensionality.
+        tile_tuple = None
+        if self.manual_slicing:
+            tile_tuple = self.convert_string_to_tuple(final_tile_choice)
+            if tile_tuple is not None and len(tile_tuple) != len(axes):
+                tile_tuple = None
+
+        if color or tile_tuple is None:
+            axes = self.adjust_for_color(axes)
+            pred = model.predict(pixels, axes=axes)
+        else:
+            pred = model.predict(pixels, axes=axes, n_tiles=tile_tuple)
+        return pred
+
+    def run(self, workspace):
+        image = workspace.image_set.get_image(self.x_name.value)
+        self.adjust_settings_for_dimensionality(image.volumetric)
+        self.function = self.denoise
+
+        super(Noise2Void, self).run(workspace)
+
+    def volumetric(self):
+        return True
+
+    def adjust_settings_for_dimensionality(self, image_is_3d_in_workspace):
+        if image_is_3d_in_workspace:
+            self.axes_configuration.value = N2V_AXES_3D
+        else:
+            self.axes_configuration.value = N2V_AXES_2D
+
+    def adjust_for_color(self, axes):
+        # str.replace returns a new string, so the result has to be reassigned.
+        axes = axes.replace(N2V_AXES_COLOR, '')
+        if self.color:
+            axes += N2V_AXES_COLOR
+        return axes
+
+    def convert_string_to_tuple(self, text):
+        try:
+            text = text.strip()
+            text = text.replace('(', '')
+            text = text.replace(')', '')
+            text = text.replace(' ', '')
+            return tuple(map(int, text.split(',')))
+        except ValueError:
+            return None
diff --git a/environment.yml b/environment.yml
index d5317bf5..49a9b989 100644
--- a/environment.yml
+++ b/environment.yml
@@ -23,6 +23,8 @@ dependencies:
   - rise
   - tifffile
   - wxpython
+  - tensorflow-gpu=2.4.1
+  - keras=2.3.1
 # - wxpython=3.0.2.0
   - pip:
     - git+https://github.com/CellProfiler/CellProfiler.git@master#egg=CellProfiler
diff --git a/requirements.txt b/requirements.txt
index 5c952acc..4b0d80dd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,4 @@ cellh5
 pyimagej
 microscopeimagequality
 keras
+n2v
diff --git a/tests/test_noise2void.py b/tests/test_noise2void.py
new file mode 100644
index 00000000..b62e1a1b
--- /dev/null
+++ b/tests/test_noise2void.py
@@ -0,0 +1,174 @@
+import pytest
+
+from unittest.mock import patch
+
+from cellprofiler.modules import noise2void
+from n2v.models import N2V
+
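+# These tests patch N2V (or just its predict method), so no real TensorFlow model
+# is loaded. The 'workspace' fixture is assumed to come from a shared conftest.py
+# and to expose an image set containing an 'example' image.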
+
+instance = noise2void.Noise2Void
+
+AXES_3D = 'ZYX'
+AXES_COLOR_3D = 'ZYXC'
+AXES_COLOR_2D = 'YXC'
+AXES_2D = 'YX'
+DEFAULT_TILES_3D = (2, 4, 4)
+DEFAULT_TILES_2D = (2, 1)
+
+
+@pytest.fixture(scope="function")
+def module(request):
+    instance = getattr(request.module, "instance")
+
+    return instance()
+
+
+def test_3d_support(module):
+    assert module.volumetric()
+
+
+def test_number_of_visible_settings_color(module):
+    module.color.value = True
+
+    visible_settings = module.visible_settings()
+    assert len(visible_settings) == 4
+    assert all(setting in visible_settings for setting in [
+        module.ml_model, module.color])
+    assert module.manual_slicing not in visible_settings
+
+
+def test_no_slicing_settings_when_color_toggled_true(module):
+    module.manual_slicing.value = True
+    module.color.value = True
+
+    visible_settings = module.visible_settings()
+    assert len(visible_settings) == 4
+    assert all(setting in visible_settings for setting in [
+        module.ml_model, module.color])
+    assert module.manual_slicing not in visible_settings
+
+
+def test_number_of_default_visible_settings(module):
+
+    # given
+    settings_no_manual_tiles = module.visible_settings()  # default settings
+
+    # then
+    assert len(settings_no_manual_tiles) == 5
+    assert all(setting in settings_no_manual_tiles for setting in [
+        module.ml_model, module.color, module.manual_slicing])
+
+
+def test_number_of_settings_manual_tile_choice(module):
+
+    # when
+    module.manual_slicing.value = True  # activate manual tile selection
+
+    visible_settings = module.visible_settings()
+
+    # then
+    assert len(visible_settings) == 6
+    assert all(setting in visible_settings for setting in [
+        module.ml_model, module.color, module.manual_slicing,
+        module.slicing_configuration])
+
+
+@patch('cellprofiler.modules.noise2void.N2V', autospec=True)
+def test_n2v_creation(N2V, module, workspace):
+
+    module.x_name.value = 'example'
+    module.ml_model.value = 'Default Input Folder sub-folder|Documents/CellProfiler/data/n2v_3D'
+    module.run(workspace)
+
+    # TODO check if this is multi-platform compatible
+    N2V.assert_called_with(
+        None, 'n2v_3D', '/home/nesta/Documents/CellProfiler/data')
+
+
+@patch.object(N2V, 'predict')  # default == no color, no manual slicing
+def test_run_default(pred, module, workspace):
+
+    image_name = 'example'
+    image = workspace.image_set.get_image(image_name)
+    image_array = image.image
+    module.x_name.value = image_name
+    module.ml_model.value = 'Default Input Folder sub-folder|Documents/CellProfiler/data/n2v_3D'
+    module.run(workspace)
+
+    if not image.volumetric:
+        pred.assert_called_with(image_array, axes=AXES_2D)
+    else:
+        pred.assert_called_with(
+            image_array, axes=AXES_3D)
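+
+
+# Sketch of an additional check (assumes N2V raises FileNotFoundError for a missing
+# model directory, which denoise() re-raises with a readable message).
+def test_invalid_model_path_raises(module, workspace):
+    module.x_name.value = 'example'
+    module.ml_model.value = 'Default Input Folder sub-folder|does/not/exist'
+
+    with pytest.raises(FileNotFoundError):
+        module.run(workspace)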
+
+
+# make sure the color axis is added to the axes configuration;
+# tiles are not available when color == True
+@patch.object(N2V, 'predict')
+def test_run_color(pred, module, workspace):
+
+    image_name = 'example'
+    image = workspace.image_set.get_image(image_name)
+    image_array = image.image
+    module.x_name.value = image_name
+    module.ml_model.value = 'Default Input Folder sub-folder|Documents/CellProfiler/data/n2v_3D'
+    module.color.value = True
+    module.run(workspace)
+
+    if not image.volumetric:
+        pred.assert_called_with(image_array, axes=AXES_COLOR_2D)
+    else:
+        pred.assert_called_with(
+            image_array, axes=AXES_COLOR_3D)
+
+
+# make sure custom tiles end up in the actual n2v call
+@patch.object(N2V, 'predict')
+def test_run_manual_tiles(pred, module, workspace):
+
+    image_name = 'example'
+    image = workspace.image_set.get_image(image_name)
+    image_array = image.image
+    module.x_name.value = image_name
+    module.ml_model.value = 'Default Input Folder sub-folder|Documents/CellProfiler/data/n2v_3D'
+    module.manual_slicing.value = True
+    module.slicing_configuration.value = '(1,2,3)' if image.volumetric else '(1,2)'
+
+    module.run(workspace)
+
+    if not image.volumetric:
+        pred.assert_called_with(
+            image_array, axes=AXES_2D, n_tiles=(1, 2))
+    else:
+        pred.assert_called_with(
+            image_array, axes=AXES_3D, n_tiles=(1, 2, 3))
+
+
+@patch.object(N2V, 'predict')
+def test_wrong_tile_dimensionality_leads_to_run_with_no_tiles(pred, module, workspace):
+    image_name = 'example'
+    image = workspace.image_set.get_image(image_name)
+    image_array = image.image
+    module.x_name.value = image_name
+    module.ml_model.value = 'Default Input Folder sub-folder|Documents/CellProfiler/data/n2v_3D'
+
+    # deliberately request a tile tuple whose dimensionality does not match the image
+    module.manual_slicing.value = True
+    module.slicing_configuration.value = '(1,2)' if image.volumetric else '(1,2,3)'
+    module.run(workspace)
+
+    if not image.volumetric:
+        pred.assert_called_with(image_array, axes=AXES_2D)
+    else:
+        pred.assert_called_with(image_array, axes=AXES_3D)
+
+
+def test_2d_tile_parsing(module):
+    values_to_test = ["(1,2)", " (1,2)", " (1,2) ", " (1 , 2)",
+                      " ( 1 ,2)", "1,2", "1,2)", "( 1,2"]
+    assert all(parsed == (1, 2) for parsed in
+               map(module.convert_string_to_tuple, values_to_test))
+
+
+def test_3d_tile_parsing(module):
+    values_to_test = ["(1,2,2)", " (1,2,2)", " (1,2,2) ",
+                      " (1 , 2,2)", " ( 1 ,2 , 2)", "1,2,2", "1,2,2)", "( 1,2,2"]
+    assert all(parsed == (1, 2, 2) for parsed in
+               map(module.convert_string_to_tuple, values_to_test))
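+
+
+# A possible complementary negative check (sketch): convert_string_to_tuple should
+# return None for strings it cannot coerce into integers, so predict() is then
+# called without an n_tiles argument.
+def test_unparsable_tile_configuration_returns_none(module):
+    values_to_test = ["", "()", "(a,b)", "1;2", "(1,2,three)"]
+    assert all(module.convert_string_to_tuple(value) is None
+               for value in values_to_test)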